Dataset schema (for string columns, min/max give string lengths; for numeric columns, value ranges):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
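A minimal sketch of how rows in this schema can be loaded and inspected with the `datasets` library; the dataset id below is a placeholder, not the actual Hub location of this dataset:

```python
# Sketch: load a dataset with the schema above and inspect one row.
# "org/dataset-name" is a placeholder id, not this dataset's real Hub path.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")
row = ds[0]
print(row["class_name"])                # fully qualified class name, e.g. transformers.models.luke.modeling_luke.LukeEmbeddings
print(row["CountLineCode"])             # per-class code metrics are stored as float64 columns
print(row["human_written_code"][:200])  # the full source text of the class
```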
id: 3,500
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeEmbeddings

human_written_code:

```python
import torch
from torch import nn


class LukeEmbeddings(nn.Module):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if position_ids is None:
            if input_ids is not None:
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
        return position_ids.unsqueeze(0).expand(input_shape)
```

class_skeleton:

```python
class LukeEmbeddings(nn.Module):
    '''
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    '''

    def __init__(self, config):
        pass

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        pass

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        '''
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        '''
        pass
```
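A hedged usage sketch for the class in this row. The import path comes from the row's class_name; whether it resolves, and whether the module still matches this snapshot, depends on the installed transformers version:

```python
# Sketch only: instantiate the LukeEmbeddings module from this row and run a forward pass.
# Assumes a transformers install whose modeling_luke matches this dataset snapshot.
import torch
from transformers import LukeConfig
from transformers.models.luke.modeling_luke import LukeEmbeddings

config = LukeConfig()                           # default vocab/hidden sizes, illustrative only
emb = LukeEmbeddings(config)
input_ids = torch.tensor([[0, 31414, 232, 2]])  # arbitrary token ids below config.vocab_size
# token_type_ids is passed explicitly: the token_type_ids=None branch in forward()
# reads self.position_ids, an attribute this class never defines.
token_type_ids = torch.zeros_like(input_ids)
out = emb(input_ids=input_ids, token_type_ids=token_type_ids)
print(out.shape)  # e.g. torch.Size([1, 4, 768]) with the default hidden_size
```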
metrics: total_program_units=4, total_doc_str=2, AvgCountLine=21, AvgCountLineBlank=3, AvgCountLineCode=15, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.29, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=6, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=71, CountLineBlank=13, CountLineCode=45, CountLineCodeDecl=23, CountLineCodeExe=35, CountLineComment=13, CountStmt=33, CountStmtDecl=17, CountStmtExe=29, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=8
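Across the rows shown here, CommentToCodeRatio matches CountLineComment divided by CountLineCode, rounded to two decimals (13/45 ≈ 0.29 for the row above). The dump itself does not document this relationship, so treat it as an inference:

```python
# Inferred relation (not documented in the dump):
# CommentToCodeRatio ≈ CountLineComment / CountLineCode, rounded to 2 decimals.
count_line_comment, count_line_code = 13, 45  # values from row 3,500
print(round(count_line_comment / count_line_code, 2))  # 0.29 — matches the row's CommentToCodeRatio
```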
id: 3,501
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeEncoder

human_written_code:

```python
from torch import nn


class LukeEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
        all_word_hidden_states = () if output_hidden_states else None
        all_entity_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
                all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions)
            word_hidden_states = layer_outputs[0]
            if entity_hidden_states is not None:
                entity_hidden_states = layer_outputs[1]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
            all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
        if not return_dict:
            return tuple((v for v in [word_hidden_states, all_word_hidden_states, all_self_attentions, entity_hidden_states, all_entity_hidden_states] if v is not None))
        return BaseLukeModelOutput(last_hidden_state=word_hidden_states, hidden_states=all_word_hidden_states, attentions=all_self_attentions, entity_last_hidden_state=entity_hidden_states, entity_hidden_states=all_entity_hidden_states)
```

class_skeleton:

```python
class LukeEncoder(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
        pass
```
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=37, AvgCountLineBlank=4, AvgCountLineCode=34, AvgCountLineComment=0, AvgCyclomatic=7, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=76, CountLineBlank=8, CountLineCode=68, CountLineCodeDecl=21, CountLineCodeExe=56, CountLineComment=0, CountStmt=29, CountStmtDecl=12, CountStmtExe=26, MaxCyclomatic=12, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=13
id: 3,502
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeEntityEmbeddings

human_written_code:

```python
import torch
from typing import Optional, Union
from torch import nn
from .configuration_luke import LukeConfig


class LukeEntityEmbeddings(nn.Module):
    def __init__(self, config: LukeConfig):
        super().__init__()
        self.config = config
        self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
        if config.entity_emb_size != config.hidden_size:
            self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: Optional[torch.LongTensor]=None):
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(entity_ids)
        entity_embeddings = self.entity_embeddings(entity_ids)
        if self.config.entity_emb_size != self.config.hidden_size:
            entity_embeddings = self.entity_embedding_dense(entity_embeddings)
        position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
        position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
        position_embeddings = position_embeddings * position_embedding_mask
        position_embeddings = torch.sum(position_embeddings, dim=-2)
        position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-07)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = entity_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
```

class_skeleton:

```python
class LukeEntityEmbeddings(nn.Module):
    def __init__(self, config: LukeConfig):
        pass

    def forward(self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: Optional[torch.LongTensor]=None):
        pass
```
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=4, AvgCountLineCode=14, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=38, CountLineBlank=9, CountLineCode=29, CountLineCodeDecl=17, CountLineCodeExe=24, CountLineComment=0, CountStmt=27, CountStmtDecl=15, CountStmtExe=24, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
id: 3,503
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeForEntityClassification

human_written_code:

````python
import torch
from typing import Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring(custom_intro='\n    The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity\n    token) for entity classification tasks, such as Open Entity.\n    ')
class LukeForEntityClassification(LukePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.luke = LukeModel(config)
        self.num_labels = config.num_labels
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EntityClassificationOutput]:
        """
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
            used for the single-label classification. In this case, labels should contain the indices that should be
            in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
            loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where
            0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntityClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
        >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [(0, 7)]  # character-based entity span corresponding to "Beyoncé"
        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: person
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        feature_vector = outputs.entity_last_hidden_state[:, 0, :]
        feature_vector = self.dropout(feature_vector)
        logits = self.classifier(feature_vector)
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if labels.ndim == 1:
                loss = nn.functional.cross_entropy(logits, labels)
            else:
                loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
        if not return_dict:
            return tuple((v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None))
        return EntityClassificationOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n    The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity\n    token) for entity classification tasks, such as Open Entity.\n    ')
class LukeForEntityClassification(LukePreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EntityClassificationOutput]:
        '''
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
            used for the single-label classification. In this case, labels should contain the indices that should be
            in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
            loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where
            0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntityClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
        >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [(0, 7)]  # character-based entity span corresponding to "Beyoncé"
        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: person
        ```'''
        pass
````
metrics: total_program_units=5, total_doc_str=1, AvgCountLine=51, AvgCountLineBlank=7, AvgCountLineCode=32, AvgCountLineComment=13, AvgCyclomatic=3, CommentToCodeRatio=0.39, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=106, CountLineBlank=14, CountLineCode=66, CountLineCodeDecl=28, CountLineCodeExe=45, CountLineComment=26, CountStmt=23, CountStmtDecl=11, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=6
id: 3,504
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeForEntityPairClassification

human_written_code:

````python
import torch
from typing import Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring(custom_intro='\n    The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity\n    tokens) for entity pair classification tasks, such as TACRED.\n    ')
class LukeForEntityPairClassification(LukePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.luke = LukeModel(config)
        self.num_labels = config.num_labels
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EntityPairClassificationOutput]:
        """
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
            used for the single-label classification. In this case, labels should contain the indices that should be
            in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
            loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where
            0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntityPairClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
        >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [
        ...     (0, 7),
        ...     (17, 28),
        ... ]  # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: per:cities_of_residence
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        feature_vector = torch.cat([outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1)
        feature_vector = self.dropout(feature_vector)
        logits = self.classifier(feature_vector)
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if labels.ndim == 1:
                loss = nn.functional.cross_entropy(logits, labels)
            else:
                loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
        if not return_dict:
            return tuple((v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None))
        return EntityPairClassificationOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n    The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity\n    tokens) for entity pair classification tasks, such as TACRED.\n    ')
class LukeForEntityPairClassification(LukePreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EntityPairClassificationOutput]:
        '''
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
            used for the single-label classification. In this case, labels should contain the indices that should be
            in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
            loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where
            0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntityPairClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
        >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [
        ...     (0, 7),
        ...     (17, 28),
        ... ]  # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: per:cities_of_residence
        ```'''
        pass
````
metrics: total_program_units=5, total_doc_str=1, AvgCountLine=54, AvgCountLineBlank=7, AvgCountLineCode=33, AvgCountLineComment=15, AvgCyclomatic=3, CommentToCodeRatio=0.43, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=111, CountLineBlank=14, CountLineCode=68, CountLineCodeDecl=28, CountLineCodeExe=47, CountLineComment=29, CountStmt=23, CountStmtDecl=11, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=6
id: 3,505
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeForEntitySpanClassification

human_written_code:

````python
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Optional, Union
from torch import nn


@auto_docstring(custom_intro='\n    The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks\n    such as named entity recognition.\n    ')
class LukeForEntitySpanClassification(LukePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.luke = LukeModel(config)
        self.num_labels = config.num_labels
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.LongTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, entity_start_positions: Optional[torch.LongTensor]=None, entity_end_positions: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EntitySpanClassificationOutput]:
        """
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        entity_start_positions (`torch.LongTensor`):
            The start positions of entities in the word token sequence.
        entity_end_positions (`torch.LongTensor`):
            The end positions of entities in the word token sequence.
        labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
            entropy loss is used for the single-label classification. In this case, labels should contain the indices
            that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,
            num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
            labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntitySpanClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
        >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")

        >>> text = "Beyoncé lives in Los Angeles"

        # List all possible entity spans in the text
        >>> word_start_positions = [0, 8, 14, 17, 21]  # character-based start positions of word tokens
        >>> word_end_positions = [7, 13, 16, 20, 28]  # character-based end positions of word tokens
        >>> entity_spans = []
        >>> for i, start_pos in enumerate(word_start_positions):
        ...     for end_pos in word_end_positions[i:]:
        ...         entity_spans.append((start_pos, end_pos))

        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
        >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
        ...     if predicted_class_idx != 0:
        ...         print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])
        Beyoncé PER
        Los Angeles LOC
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        hidden_size = outputs.last_hidden_state.size(-1)
        entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
        if entity_start_positions.device != outputs.last_hidden_state.device:
            entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device)
        start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions)
        entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
        if entity_end_positions.device != outputs.last_hidden_state.device:
            entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device)
        end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions)
        feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2)
        feature_vector = self.dropout(feature_vector)
        logits = self.classifier(feature_vector)
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if labels.ndim == 2:
                loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
            else:
                loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
        if not return_dict:
            return tuple((v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None))
        return EntitySpanClassificationOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n    The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks\n    such as named entity recognition.\n    ')
class LukeForEntitySpanClassification(LukePreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.LongTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, entity_start_positions: Optional[torch.LongTensor]=None, entity_end_positions: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EntitySpanClassificationOutput]:
        '''
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        entity_start_positions (`torch.LongTensor`):
            The start positions of entities in the word token sequence.
        entity_end_positions (`torch.LongTensor`):
            The end positions of entities in the word token sequence.
        labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
            entropy loss is used for the single-label classification. In this case, labels should contain the indices
            that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,
            num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
            labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntitySpanClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
        >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")

        >>> text = "Beyoncé lives in Los Angeles"

        # List all possible entity spans in the text
        >>> word_start_positions = [0, 8, 14, 17, 21]  # character-based start positions of word tokens
        >>> word_end_positions = [7, 13, 16, 20, 28]  # character-based end positions of word tokens
        >>> entity_spans = []
        >>> for i, start_pos in enumerate(word_start_positions):
        ...     for end_pos in word_end_positions[i:]:
        ...         entity_spans.append((start_pos, end_pos))

        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
        >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
        ...     if predicted_class_idx != 0:
        ...         print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])
        Beyoncé PER
        Los Angeles LOC
        ```'''
        pass
````
metrics: total_program_units=5, total_doc_str=1, AvgCountLine=67, AvgCountLineBlank=10, AvgCountLineCode=37, AvgCountLineComment=20, AvgCyclomatic=4, CommentToCodeRatio=0.51, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=137, CountLineBlank=21, CountLineCode=77, CountLineCodeDecl=33, CountLineCodeExe=54, CountLineComment=39, CountStmt=32, CountStmtDecl=14, CountStmtExe=29, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=8
id: 3,506
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeForMaskedLM

human_written_code:

```python
import torch
from typing import Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring(custom_intro='\n    The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and\n    masked entity prediction.\n    ')
class LukeForMaskedLM(LukePreTrainedModel):
    _tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias', 'entity_predictions.decoder.weight']

    def __init__(self, config):
        super().__init__(config)
        self.luke = LukeModel(config)
        self.lm_head = LukeLMHead(config)
        self.entity_predictions = EntityPredictionHead(config)
        self.loss_fn = nn.CrossEntropyLoss()
        self.post_init()

    def tie_weights(self):
        super().tie_weights()
        self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings)

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.LongTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, entity_labels: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeMaskedLMOutput]:
        """
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        loss = None
        mlm_loss = None
        logits = self.lm_head(outputs.last_hidden_state)
        if labels is not None:
            labels = labels.to(logits.device)
            mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))
            if loss is None:
                loss = mlm_loss
        mep_loss = None
        entity_logits = None
        if outputs.entity_last_hidden_state is not None:
            entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)
            if entity_labels is not None:
                mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))
                if loss is None:
                    loss = mep_loss
                else:
                    loss = loss + mep_loss
        if not return_dict:
            return tuple((v for v in [loss, mlm_loss, mep_loss, logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None))
        return LukeMaskedLMOutput(loss=loss, mlm_loss=mlm_loss, mep_loss=mep_loss, logits=logits, entity_logits=entity_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
```

class_skeleton:

```python
@auto_docstring(custom_intro='\n    The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and\n    masked entity prediction.\n    ')
class LukeForMaskedLM(LukePreTrainedModel):
    def __init__(self, config):
        pass

    def tie_weights(self):
        pass

    def get_output_embeddings(self):
        pass

    def set_output_embeddings(self, new_embeddings):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.LongTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, entity_labels: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeMaskedLMOutput]:
        '''
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        '''
        pass
```
metrics: total_program_units=8, total_doc_str=1, AvgCountLine=23, AvgCountLineBlank=3, AvgCountLineCode=18, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=4, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=126, CountLineBlank=18, CountLineCode=95, CountLineCodeDecl=35, CountLineCodeExe=70, CountLineComment=13, CountStmt=39, CountStmtDecl=17, CountStmtExe=33, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=12
id: 3,507
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeForMultipleChoice

human_written_code:

```python
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
import torch
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring
class LukeForMultipleChoice(LukePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.luke = LukeModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeMultipleChoiceModelOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None
        entity_attention_mask = entity_attention_mask.view(-1, entity_attention_mask.size(-1)) if entity_attention_mask is not None else None
        entity_token_type_ids = entity_token_type_ids.view(-1, entity_token_type_ids.size(-1)) if entity_token_type_ids is not None else None
        entity_position_ids = entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1)) if entity_position_ids is not None else None
        outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        pooled_output = outputs.pooler_output
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            labels = labels.to(reshaped_logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            return tuple((v for v in [loss, reshaped_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None))
        return LukeMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
```

class_skeleton:

```python
@auto_docstring
class LukeForMultipleChoice(LukePreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeMultipleChoiceModelOutput]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        '''
        pass
```
metrics: total_program_units=5, total_doc_str=1, AvgCountLine=57, AvgCountLineBlank=5, AvgCountLineCode=48, AvgCountLineComment=4, AvgCyclomatic=8, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=121, CountLineBlank=11, CountLineCode=102, CountLineCodeDecl=30, CountLineCodeExe=77, CountLineComment=8, CountStmt=32, CountStmtDecl=13, CountStmtExe=29, MaxCyclomatic=14, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=16
id: 3,508
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
class_name: transformers.models.luke.modeling_luke.LukeForQuestionAnswering

human_written_code:

```python
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Optional, Union


@auto_docstring
class LukeForQuestionAnswering(LukePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.luke = LukeModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeQuestionAnsweringModelOutput]:
        """
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        sequence_output = outputs.last_hidden_state
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            return tuple((v for v in [total_loss, start_logits, end_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None))
        return LukeQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
```

class_skeleton:

```python
@auto_docstring
class LukeForQuestionAnswering(LukePreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeQuestionAnsweringModelOutput]:
        '''
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
        '''
        pass
```
total_program_units: 5, total_doc_str: 1
AvgCountLine: 51, AvgCountLineBlank: 5, AvgCountLineCode: 40, AvgCountLineComment: 7, AvgCyclomatic: 4, CommentToCodeRatio: 0.15
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 110, CountLineBlank: 11, CountLineCode: 86, CountLineCodeDecl: 33, CountLineCodeExe: 60, CountLineComment: 13
CountStmt: 31, CountStmtDecl: 15, CountStmtExe: 28
MaxCyclomatic: 6, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 7
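The span-extraction loss in the `LukeForQuestionAnswering.forward` above clamps out-of-range start/end positions to `ignored_index` and averages the two cross-entropies; a standalone sketch of the same arithmetic with toy tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

# Toy logits for a batch of 2 over a sequence of 10 positions.
start_logits = torch.randn(2, 10)
end_logits = torch.randn(2, 10)
start_positions = torch.tensor([3, 15])  # 15 is deliberately out of range
end_positions = torch.tensor([5, 9])

# Out-of-range targets are clamped to ignored_index (== seq_len) and then
# skipped by the loss, mirroring the forward pass above.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
```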
3,509
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeForSequenceClassification
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn import torch from ...utils import ModelOutput, auto_docstring, logging from typing import Optional, Union @auto_docstring(custom_intro='\n The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class LukeForSequenceClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config) self.dropout = nn.Dropout(config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeSequenceClassifierOutput]: """ entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: return tuple((v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None)) return LukeSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class LukeForSequenceClassification(LukePreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeSequenceClassifierOutput]: ''' entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
total_program_units: 5, total_doc_str: 1
AvgCountLine: 48, AvgCountLineBlank: 4, AvgCountLineCode: 40, AvgCountLineComment: 4, AvgCyclomatic: 7, CommentToCodeRatio: 0.09
CountClassBase: 1, CountClassCoupled: 6, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 104, CountLineBlank: 9, CountLineCode: 87, CountLineCodeDecl: 29, CountLineCodeExe: 62, CountLineComment: 8
CountStmt: 34, CountStmtDecl: 12, CountStmtExe: 31
MaxCyclomatic: 11, MaxInheritanceTree: 2, MaxNesting: 3, SumCyclomatic: 13
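The `problem_type` dispatch in `LukeForSequenceClassification.forward` above selects MSE, cross-entropy, or BCE-with-logits depending on `num_labels` and the label dtype; the same decision in isolation (toy data):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

num_labels = 3
logits = torch.randn(4, num_labels)
labels = torch.tensor([0, 2, 1, 2])  # integer labels -> single-label classification

if num_labels == 1:
    loss = MSELoss()(logits.squeeze(), labels.float().squeeze())  # regression
elif labels.dtype in (torch.long, torch.int):
    loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
else:
    loss = BCEWithLogitsLoss()(logits, labels)  # float multi-hot labels
```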
3,510
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeForTokenClassification
import torch from ...utils import ModelOutput, auto_docstring, logging from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch import nn @auto_docstring(custom_intro='\n The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To\n solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this\n class.\n ') class LukeForTokenClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeTokenClassifierOutput]: """ entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) sequence_output = outputs.last_hidden_state sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: return tuple((v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None)) return LukeTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To\n solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this\n class.\n ') class LukeForTokenClassification(LukePreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, LukeTokenClassifierOutput]: ''' entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
total_program_units: 5, total_doc_str: 1
AvgCountLine: 40, AvgCountLineBlank: 4, AvgCountLineCode: 32, AvgCountLineComment: 4, AvgCyclomatic: 3, CommentToCodeRatio: 0.11
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 87, CountLineBlank: 9, CountLineCode: 70, CountLineCodeDecl: 29, CountLineCodeExe: 45, CountLineComment: 8
CountStmt: 22, CountStmtDecl: 12, CountStmtExe: 19
MaxCyclomatic: 4, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 6
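Token classification predicts one label per token, so the loss in `LukeForTokenClassification.forward` above flattens the `(batch, seq_len, num_labels)` logits before the cross-entropy; the same flattening in isolation (toy shapes):

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 6, 5
logits = torch.randn(batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch, seq_len))

# (batch, seq_len, num_labels) -> (batch*seq_len, num_labels), matching the
# logits.view(-1, num_labels) / labels.view(-1) pair in the forward pass above.
loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
```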
3,511
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeIntermediate
from ...activations import ACT2FN, gelu import torch from torch import nn class LukeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class LukeIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 13, CountLineBlank: 1, CountLineCode: 12, CountLineCodeDecl: 5, CountLineCodeExe: 9, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 5, CountStmtExe: 8
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
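`LukeIntermediate` resolves `config.hidden_act` to a callable via the `ACT2FN` mapping when it is given as a string; a sketch of the same lookup using a stand-in mapping (the demo registry below is an assumption for illustration, not the library's):

```python
import torch
from torch import nn

# Stand-in for transformers' ACT2FN registry (illustration only).
ACT2FN_DEMO = {"gelu": nn.functional.gelu, "relu": nn.functional.relu}

hidden_act = "gelu"  # could also be a callable directly, as in the code above
act_fn = ACT2FN_DEMO[hidden_act] if isinstance(hidden_act, str) else hidden_act
out = act_fn(torch.randn(2, 4))
```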
3,512
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeLMHead
import torch from torch import nn from ...activations import ACT2FN, gelu class LukeLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) x = self.decoder(x) return x def _tie_weights(self): if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias
class LukeLMHead(nn.Module): '''Roberta Head for masked language modeling.''' def __init__(self, config): pass def forward(self, features, **kwargs): pass def _tie_weights(self): pass
total_program_units: 4, total_doc_str: 1
AvgCountLine: 8, AvgCountLineBlank: 1, AvgCountLineCode: 6, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.21
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 4, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 29, CountLineBlank: 6, CountLineCode: 19, CountLineCodeDecl: 9, CountLineCodeExe: 15, CountLineComment: 4
CountStmt: 18, CountStmtDecl: 9, CountStmtExe: 14
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 4
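`LukeLMHead` above is a dense -> GELU -> LayerNorm -> vocabulary-projection pipeline; a minimal standalone version with made-up sizes:

```python
import torch
from torch import nn
from torch.nn.functional import gelu

hidden_size, vocab_size = 16, 100  # hypothetical sizes
dense = nn.Linear(hidden_size, hidden_size)
layer_norm = nn.LayerNorm(hidden_size)
decoder = nn.Linear(hidden_size, vocab_size)

features = torch.randn(2, 5, hidden_size)            # (batch, seq_len, hidden)
scores = decoder(layer_norm(gelu(dense(features))))  # (batch, seq_len, vocab)
```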
3,513
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeLayer
from ...modeling_layers import GradientCheckpointingLayer import torch from ...pytorch_utils import apply_chunking_to_forward class LukeLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LukeAttention(config) self.intermediate = LukeIntermediate(config) self.output = LukeOutput(config) def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False): word_size = word_hidden_states.size(1) self_attention_outputs = self.attention(word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions=output_attentions) if entity_hidden_states is None: concat_attention_output = self_attention_outputs[0] else: concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1) outputs = self_attention_outputs[2:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output) word_layer_output = layer_output[:, :word_size, :] if entity_hidden_states is None: entity_layer_output = None else: entity_layer_output = layer_output[:, word_size:, :] outputs = (word_layer_output, entity_layer_output) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class LukeLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False): pass def feed_forward_chunk(self, attention_output): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 16, AvgCountLineBlank: 2, AvgCountLineCode: 14, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0.02
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 5, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 50, CountLineBlank: 7, CountLineCode: 43, CountLineCodeDecl: 25, CountLineCodeExe: 32, CountLineComment: 1
CountStmt: 26, CountStmtDecl: 18, CountStmtExe: 22
MaxCyclomatic: 3, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 5
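`LukeLayer` runs its feed-forward through `apply_chunking_to_forward`, which splits the sequence dimension into chunks to bound peak memory; a self-contained sketch of the idea (a simplified reimplementation, not the library function itself):

```python
import torch

def feed_forward(x):
    return x * 2.0  # stand-in for the intermediate -> output sublayers

def apply_chunking(forward_fn, chunk_size, dim, tensor):
    # Same idea as transformers' apply_chunking_to_forward: split along `dim`,
    # run the feed-forward per chunk, and re-concatenate the results.
    if chunk_size == 0:
        return forward_fn(tensor)
    chunks = tensor.split(chunk_size, dim=dim)
    return torch.cat([forward_fn(c) for c in chunks], dim=dim)

x = torch.randn(2, 8, 4)
assert torch.allclose(apply_chunking(feed_forward, 2, 1, x), feed_forward(x))
```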
3,514
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeMaskedLMOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro="\n Base class for model's outputs, with potential hidden states and attentions.\n ") class LukeMaskedLMOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): The sum of masked language modeling (MLM) loss and entity prediction loss. mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked entity prediction (MEP) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None mlm_loss: Optional[torch.FloatTensor] = None mep_loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None entity_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro="\n Base class for model's outputs, with potential hidden states and attentions.\n ") class LukeMaskedLMOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): The sum of masked language modeling (MLM) loss and entity prediction loss. mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked entity prediction (MEP) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 3
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
CountLine: 40, CountLineBlank: 4, CountLineCode: 9, CountLineCodeDecl: 9, CountLineCodeExe: 8, CountLineComment: 27
CountStmt: 9, CountStmtDecl: 9, CountStmtExe: 8
MaxCyclomatic: 0, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 0
3,515
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeModel
from ...utils import ModelOutput, auto_docstring, logging from .configuration_luke import LukeConfig import torch from typing import Optional, Union @auto_docstring(custom_intro='\n The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any\n ') class LukeModel(LukePreTrainedModel): def __init__(self, config: LukeConfig, add_pooling_layer: bool=True): """ add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = LukeEmbeddings(config) self.entity_embeddings = LukeEntityEmbeddings(config) self.encoder = LukeEncoder(config) self.pooler = LukePooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_entity_embeddings(self): return self.entity_embeddings.entity_embeddings def set_entity_embeddings(self, value): self.entity_embeddings.entity_embeddings = value def _prune_heads(self, heads_to_prune): raise NotImplementedError('LUKE does not support the pruning of attention heads') @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseLukeModelOutputWithPooling]: """ entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Examples: ```python >>> from transformers import AutoTokenizer, LukeModel >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base") >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." 
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Input Wikipedia entities to obtain enriched contextualized representations of word tokens >>> text = "Beyoncé lives in Los Angeles." >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [ ... (0, 7), ... (17, 28), ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> encoding = tokenizer( ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ... ) >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if entity_ids is not None: entity_seq_length = entity_ids.size(1) if entity_attention_mask is None: entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device) if entity_token_type_ids is None: entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) word_embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask) if entity_ids is None: entity_embedding_output = None else: entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids) encoder_outputs = self.encoder(word_embedding_output, entity_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseLukeModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, 
entity_last_hidden_state=encoder_outputs.entity_last_hidden_state, entity_hidden_states=encoder_outputs.entity_hidden_states) def get_extended_attention_mask(self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]): """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: word_attention_mask (`torch.LongTensor`): Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore. entity_attention_mask (`torch.LongTensor`, *optional*): Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore. Returns: `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`. """ attention_mask = word_attention_mask if entity_attention_mask is not None: attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1) if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f'Wrong shape for attention_mask (shape {attention_mask.shape})') extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min return extended_attention_mask
@auto_docstring(custom_intro='\n The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any\n ') class LukeModel(LukePreTrainedModel): def __init__(self, config: LukeConfig, add_pooling_layer: bool=True): ''' add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_entity_embeddings(self): pass def set_entity_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, entity_ids: Optional[torch.LongTensor]=None, entity_attention_mask: Optional[torch.FloatTensor]=None, entity_token_type_ids: Optional[torch.LongTensor]=None, entity_position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseLukeModelOutputWithPooling]: ''' entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Examples: ```python >>> from transformers import AutoTokenizer, LukeModel >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base") >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Input Wikipedia entities to obtain enriched contextualized representations of word tokens >>> text = "Beyoncé lives in Los Angeles." >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [ ... (0, 7), ... (17, 28), ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> encoding = tokenizer( ... 
text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ... ) >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state ```''' pass def get_extended_attention_mask(self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]): ''' Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: word_attention_mask (`torch.LongTensor`): Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore. entity_attention_mask (`torch.LongTensor`, *optional*): Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore. Returns: `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`. ''' pass
total_program_units: 11, total_doc_str: 3
AvgCountLine: 24, AvgCountLineBlank: 3, AvgCountLineCode: 14, AvgCountLineComment: 7, AvgCyclomatic: 3, CommentToCodeRatio: 0.49
CountClassBase: 1, CountClassCoupled: 10, CountClassCoupledModified: 6, CountClassDerived: 0
CountDeclInstanceMethod: 8, CountDeclInstanceVariable: 5, CountDeclMethod: 8, CountDeclMethodAll: 9
CountLine: 198, CountLineBlank: 34, CountLineCode: 111, CountLineCodeDecl: 44, CountLineCodeExe: 83, CountLineComment: 54
CountStmt: 64, CountStmtDecl: 26, CountStmtExe: 55
MaxCyclomatic: 16, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 27
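`LukeModel.get_extended_attention_mask` above concatenates the word and entity masks and converts them into large negative additive biases; the arithmetic in isolation:

```python
import torch

# One example: 4 word tokens (last one padded) plus 2 entities (last one padded).
word_mask = torch.tensor([[1, 1, 1, 0]])
entity_mask = torch.tensor([[1, 0]])
mask = torch.cat([word_mask, entity_mask], dim=-1)   # (1, 6)

extended = mask[:, None, None, :].to(torch.float32)  # (1, 1, 1, 6), broadcastable
extended = (1.0 - extended) * torch.finfo(torch.float32).min
# Padded positions now carry a huge negative bias, so softmax sends them to ~0.
```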
3,516
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeMultipleChoiceModelOutput
from dataclasses import dataclass import torch from typing import Optional, Union from ...utils import ModelOutput, auto_docstring, logging @dataclass @auto_docstring(custom_intro='\n Outputs of multiple choice models.\n ') class LukeMultipleChoiceModelOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Outputs of multiple choice models.\n ') class LukeMultipleChoiceModelOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 3.67
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
CountLine: 33, CountLineBlank: 5, CountLineCode: 6, CountLineCodeDecl: 6, CountLineCodeExe: 5, CountLineComment: 22
CountStmt: 6, CountStmtDecl: 6, CountStmtExe: 5
MaxCyclomatic: 0, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 0
3,517
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeOutput
import torch from torch import nn class LukeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class LukeOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 12, CountLineBlank: 1, CountLineCode: 11, CountLineCodeDecl: 6, CountLineCodeExe: 8, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 6, CountStmtExe: 8
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
3,518
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukePooler
import torch from torch import nn class LukePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class LukePooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.2
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 13, CountLineBlank: 1, CountLineCode: 10, CountLineCodeDecl: 7, CountLineCodeExe: 7, CountLineComment: 2
CountStmt: 10, CountStmtDecl: 7, CountStmtExe: 7
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
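`LukePooler` pools by taking only the first token's hidden state and passing it through a tanh-activated dense layer; the same computation in isolation (hypothetical sizes):

```python
import torch
from torch import nn

hidden_size = 16  # hypothetical size
dense, activation = nn.Linear(hidden_size, hidden_size), nn.Tanh()

hidden_states = torch.randn(2, 7, hidden_size)   # (batch, seq_len, hidden)
pooled = activation(dense(hidden_states[:, 0]))  # pool the first (<s>) token only
```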
3,519
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukePreTrainedModel
from ...modeling_utils import PreTrainedModel from .configuration_luke import LukeConfig from torch import nn from ...utils import ModelOutput, auto_docstring, logging @auto_docstring class LukePreTrainedModel(PreTrainedModel): config: LukeConfig base_model_prefix = 'luke' supports_gradient_checkpointing = True _no_split_modules = ['LukeAttention', 'LukeEntityEmbeddings'] def _init_weights(self, module: nn.Module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if module.embedding_dim == 1: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
@auto_docstring class LukePreTrainedModel(PreTrainedModel): def _init_weights(self, module: nn.Module): '''Initialize the weights''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 16, AvgCountLineBlank: 0, AvgCountLineCode: 15, AvgCountLineComment: 2, AvgCyclomatic: 7, CommentToCodeRatio: 0.3
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 9
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 27, CountLineBlank: 2, CountLineCode: 20, CountLineCodeDecl: 6, CountLineCodeExe: 18, CountLineComment: 6
CountStmt: 17, CountStmtDecl: 6, CountStmtExe: 15
MaxCyclomatic: 7, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 7
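`_init_weights` above draws Linear/Embedding weights from a normal distribution with std `initializer_range` and zeroes biases and the padding row; the same recipe applied to throwaway modules (the `initializer_range` value below is an assumed typical config value):

```python
import torch
from torch import nn

initializer_range = 0.02  # assumed typical config value

linear = nn.Linear(8, 8)
linear.weight.data.normal_(mean=0.0, std=initializer_range)
if linear.bias is not None:
    linear.bias.data.zero_()

embedding = nn.Embedding(10, 8, padding_idx=0)
embedding.weight.data.normal_(mean=0.0, std=initializer_range)
embedding.weight.data[embedding.padding_idx].zero_()  # keep the pad row at zero
```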
3,520
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeQuestionAnsweringModelOutput
from dataclasses import dataclass import torch from typing import Optional, Union from ...utils import ModelOutput, auto_docstring, logging @dataclass @auto_docstring(custom_intro='\n Outputs of question answering models.\n ') class LukeQuestionAnsweringModelOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None start_logits: Optional[torch.FloatTensor] = None end_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Outputs of question answering models.\n ') class LukeQuestionAnsweringModelOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 3.29
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
CountLine: 34, CountLineBlank: 4, CountLineCode: 7, CountLineCodeDecl: 7, CountLineCodeExe: 6, CountLineComment: 23
CountStmt: 7, CountStmtDecl: 7, CountStmtExe: 6
MaxCyclomatic: 0, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 0
3,521
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeSelfAttention
import torch from torch import nn import math class LukeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.use_entity_aware_attention = config.use_entity_aware_attention self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) if self.use_entity_aware_attention: self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False): word_size = word_hidden_states.size(1) if entity_hidden_states is None: concat_hidden_states = word_hidden_states else: concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) key_layer = self.transpose_for_scores(self.key(concat_hidden_states)) value_layer = self.transpose_for_scores(self.value(concat_hidden_states)) if self.use_entity_aware_attention and entity_hidden_states is not None: w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states)) w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states)) e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states)) e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states)) w2w_key_layer = key_layer[:, :, :word_size, :] e2w_key_layer = key_layer[:, :, :word_size, :] w2e_key_layer = key_layer[:, :, word_size:, :] e2e_key_layer = key_layer[:, :, word_size:, :] w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2)) w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2)) e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2)) e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2)) word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3) entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3) attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2) else: query_layer = self.transpose_for_scores(self.query(concat_hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = 
context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) output_word_hidden_states = context_layer[:, :word_size, :] if entity_hidden_states is None: output_entity_hidden_states = None else: output_entity_hidden_states = context_layer[:, word_size:, :] if output_attentions: outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs) else: outputs = (output_word_hidden_states, output_entity_hidden_states) return outputs
class LukeSelfAttention(nn.Module): def __init__(self, config): pass def transpose_for_scores(self, x): pass def forward(self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 36, AvgCountLineBlank: 7, AvgCountLineCode: 26, AvgCountLineComment: 3, AvgCyclomatic: 4, CommentToCodeRatio: 0.13
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 11, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 111, CountLineBlank: 22, CountLineCode: 79, CountLineCodeDecl: 49, CountLineCodeExe: 68, CountLineComment: 10
CountStmt: 65, CountStmtDecl: 42, CountStmtExe: 61
MaxCyclomatic: 7, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 11
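The entity-aware attention in `LukeSelfAttention` above uses four query projections (word-to-word, word-to-entity, entity-to-word, entity-to-entity) against shared keys, then stitches the four score blocks into one attention matrix; a toy numeric sketch with made-up dimensions:

```python
import torch

batch, heads, words, entities, d = 1, 2, 4, 2, 8

# One query tensor per token-type pair; keys (and values) are shared.
w2w_q = torch.randn(batch, heads, words, d)
w2e_q = torch.randn(batch, heads, words, d)
e2w_q = torch.randn(batch, heads, entities, d)
e2e_q = torch.randn(batch, heads, entities, d)
keys = torch.randn(batch, heads, words + entities, d)

w_keys, e_keys = keys[:, :, :words, :], keys[:, :, words:, :]
word_scores = torch.cat(
    [w2w_q @ w_keys.transpose(-1, -2), w2e_q @ e_keys.transpose(-1, -2)], dim=3
)
entity_scores = torch.cat(
    [e2w_q @ w_keys.transpose(-1, -2), e2e_q @ e_keys.transpose(-1, -2)], dim=3
)
attention_scores = torch.cat([word_scores, entity_scores], dim=2)  # (1, 2, 6, 6)
```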
3,522
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeSelfOutput
import torch from torch import nn class LukeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class LukeSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 12, CountLineBlank: 1, CountLineCode: 11, CountLineCodeDecl: 6, CountLineCodeExe: 8, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 6, CountStmtExe: 8
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
3,523
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeSequenceClassifierOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Outputs of sentence classification models.\n ') class LukeSequenceClassifierOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Outputs of sentence classification models.\n ') class LukeSequenceClassifierOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 3.5
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
CountLine: 31, CountLineBlank: 4, CountLineCode: 6, CountLineCodeDecl: 6, CountLineCodeExe: 5, CountLineComment: 21
CountStmt: 6, CountStmtDecl: 6, CountStmtExe: 5
MaxCyclomatic: 0, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 0
3,524
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/modeling_luke.py
transformers.models.luke.modeling_luke.LukeTokenClassifierOutput
from ...utils import ModelOutput, auto_docstring, logging from dataclasses import dataclass import torch from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Base class for outputs of token classification models.\n ') class LukeTokenClassifierOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n Base class for outputs of token classification models.\n ') class LukeTokenClassifierOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. ''' pass
3
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
31
4
6
6
5
21
6
6
5
0
1
0
0
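Since this output's `logits` carry one score vector per token rather than per sequence, label predictions are typically an argmax over the last axis. A tiny illustrative snippet (the tensors and label count here are made up):

```python
import torch

logits = torch.randn(1, 12, 9)       # (batch_size, sequence_length, num_labels)
predictions = logits.argmax(dim=-1)  # one label id per token
print(predictions.shape)             # torch.Size([1, 12])
```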
3,525
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/luke/tokenization_luke.py
transformers.models.luke.tokenization_luke.LukeTokenizer
import itertools
import json
import os
from collections.abc import Mapping
from typing import Optional, Union

import numpy as np
import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import (
    ENCODE_KWARGS_DOCSTRING,
    AddedToken,
    BatchEncoding,
    EncodedInput,
    PaddingStrategy,
    TensorType,
    TextInput,
    TextInputPair,
    TruncationStrategy,
    to_py_obj,
)
from ...utils import add_end_docstrings, is_torch_tensor, logging

# Note: module-level names referenced below (VOCAB_FILES_NAMES, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
# EntityInput, EntitySpanInput, bytes_to_unicode, get_pairs, logger) are defined elsewhere in
# tokenization_luke.py and are not part of this extracted class.


class LukeTokenizer(PreTrainedTokenizer):
    """
    Constructs a LUKE tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import LukeTokenizer

    >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
    >>> tokenizer("Hello world")["input_ids"]
    [0, 31414, 232, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [0, 20920, 232, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods. It also creates entity sequences, namely
    `entity_ids`, `entity_attention_mask`, `entity_token_type_ids`, and `entity_position_ids` to be used by the LUKE
    model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        entity_vocab_file (`str`):
            Path to the entity vocabulary file.
        task (`str`, *optional*):
            Task for which you want to prepare sequences. One of `"entity_classification"`,
            `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the
            entity sequence is automatically created based on the given entity span(s).
        max_entity_length (`int`, *optional*, defaults to 32):
            The maximum length of `entity_ids`.
        max_mention_length (`int`, *optional*, defaults to 30):
            The maximum number of tokens inside an entity span.
        entity_token_1 (`str`, *optional*, defaults to `<ent>`):
            The special token used to represent an entity span in a word token sequence. This token is only used when
            `task` is set to `"entity_classification"` or `"entity_pair_classification"`.
        entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
            The special token used to represent an entity span in a word token sequence. This token is only used when
            `task` is set to `"entity_pair_classification"`.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated just as
            any other word. (The LUKE tokenizer detects the beginning of words by the preceding space.)
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        entity_vocab_file,
        task=None,
        max_entity_length=32,
        max_mention_length=30,
        entity_token_1='<ent>',
        entity_token_2='<ent2>',
        entity_unk_token='[UNK]',
        entity_pad_token='[PAD]',
        entity_mask_token='[MASK]',
        entity_mask2_token='[MASK2]',
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")

        entity_token_1 = (
            AddedToken(entity_token_1, lstrip=False, rstrip=False)
            if isinstance(entity_token_1, str)
            else entity_token_1
        )
        entity_token_2 = (
            AddedToken(entity_token_2, lstrip=False, rstrip=False)
            if isinstance(entity_token_2, str)
            else entity_token_2
        )
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs['additional_special_tokens'] += [entity_token_1, entity_token_2]

        with open(entity_vocab_file, encoding='utf-8') as entity_vocab_handle:
            self.entity_vocab = json.load(entity_vocab_handle)
        for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]:
            if entity_special_token not in self.entity_vocab:
                raise ValueError(
                    f'Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. '
                    f'Probably an incorrect entity vocab file is loaded: {entity_vocab_file}.'
                )
        self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
        self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
        self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
        self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]

        self.task = task
        if task is None or task == 'entity_span_classification':
            self.max_entity_length = max_entity_length
        elif task == 'entity_classification':
            self.max_entity_length = 1
        elif task == 'entity_pair_classification':
            self.max_entity_length = 2
        else:
            raise ValueError(
                f"Task {task} not supported. Select task from ['entity_classification', "
                "'entity_pair_classification', 'entity_span_classification'] only."
            )
        self.max_mention_length = max_mention_length

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            task=task,
            max_entity_length=32,
            max_mention_length=30,
            entity_token_1='<ent>',
            entity_token_2='<ent2>',
            entity_unk_token=entity_unk_token,
            entity_pad_token=entity_pad_token,
            entity_mask_token=entity_mask_token,
            entity_mask2_token=entity_mask2_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        vocab = dict(self.encoder).copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. A LUKE sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: list[int],
        token_ids_1: Optional[list[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + [0] * len(token_ids_0) + [1]
        return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. LUKE does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, list[TextInput]],
        text_pair: Optional[Union[TextInput, list[TextInput]]] = None,
        entity_spans: Optional[Union[EntitySpanInput, list[EntitySpanInput]]] = None,
        entity_spans_pair: Optional[Union[EntitySpanInput, list[EntitySpanInput]]] = None,
        entities: Optional[Union[EntityInput, list[EntityInput]]] = None,
        entities_pair: Optional[Union[EntityInput, list[EntityInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: Optional[bool] = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences, depending on the task you want to prepare them for.

        Args:
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
                tokenizer does not support tokenization based on pretokenized strings.
            text_pair (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
                tokenizer does not support tokenization based on pretokenized strings.
            entity_spans (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
                The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples
                each with two integers denoting character-based start and end positions of entities. If you specify
                `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the
                constructor, the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the
                length of each sequence must be equal to the length of each sequence of `entities`.
            entity_spans_pair (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
                The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples
                each with two integers denoting character-based start and end positions of entities. If you specify
                the `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
                length of each sequence must be equal to the length of each sequence of `entities_pair`.
            entities (`list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
                representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
                Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length
                of each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
                `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
                is automatically constructed by filling it with the [MASK] entity.
            entities_pair (`list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
                representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
                Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length
                of each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
                `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
                sequences is automatically constructed by filling it with the [MASK] entity.
            max_entity_length (`int`, *optional*):
                The maximum length of `entity_ids`.
        """
        # Input type checking for clearer error messages
        is_valid_single_text = isinstance(text, str)
        is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or isinstance(text[0], str))
        if not (is_valid_single_text or is_valid_batch_text):
            raise ValueError('text input must be of type `str` (single example) or `list[str]` (batch).')

        is_valid_single_text_pair = isinstance(text_pair, str)
        is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (
            len(text_pair) == 0 or isinstance(text_pair[0], str)
        )
        if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):
            raise ValueError('text_pair input must be of type `str` (single example) or `list[str]` (batch).')

        is_batched = bool(isinstance(text, (list, tuple)))

        if is_batched:
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            if entities is None:
                batch_entities_or_entities_pairs = None
            else:
                batch_entities_or_entities_pairs = (
                    list(zip(entities, entities_pair)) if entities_pair is not None else entities
                )

            if entity_spans is None:
                batch_entity_spans_or_entity_spans_pairs = None
            else:
                batch_entity_spans_or_entity_spans_pairs = (
                    list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans
                )

            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
                batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                max_entity_length=max_entity_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                entity_spans=entity_spans,
                entity_spans_pair=entity_spans_pair,
                entities=entities,
                entities_pair=entities_pair,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                max_entity_length=max_entity_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    def _encode_plus(
        self,
        text: Union[TextInput],
        text_pair: Optional[Union[TextInput]] = None,
        entity_spans: Optional[EntitySpanInput] = None,
        entity_spans_pair: Optional[EntitySpanInput] = None,
        entities: Optional[EntityInput] = None,
        entities_pair: Optional[EntityInput] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: Optional[bool] = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                'return_offset_mapping is not available when using Python tokenizers. To use this feature, change '
                'your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on '
                'available tokenizers at https://github.com/huggingface/transformers/pull/2674'
            )

        if is_split_into_words:
            raise NotImplementedError('is_split_into_words is not supported in this tokenizer.')

        (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        ) = self._create_input_sequence(
            text=text,
            text_pair=text_pair,
            entities=entities,
            entities_pair=entities_pair,
            entity_spans=entity_spans,
            entity_spans_pair=entity_spans_pair,
            **kwargs,
        )

        # prepare_for_model will create the attention_mask and token_type_ids
        return self.prepare_for_model(
            first_ids,
            pair_ids=second_ids,
            entity_ids=first_entity_ids,
            pair_entity_ids=second_entity_ids,
            entity_token_spans=first_entity_token_spans,
            pair_entity_token_spans=second_entity_token_spans,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            max_entity_length=max_entity_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair]],
        batch_entity_spans_or_entity_spans_pairs: Optional[
            Union[list[EntitySpanInput], list[tuple[EntitySpanInput, EntitySpanInput]]]
        ] = None,
        batch_entities_or_entities_pairs: Optional[
            Union[list[EntityInput], list[tuple[EntityInput, EntityInput]]]
        ] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: Optional[bool] = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                'return_offset_mapping is not available when using Python tokenizers. To use this feature, change '
                'your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.'
            )

        if is_split_into_words:
            raise NotImplementedError('is_split_into_words is not supported in this tokenizer.')

        # input_ids is a list of tuples (one for each example in the batch)
        input_ids = []
        entity_ids = []
        entity_token_spans = []
        for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
            if not isinstance(text_or_text_pair, (list, tuple)):
                text, text_pair = (text_or_text_pair, None)
            else:
                text, text_pair = text_or_text_pair

            entities, entities_pair = (None, None)
            if batch_entities_or_entities_pairs is not None:
                entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
                if entities_or_entities_pairs:
                    if isinstance(entities_or_entities_pairs[0], str):
                        entities, entities_pair = (entities_or_entities_pairs, None)
                    else:
                        entities, entities_pair = entities_or_entities_pairs

            entity_spans, entity_spans_pair = (None, None)
            if batch_entity_spans_or_entity_spans_pairs is not None:
                entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
                if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance(
                    entity_spans_or_entity_spans_pairs[0], list
                ):
                    entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
                else:
                    entity_spans, entity_spans_pair = (entity_spans_or_entity_spans_pairs, None)

            (
                first_ids,
                second_ids,
                first_entity_ids,
                second_entity_ids,
                first_entity_token_spans,
                second_entity_token_spans,
            ) = self._create_input_sequence(
                text=text,
                text_pair=text_pair,
                entities=entities,
                entities_pair=entities_pair,
                entity_spans=entity_spans,
                entity_spans_pair=entity_spans_pair,
                **kwargs,
            )
            input_ids.append((first_ids, second_ids))
            entity_ids.append((first_entity_ids, second_entity_ids))
            entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))

        batch_outputs = self._batch_prepare_for_model(
            input_ids,
            batch_entity_ids_pairs=entity_ids,
            batch_entity_token_spans_pairs=entity_token_spans,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            max_entity_length=max_entity_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return BatchEncoding(batch_outputs)

    def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
        if not isinstance(entity_spans, list):
            raise TypeError('entity_spans should be given as a list')
        elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
            raise ValueError(
                'entity_spans should be given as a list of tuples containing the start and end character indices'
            )

        if entities is not None:
            if not isinstance(entities, list):
                raise ValueError('If you specify entities, they should be given as a list')

            if len(entities) > 0 and not isinstance(entities[0], str):
                raise ValueError('If you specify entities, they should be given as a list of entity names')

            if len(entities) != len(entity_spans):
                raise ValueError('If you specify entities, entities and entity_spans must be the same length')

    def _create_input_sequence(
        self,
        text: Union[TextInput],
        text_pair: Optional[Union[TextInput]] = None,
        entities: Optional[EntityInput] = None,
        entities_pair: Optional[EntityInput] = None,
        entity_spans: Optional[EntitySpanInput] = None,
        entity_spans_pair: Optional[EntitySpanInput] = None,
        **kwargs,
    ) -> tuple[list, list, list, list, list, list]:
        def get_input_ids(text):
            tokens = self.tokenize(text, **kwargs)
            return self.convert_tokens_to_ids(tokens)

        def get_input_ids_and_entity_token_spans(text, entity_spans):
            if entity_spans is None:
                return (get_input_ids(text), None)

            cur = 0
            input_ids = []
            entity_token_spans = [None] * len(entity_spans)

            split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
            char_pos2token_pos = {}

            for split_char_position in split_char_positions:
                orig_split_char_position = split_char_position
                # Whitespace should be prepended to the following token
                if split_char_position > 0 and text[split_char_position - 1] == ' ':
                    split_char_position -= 1
                if cur != split_char_position:
                    input_ids += get_input_ids(text[cur:split_char_position])
                    cur = split_char_position
                char_pos2token_pos[orig_split_char_position] = len(input_ids)

            input_ids += get_input_ids(text[cur:])

            entity_token_spans = [
                (char_pos2token_pos[char_start], char_pos2token_pos[char_end])
                for char_start, char_end in entity_spans
            ]

            return (input_ids, entity_token_spans)

        first_ids, second_ids = (None, None)
        first_entity_ids, second_entity_ids = (None, None)
        first_entity_token_spans, second_entity_token_spans = (None, None)

        if self.task is None:
            if entity_spans is None:
                first_ids = get_input_ids(text)
            else:
                self._check_entity_input_format(entities, entity_spans)

                first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
                if entities is None:
                    first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
                else:
                    first_entity_ids = [
                        self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities
                    ]

            if text_pair is not None:
                if entity_spans_pair is None:
                    second_ids = get_input_ids(text_pair)
                else:
                    self._check_entity_input_format(entities_pair, entity_spans_pair)

                    second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
                        text_pair, entity_spans_pair
                    )
                    if entities_pair is None:
                        second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair)
                    else:
                        second_entity_ids = [
                            self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair
                        ]

        elif self.task == 'entity_classification':
            if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)):
                raise ValueError(
                    'Entity spans should be a list containing a single tuple containing the start and end character '
                    'indices of an entity'
                )
            first_entity_ids = [self.entity_mask_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)

            # add the <ent> token to the input ids around the entity span
            entity_token_start, entity_token_end = first_entity_token_spans[0]
            first_ids = (
                first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:]
            )
            first_ids = (
                first_ids[:entity_token_start]
                + [self.additional_special_tokens_ids[0]]
                + first_ids[entity_token_start:]
            )
            first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]

        elif self.task == 'entity_pair_classification':
            if not (
                isinstance(entity_spans, list)
                and len(entity_spans) == 2
                and isinstance(entity_spans[0], tuple)
                and isinstance(entity_spans[1], tuple)
            ):
                raise ValueError(
                    'Entity spans should be provided as a list of two tuples, each tuple containing the start and '
                    'end character indices of an entity'
                )

            head_span, tail_span = entity_spans
            first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)

            head_token_span, tail_token_span = first_entity_token_spans
            token_span_with_special_token_ids = [
                (head_token_span, self.additional_special_tokens_ids[0]),
                (tail_token_span, self.additional_special_tokens_ids[1]),
            ]
            if head_token_span[0] < tail_token_span[0]:
                first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
                first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
                token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
            else:
                first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
                first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)

            for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
                first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
                first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]

        elif self.task == 'entity_span_classification':
            if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)):
                raise ValueError(
                    'Entity spans should be provided as a list of tuples, each tuple containing the start and end '
                    'character indices of an entity'
                )

            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
            first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)

        else:
            raise ValueError(f'Task {self.task} not supported')

        return (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        )

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_prepare_for_model(
        self,
        batch_ids_pairs: list[tuple[list[int], None]],
        batch_entity_ids_pairs: list[tuple[Optional[list[int]], Optional[list[int]]]],
        batch_entity_token_spans_pairs: list[tuple[Optional[list[tuple[int, int]]], Optional[list[tuple[int, int]]]]],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
            batch_entity_ids_pairs: list of entity ids or entity ids pairs
            batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
            max_entity_length: The maximum length of the entity sequence.
        """

        batch_outputs = {}
        for input_ids, entity_ids, entity_token_span_pairs in zip(
            batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
        ):
            first_ids, second_ids = input_ids
            first_entity_ids, second_entity_ids = entity_ids
            first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
            outputs = self.prepare_for_model(
                first_ids,
                second_ids,
                entity_ids=first_entity_ids,
                pair_entity_ids=second_entity_ids,
                entity_token_spans=first_entity_token_spans,
                pair_entity_token_spans=second_entity_token_spans,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                max_entity_length=max_entity_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                padding_side=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # we convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        ids: list[int],
        pair_ids: Optional[list[int]] = None,
        entity_ids: Optional[list[int]] = None,
        pair_entity_ids: Optional[list[int]] = None,
        entity_token_spans: Optional[list[tuple[int, int]]] = None,
        pair_entity_token_spans: Optional[list[tuple[int, int]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input id, entity id and entity span, or a pair of sequences of inputs ids, entity ids,
        entity spans so that it can be used by the model. It adds special tokens, truncates sequences if overflowing
        while taking into account the special tokens and manages a moving window (with user defined stride) for
        overflowing tokens. Please note, for *pair_ids* different than `None` and *truncation_strategy =
        longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments
        will raise an error.

        Args:
            ids (`list[int]`):
                Tokenized input ids of the first sequence.
            pair_ids (`list[int]`, *optional*):
                Tokenized input ids of the second sequence.
            entity_ids (`list[int]`, *optional*):
                Entity ids of the first sequence.
            pair_entity_ids (`list[int]`, *optional*):
                Entity ids of the second sequence.
            entity_token_spans (`list[tuple[int, int]]`, *optional*):
                Entity spans of the first sequence.
            pair_entity_token_spans (`list[tuple[int, int]]`, *optional*):
                Entity spans of the second sequence.
            max_entity_length (`int`, *optional*):
                The maximum length of the entity sequence.
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        # Compute lengths
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0

        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                'Asking to return token_type_ids while setting add_special_tokens to False results in an undefined '
                'behavior. Please set add_special_tokens to True or set return_token_type_ids to None.'
            )
        if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (
            pair_ids is not None
        ):
            raise ValueError(
                'Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please '
                'select another truncation strategy than `longest_first`, for instance `only_second` or '
                '`only_first`.'
            )

        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = 'token_type_ids' in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        encoded_inputs = {}

        # Compute the total size of the returned word encodings
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

        # Truncation: handle max sequence length and max_entity_length
        overflowing_tokens = []
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):
            # truncate words up to max_length
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
                ids,
                pair_ids=pair_ids,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )

        if return_overflowing_tokens:
            encoded_inputs['overflowing_tokens'] = overflowing_tokens
            encoded_inputs['num_truncated_tokens'] = total_len - max_length

        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            entity_token_offset = 1  # 1 * <s> token
            pair_entity_token_offset = len(ids) + 3  # 1 * <s> token & 2 * <sep> tokens
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
            entity_token_offset = 0
            pair_entity_token_offset = len(ids)

        # Build output dictionary
        encoded_inputs['input_ids'] = sequence
        if return_token_type_ids:
            encoded_inputs['token_type_ids'] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs['special_tokens_mask'] = [0] * len(sequence)

        # Set max entity length
        if not max_entity_length:
            max_entity_length = self.max_entity_length

        if entity_ids is not None:
            total_entity_len = 0
            num_invalid_entities = 0
            valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)]
            valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]

            total_entity_len += len(valid_entity_ids)
            num_invalid_entities += len(entity_ids) - len(valid_entity_ids)

            valid_pair_entity_ids, valid_pair_entity_token_spans = (None, None)
            if pair_entity_ids is not None:
                valid_pair_entity_ids = [
                    ent_id
                    for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
                    if span[1] <= len(pair_ids)
                ]
                valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)]
                total_entity_len += len(valid_pair_entity_ids)
                num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)

            if num_invalid_entities != 0:
                logger.warning(
                    f'{num_invalid_entities} entities are ignored because their entity spans are invalid due to the '
                    'truncation of input tokens'
                )

            if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length:
                # truncate entities up to max_entity_length
                valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences(
                    valid_entity_ids,
                    pair_ids=valid_pair_entity_ids,
                    num_tokens_to_remove=total_entity_len - max_entity_length,
                    truncation_strategy=truncation_strategy,
                    stride=stride,
                )
                valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
                if valid_pair_entity_token_spans is not None:
                    valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)]

            if return_overflowing_tokens:
                encoded_inputs['overflowing_entities'] = overflowing_entities
                encoded_inputs['num_truncated_entities'] = total_entity_len - max_entity_length

            final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids
            encoded_inputs['entity_ids'] = list(final_entity_ids)
            entity_position_ids = []
            entity_start_positions = []
            entity_end_positions = []
            for token_spans, offset in (
                (valid_entity_token_spans, entity_token_offset),
                (valid_pair_entity_token_spans, pair_entity_token_offset),
            ):
                if token_spans is not None:
                    for start, end in token_spans:
                        start += offset
                        end += offset
                        position_ids = list(range(start, end))[: self.max_mention_length]
                        position_ids += [-1] * (self.max_mention_length - end + start)
                        entity_position_ids.append(position_ids)
                        entity_start_positions.append(start)
                        entity_end_positions.append(end - 1)

            encoded_inputs['entity_position_ids'] = entity_position_ids
            if self.task == 'entity_span_classification':
                encoded_inputs['entity_start_positions'] = entity_start_positions
                encoded_inputs['entity_end_positions'] = entity_end_positions

            if return_token_type_ids:
                encoded_inputs['entity_token_type_ids'] = [0] * len(encoded_inputs['entity_ids'])

        # Check lengths
        self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)

        # Padding
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                max_entity_length=max_entity_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

        if return_length:
            encoded_inputs['length'] = len(encoded_inputs['input_ids'])

        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )

        return batch_outputs

    def pad(
        self,
        encoded_inputs: Union[
            BatchEncoding,
            list[BatchEncoding],
            dict[str, EncodedInput],
            dict[str, list[EncodedInput]],
            list[dict[str, EncodedInput]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
        in the batch. Padding side (left/right) and padding token ids are defined at the tokenizer level (with
        `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`).

        .. note:: If the `encoded_inputs` passed are dictionary of numpy arrays or PyTorch tensors, the result will
        use the same type unless you provide a different tensor type with `return_tensors`. In the case of PyTorch
        tensors, you will lose the specific device of your tensors however.

        Args:
            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str, list[list[int]]]` or `list[dict[str, list[int]]]`):
                Tokenized inputs. Can represent one input ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of
                tokenized inputs (list of [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str,
                list[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
                collate function. Instead of `list[int]` you can have tensors (numpy arrays, or PyTorch tensors), see
                the note above for the return type.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            max_entity_length (`int`, *optional*):
                The maximum length of the entity sequence.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right',
                'left']. Default value is picked from the class attribute of the same name.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask
                according to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
        """
        # If we have a list of dicts, let's convert it to a dict of lists
        # We do this to allow using this method as a collate_fn function in a PyTorch Dataloader
        if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
            encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0]}

        # The model's main input name, usually `input_ids`, has to be passed for padding
        if self.model_input_names[0] not in encoded_inputs:
            raise ValueError(
                'You should supply an encoding or a list of encodings to this method that includes '
                f'{self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}'
            )

        required_input = encoded_inputs[self.model_input_names[0]]

        if not required_input:
            if return_attention_mask:
                encoded_inputs['attention_mask'] = []
            return encoded_inputs

        # If we have PyTorch/NumPy tensors/arrays as inputs, we cast them as python objects
        # and rebuild them afterwards if no return_tensors is specified
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]
        # At this point, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
        if not isinstance(first_element, (int, list, tuple)):
            if is_torch_tensor(first_element):
                return_tensors = 'pt' if return_tensors is None else return_tensors
            elif isinstance(first_element, np.ndarray):
                return_tensors = 'np' if return_tensors is None else return_tensors
            else:
                raise ValueError(
                    f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, or '
                    'pytorch object.'
                )

            for key, value in encoded_inputs.items():
                encoded_inputs[key] = to_py_obj(value)

        # Convert padding_strategy in PaddingStrategy
        padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
            padding=padding, max_length=max_length, verbose=verbose
        )

        if max_entity_length is None:
            max_entity_length = self.max_entity_length

        required_input = encoded_inputs[self.model_input_names[0]]
        if required_input and not isinstance(required_input[0], (list, tuple)):
            encoded_inputs = self._pad(
                encoded_inputs,
                max_length=max_length,
                max_entity_length=max_entity_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )
            return BatchEncoding(encoded_inputs, tensor_type=return_tensors)

        batch_size = len(required_input)
        if any(len(v) != batch_size for v in encoded_inputs.values()):
            raise ValueError('Some items in the output dictionary have a different batch size than others.')

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = max(len(inputs) for inputs in required_input)
            max_entity_length = (
                max(len(inputs) for inputs in encoded_inputs['entity_ids']) if 'entity_ids' in encoded_inputs else 0
            )
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in encoded_inputs.items()}
            outputs = self._pad(
                inputs,
                max_length=max_length,
                max_entity_length=max_entity_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        return BatchEncoding(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            max_entity_length: The maximum length of the entity sequence.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad

                The tokenizer padding sides are defined in self.padding_side:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right',
                'left']. Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        entities_provided = bool('entity_ids' in encoded_inputs)

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(encoded_inputs['input_ids'])
            if entities_provided:
                max_entity_length = len(encoded_inputs['entity_ids'])

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        if (
            entities_provided
            and max_entity_length is not None
            and pad_to_multiple_of is not None
            and (max_entity_length % pad_to_multiple_of != 0)
        ):
            max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
            len(encoded_inputs['input_ids']) != max_length
            or (entities_provided and len(encoded_inputs['entity_ids']) != max_entity_length)
        )

        # Initialize attention mask if not present.
        if return_attention_mask and 'attention_mask' not in encoded_inputs:
            encoded_inputs['attention_mask'] = [1] * len(encoded_inputs['input_ids'])
        if entities_provided and return_attention_mask and 'entity_attention_mask' not in encoded_inputs:
            encoded_inputs['entity_attention_mask'] = [1] * len(encoded_inputs['entity_ids'])

        if needs_to_be_padded:
            difference = max_length - len(encoded_inputs['input_ids'])
            padding_side = padding_side if padding_side is not None else self.padding_side
            if entities_provided:
                entity_difference = max_entity_length - len(encoded_inputs['entity_ids'])
            if padding_side == 'right':
                if return_attention_mask:
                    encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference
                    if entities_provided:
                        encoded_inputs['entity_attention_mask'] = (
                            encoded_inputs['entity_attention_mask'] + [0] * entity_difference
                        )
                if 'token_type_ids' in encoded_inputs:
                    encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [0] * difference
                    if entities_provided:
                        encoded_inputs['entity_token_type_ids'] = (
                            encoded_inputs['entity_token_type_ids'] + [0] * entity_difference
                        )
                if 'special_tokens_mask' in encoded_inputs:
                    encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference
                encoded_inputs['input_ids'] = encoded_inputs['input_ids'] + [self.pad_token_id] * difference
                if entities_provided:
                    encoded_inputs['entity_ids'] = (
                        encoded_inputs['entity_ids'] + [self.entity_pad_token_id] * entity_difference
                    )
                    encoded_inputs['entity_position_ids'] = (
                        encoded_inputs['entity_position_ids'] + [[-1] * self.max_mention_length] * entity_difference
                    )
                    if self.task == 'entity_span_classification':
                        encoded_inputs['entity_start_positions'] = (
                            encoded_inputs['entity_start_positions'] + [0] * entity_difference
                        )
                        encoded_inputs['entity_end_positions'] = (
                            encoded_inputs['entity_end_positions'] + [0] * entity_difference
                        )
            elif padding_side == 'left':
                if return_attention_mask:
                    encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']
                    if entities_provided:
                        encoded_inputs['entity_attention_mask'] = (
                            [0] * entity_difference + encoded_inputs['entity_attention_mask']
                        )
                if 'token_type_ids' in encoded_inputs:
                    encoded_inputs['token_type_ids'] = [0] * difference + encoded_inputs['token_type_ids']
                    if entities_provided:
                        encoded_inputs['entity_token_type_ids'] = (
                            [0] * entity_difference + encoded_inputs['entity_token_type_ids']
                        )
                if 'special_tokens_mask' in encoded_inputs:
                    encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']
                encoded_inputs['input_ids'] = [self.pad_token_id] * difference + encoded_inputs['input_ids']
                if entities_provided:
                    encoded_inputs['entity_ids'] = (
                        [self.entity_pad_token_id] * entity_difference + encoded_inputs['entity_ids']
                    )
                    encoded_inputs['entity_position_ids'] = (
                        [[-1] * self.max_mention_length] * entity_difference + encoded_inputs['entity_position_ids']
                    )
                    if self.task == 'entity_span_classification':
                        encoded_inputs['entity_start_positions'] = (
                            [0] * entity_difference + encoded_inputs['entity_start_positions']
                        )
                        encoded_inputs['entity_end_positions'] = (
                            [0] * entity_difference + encoded_inputs['entity_end_positions']
                        )
            else:
                raise ValueError('Invalid padding side: ' + str(padding_side))

        return encoded_inputs

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check '
                        'that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        entity_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['entity_vocab_file'],
        )

        with open(entity_vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        return (vocab_file, merge_file, entity_vocab_file)
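As a minimal usage sketch of the entity-aware encoding implemented above (mirroring the `studio-ousia/luke-base` checkpoint named in the class docstring; assumes the checkpoint files are available locally or via the Hub):

```python
from transformers import LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")

text = "Beyoncé lives in Los Angeles."
entity_spans = [(0, 7), (17, 28)]  # character spans of "Beyoncé" and "Los Angeles"

encoding = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
# Besides input_ids/attention_mask, the tokenizer emits the entity-side inputs
# described above: entity_ids, entity_position_ids, entity_attention_mask.
# With no `entities` given, entity_ids are filled with the [MASK] entity id.
print({k: v.shape for k, v in encoding.items()})
```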
class LukeTokenizer(PreTrainedTokenizer):
    '''
    Constructs a LUKE tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import LukeTokenizer

    >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
    >>> tokenizer("Hello world")["input_ids"]
    [0, 31414, 232, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [0, 20920, 232, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods. It also creates entity sequences, namely
    `entity_ids`, `entity_attention_mask`, `entity_token_type_ids`, and `entity_position_ids` to be used by the LUKE
    model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        entity_vocab_file (`str`):
            Path to the entity vocabulary file.
        task (`str`, *optional*):
            Task for which you want to prepare sequences. One of `"entity_classification"`,
            `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the
            entity sequence is automatically created based on the given entity span(s).
        max_entity_length (`int`, *optional*, defaults to 32):
            The maximum length of `entity_ids`.
        max_mention_length (`int`, *optional*, defaults to 30):
            The maximum number of tokens inside an entity span.
        entity_token_1 (`str`, *optional*, defaults to `<ent>`):
            The special token used to represent an entity span in a word token sequence. This token is only used when
            `task` is set to `"entity_classification"` or `"entity_pair_classification"`.
        entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
            The special token used to represent an entity span in a word token sequence. This token is only used when
            `task` is set to `"entity_pair_classification"`.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (The LUKE tokenizer detects the beginning of words by the preceding space.)
    '''
    def __init__(self, vocab_file, merges_file, entity_vocab_file, task=None, max_entity_length=32, max_mention_length=30, entity_token_1='<ent>', entity_token_2='<ent2>', entity_unk_token='[UNK]', entity_pad_token='[PAD]', entity_mask_token='[MASK]', entity_mask2_token='[MASK2]', errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        pass

    @property
    def vocab_size(self):
        pass

    def get_vocab(self):
        pass

    def bpe(self, token):
        pass

    def _tokenize(self, text):
        '''Tokenize a string.'''
        pass

    def _convert_token_to_id(self, token):
        '''Converts a token (str) in an id using the vocab.'''
        pass

    def _convert_id_to_token(self, index):
        '''Converts an index (integer) in a token (str) using the vocab.'''
        pass

    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) in a single string.'''
        pass

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. A LUKE sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        '''
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        '''
        pass

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. LUKE does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.
        '''
        pass

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        pass

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(self, text: Union[TextInput, list[TextInput]], text_pair: Optional[Union[TextInput, list[TextInput]]]=None, entity_spans: Optional[Union[EntitySpanInput, list[EntitySpanInput]]]=None, entity_spans_pair: Optional[Union[EntitySpanInput, list[EntitySpanInput]]]=None, entities: Optional[Union[EntityInput, list[EntityInput]]]=None, entities_pair: Optional[Union[EntityInput, list[EntityInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        '''
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences, depending on the task you want to prepare them for.

        Args:
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
                tokenizer does not support tokenization based on pretokenized strings.
            text_pair (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
                tokenizer does not support tokenization based on pretokenized strings.
            entity_spans (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
                The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples
                each with two integers denoting character-based start and end positions of entities. If you specify
                `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the
                constructor, the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the
                length of each sequence must be equal to the length of each sequence of `entities`.
            entity_spans_pair (`list[tuple[int, int]]`, `list[list[tuple[int, int]]]`, *optional*):
                The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples
                each with two integers denoting character-based start and end positions of entities. If you specify
                the `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
                length of each sequence must be equal to the length of each sequence of `entities_pair`.
            entities (`list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
                representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
                Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length
                of each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
                `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
                is automatically constructed by filling it with the [MASK] entity.
            entities_pair (`list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
                representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
                Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length
                of each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
                `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
                sequences is automatically constructed by filling it with the [MASK] entity.
            max_entity_length (`int`, *optional*):
                The maximum length of `entity_ids`.
        '''
        pass

    def _encode_plus(self, text: Union[TextInput], text_pair: Optional[Union[TextInput]]=None, entity_spans: Optional[EntitySpanInput]=None, entity_spans_pair: Optional[EntitySpanInput]=None, entities: Optional[EntityInput]=None, entities_pair: Optional[EntityInput]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        pass

    def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair]], batch_entity_spans_or_entity_spans_pairs: Optional[Union[list[EntitySpanInput], list[tuple[EntitySpanInput, EntitySpanInput]]]]=None, batch_entities_or_entities_pairs: Optional[Union[list[EntityInput], list[tuple[EntityInput, EntityInput]]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        pass

    def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
        pass

    def _create_input_sequence(self, text: Union[TextInput], text_pair: Optional[Union[TextInput]]=None, entities: Optional[EntityInput]=None, entities_pair: Optional[EntityInput]=None, entity_spans: Optional[EntitySpanInput]=None, entity_spans_pair: Optional[EntitySpanInput]=None, **kwargs) -> tuple[list, list, list, list, list, list]:
        pass

    def get_input_ids(text):
        pass

    def get_input_ids_and_entity_token_spans(text, entity_spans):
        pass

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_prepare_for_model(self, batch_ids_pairs: list[tuple[list[int], None]], batch_entity_ids_pairs: list[tuple[Optional[list[int]], Optional[list[int]]]], batch_entity_token_spans_pairs: list[tuple[Optional[list[tuple[int, int]]], Optional[list[tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
        '''
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
            batch_entity_ids_pairs: list of entity ids or entity ids pairs
            batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
            max_entity_length: The maximum length of the entity sequence.
        '''
        pass

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(self, ids: list[int], pair_ids: Optional[list[int]]=None, entity_ids: Optional[list[int]]=None, pair_entity_ids: Optional[list[int]]=None, entity_token_spans: Optional[list[tuple[int, int]]]=None, pair_entity_token_spans: Optional[list[tuple[int, int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
        '''
        Prepares a sequence of input ids, entity ids and entity spans, or a pair of sequences of input ids, entity
        ids and entity spans, so that it can be used by the model. It adds special tokens, truncates sequences if
        overflowing while taking into account the special tokens and manages a moving window (with user defined
        stride) for overflowing tokens. Please note, for *pair_ids* different from `None` and *truncation_strategy =
        longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments
        will raise an error.

        Args:
            ids (`list[int]`):
                Tokenized input ids of the first sequence.
            pair_ids (`list[int]`, *optional*):
                Tokenized input ids of the second sequence.
            entity_ids (`list[int]`, *optional*):
                Entity ids of the first sequence.
            pair_entity_ids (`list[int]`, *optional*):
                Entity ids of the second sequence.
            entity_token_spans (`list[tuple[int, int]]`, *optional*):
                Entity spans of the first sequence.
            pair_entity_token_spans (`list[tuple[int, int]]`, *optional*):
                Entity spans of the second sequence.
            max_entity_length (`int`, *optional*):
                The maximum length of the entity sequence.
        '''
        pass

    def pad(self, encoded_inputs: Union[BatchEncoding, list[BatchEncoding], dict[str, EncodedInput], dict[str, list[EncodedInput]], list[dict[str, EncodedInput]]], padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True) -> BatchEncoding:
        '''
        Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
        in the batch. Padding side (left/right) and padding token ids are defined at the tokenizer level (with
        `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`).

        .. note:: If the `encoded_inputs` passed are dictionary of numpy arrays or PyTorch tensors, the result will
        use the same type unless you provide a different tensor type with `return_tensors`. In the case of PyTorch
        tensors, you will lose the specific device of your tensors however.

        Args:
            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str,
                list[list[int]]]` or `list[dict[str, list[int]]]`): Tokenized inputs. Can represent one input
                ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of tokenized inputs (list of
                [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str, list[int]]]*) so you can use this
                method during preprocessing as well as in a PyTorch Dataloader collate function. Instead of
                `list[int]` you can have tensors (numpy arrays, or PyTorch tensors), see the note above for the
                return type.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            max_entity_length (`int`, *optional*):
                The maximum length of the entity sequence.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right',
                'left']. Default value is picked from the class attribute of the same name.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask
                according to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
        '''
        pass

    def _pad(self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, max_entity_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:
        '''
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            max_entity_length: The maximum length of the entity sequence.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right',
                'left']. Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        '''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
29
13
61
6
46
9
8
0.29
1
22
2
0
22
17
22
111
1,555
172
1,084
357
865
310
497
148
472
30
3
4
182
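A quick usage sketch of the entity padding behavior documented in `_pad` above. The checkpoint name and the example spans follow the LUKE documentation pattern; the exact padded lengths depend on the tokenizer's `max_entity_length` (32 by default), so the printed comments are illustrative rather than guaranteed.

```python
from transformers import LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base')
text = 'Beyoncé lives in Los Angeles'
entity_spans = [(0, 7), (17, 28)]  # character spans for "Beyoncé" and "Los Angeles"

encoding = tokenizer(text, entity_spans=entity_spans, padding='max_length', max_length=16)
print(len(encoding['input_ids']))           # 16: word ids padded with pad_token_id
print(len(encoding['entity_ids']))          # 32: entity ids padded with entity_pad_token_id
print(encoding['entity_position_ids'][-1])  # padding rows are [-1] * max_mention_length
```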
3,526
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/configuration_lxmert.py
transformers.models.lxmert.configuration_lxmert.LxmertConfig
from ...configuration_utils import PretrainedConfig


class LxmertConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is
    used to instantiate a LXMERT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Lxmert
    [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_qa_labels (`int`, *optional*, defaults to 9500):
            This represents the total number of different question answering (QA) labels there are. If using more
            than one dataset with QA, the user will need to account for the total number of labels that all of the
            datasets have in total.
        num_object_labels (`int`, *optional*, defaults to 1600):
            This represents the total number of semantically unique objects that lxmert will be able to classify a
            pooled-object feature as belonging to.
        num_attr_labels (`int`, *optional*, defaults to 400):
            This represents the total number of semantically unique attributes that lxmert will be able to classify a
            pooled-object feature as possessing.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`BertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        l_layers (`int`, *optional*, defaults to 9):
            Number of hidden layers in the Transformer language encoder.
        x_layers (`int`, *optional*, defaults to 5):
            Number of hidden layers in the Transformer cross modality encoder.
        r_layers (`int`, *optional*, defaults to 5):
            Number of hidden layers in the Transformer visual encoder.
        visual_feat_dim (`int`, *optional*, defaults to 2048):
            This represents the last dimension of the pooled-object features used as input for the model,
            representing the size of each object feature itself.
        visual_pos_dim (`int`, *optional*, defaults to 4):
            This represents the number of spatial features that are mixed into the visual features. The default is
            set to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width,
            height).
        visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
            This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
            decided to train with multiple vision-based loss objectives.
        task_matched (`bool`, *optional*, defaults to `True`):
            This task is used for sentence-image matching. If the sentence correctly describes the image the label
            will be 1. If the sentence does not correctly describe the image, the label will be 0.
        task_mask_lm (`bool`, *optional*, defaults to `True`):
            Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
            objective.
        task_obj_predict (`bool`, *optional*, defaults to `True`):
            Whether or not to add object prediction, attribute prediction and feature regression to the loss
            objective.
        task_qa (`bool`, *optional*, defaults to `True`):
            Whether or not to add the question-answering loss to the objective.
        visual_obj_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the object-prediction loss objective.
        visual_attr_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the attribute-prediction loss objective.
        visual_feat_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the feature-regression loss objective.
    """

    model_type = 'lxmert'
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
class LxmertConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is
    used to instantiate a LXMERT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Lxmert
    [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_qa_labels (`int`, *optional*, defaults to 9500):
            This represents the total number of different question answering (QA) labels there are. If using more
            than one dataset with QA, the user will need to account for the total number of labels that all of the
            datasets have in total.
        num_object_labels (`int`, *optional*, defaults to 1600):
            This represents the total number of semantically unique objects that lxmert will be able to classify a
            pooled-object feature as belonging to.
        num_attr_labels (`int`, *optional*, defaults to 400):
            This represents the total number of semantically unique attributes that lxmert will be able to classify a
            pooled-object feature as possessing.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`BertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        l_layers (`int`, *optional*, defaults to 9):
            Number of hidden layers in the Transformer language encoder.
        x_layers (`int`, *optional*, defaults to 5):
            Number of hidden layers in the Transformer cross modality encoder.
        r_layers (`int`, *optional*, defaults to 5):
            Number of hidden layers in the Transformer visual encoder.
        visual_feat_dim (`int`, *optional*, defaults to 2048):
            This represents the last dimension of the pooled-object features used as input for the model,
            representing the size of each object feature itself.
        visual_pos_dim (`int`, *optional*, defaults to 4):
            This represents the number of spatial features that are mixed into the visual features. The default is
            set to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width,
            height).
        visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
            This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
            decided to train with multiple vision-based loss objectives.
        task_matched (`bool`, *optional*, defaults to `True`):
            This task is used for sentence-image matching. If the sentence correctly describes the image the label
            will be 1. If the sentence does not correctly describe the image, the label will be 0.
        task_mask_lm (`bool`, *optional*, defaults to `True`):
            Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
            objective.
        task_obj_predict (`bool`, *optional*, defaults to `True`):
            Whether or not to add object prediction, attribute prediction and feature regression to the loss
            objective.
        task_qa (`bool`, *optional*, defaults to `True`):
            Whether or not to add the question-answering loss to the objective.
        visual_obj_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the object-prediction loss objective.
        visual_attr_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the attribute-prediction loss objective.
        visual_feat_loss (`bool`, *optional*, defaults to `True`):
            Whether or not to calculate the feature-regression loss objective.
    '''
    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        pass
2
1
60
0
60
0
1
1.19
1
1
0
0
1
28
1
1
143
5
63
62
31
75
33
32
31
1
1
0
1
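A minimal usage sketch of the configuration class above, mirroring the standard `transformers` config/model pattern; it assumes the `transformers` package is installed.

```python
from transformers import LxmertConfig, LxmertModel

# Default arguments reproduce the unc-nlp/lxmert-base-uncased-style architecture
configuration = LxmertConfig()
model = LxmertModel(configuration)

# The three encoder depths are bundled into a single mapping by __init__
print(configuration.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}
```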
3,527
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.GeLU
from torch import nn
from ...activations import ACT2FN, gelu


class GeLU(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return gelu(x)
class GeLU(nn.Module):
    def __init__(self):
        pass

    def forward(self, x):
        pass
3
0
2
0
2
0
1
0
1
1
0
0
2
0
2
12
6
1
5
3
2
0
5
3
2
1
1
0
2
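The `GeLU` module above is a thin wrapper around the functional `gelu` activation. As a self-contained sketch, the same behavior can be reproduced with `torch.nn.functional.gelu` (an assumption on my part: both compute the exact erf-based GELU with default settings).

```python
import torch
from torch import nn

class GeLU(nn.Module):
    """Module wrapper around the exact (erf-based) GELU activation."""
    def forward(self, x):
        return nn.functional.gelu(x)

x = torch.randn(2, 4)
assert torch.allclose(GeLU()(x), nn.GELU()(x))  # matches torch's built-in module
```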
3,528
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertAttention
import math
import torch
from torch import nn


class LxmertAttention(nn.Module):
    def __init__(self, config, ctx_dim=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.head_size = self.num_attention_heads * self.attention_head_size

        # Keys and values may come from a different stream ("context"), whose width can differ
        if ctx_dim is None:
            ctx_dim = config.hidden_size
        self.query = nn.Linear(config.hidden_size, self.head_size)
        self.key = nn.Linear(ctx_dim, self.head_size)
        self.value = nn.Linear(ctx_dim, self.head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
        batch_size, seq_length, _ = hidden_states.shape
        # Project and split into (batch, heads, seq, head_size)
        query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        key_layer = self.key(context).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        value_layer = self.value(context).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)

        # Scaled dot-product scores between queries and keys
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities, then apply dropout
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)

        # Weighted sum over values, then merge heads back into hidden_size
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class LxmertAttention(nn.Module):
    def __init__(self, config, ctx_dim=None):
        pass

    def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
        pass
3
0
19
3
14
2
2
0.14
1
3
0
0
3
7
3
13
59
10
43
23
39
6
37
23
33
3
1
1
7
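A shape walkthrough of the scaled dot-product attention implemented in `LxmertAttention.forward`, written with plain torch ops on random placeholder tensors; 12 heads of size 64 mirror the `hidden_size=768` default. Note that the query and key/value sequence lengths may differ, which is what makes the module usable for cross-attention.

```python
import math
import torch

batch, seq_q, seq_k, heads, head_dim = 2, 5, 7, 12, 64
q = torch.randn(batch, heads, seq_q, head_dim)
k = torch.randn(batch, heads, seq_k, head_dim)
v = torch.randn(batch, heads, seq_k, head_dim)

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_dim)  # (2, 12, 5, 7)
probs = torch.softmax(scores, dim=-1)  # each row sums to 1 over the 7 keys
context = torch.matmul(probs, v)       # (2, 12, 5, 64)
merged = context.permute(0, 2, 1, 3).reshape(batch, seq_q, heads * head_dim)
print(merged.shape)                    # torch.Size([2, 5, 768])
```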
3,529
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertAttentionOutput
from torch import nn


class LxmertAttentionOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class LxmertAttentionOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states, input_tensor):
        pass
3
0
5
0
5
0
1
0
1
1
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
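A minimal sketch of the post-layer-norm residual pattern in `LxmertAttentionOutput`: the attention output is projected, dropped out, added to the block input, and then normalized. All tensors here are random placeholders.

```python
import torch
from torch import nn

hidden_size = 768
dense = nn.Linear(hidden_size, hidden_size)
norm = nn.LayerNorm(hidden_size, eps=1e-12)
drop = nn.Dropout(0.1)

input_tensor = torch.randn(2, 5, hidden_size)   # residual branch
attention_out = torch.randn(2, 5, hidden_size)  # output of the attention module
out = norm(drop(dense(attention_out)) + input_tensor)
print(out.shape)  # torch.Size([2, 5, 768]) -- same shape as the input
```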
3,530
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertCrossAttentionLayer
from torch import nn


class LxmertCrossAttentionLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.att = LxmertAttention(config)
        self.output = LxmertAttentionOutput(config)

    def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
        output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
        if output_attentions:
            attention_probs = output[1]
        attention_output = self.output(output[0], input_tensor)
        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
        return outputs
class LxmertCrossAttentionLayer(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
        pass
3
0
6
0
6
0
2
0
1
3
2
0
2
2
2
12
13
1
12
9
9
0
12
9
9
3
1
1
4
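A sketch of how the cross-attention layer above is wired: queries come from one stream (e.g. language) while keys and values come from the other (e.g. vision), so the output follows the query stream's length. The import path is the module's location inside `transformers` and is assumed stable.

```python
import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertCrossAttentionLayer

layer = LxmertCrossAttentionLayer(LxmertConfig())
lang = torch.randn(2, 5, 768)     # query stream
vision = torch.randn(2, 36, 768)  # context stream (keys/values)

(out,) = layer(lang, vision)      # output_attentions defaults to False
print(out.shape)                  # torch.Size([2, 5, 768])
```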
3,531
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertEmbeddings
import torch
from torch import nn


class LxmertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        seq_length = input_shape[1]

        position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
        position_ids = position_ids.unsqueeze(0).expand(input_shape)

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class LxmertEmbeddings(nn.Module):
    '''Construct the embeddings from word, position and token_type embeddings.'''
    def __init__(self, config):
        pass

    def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
        pass
3
1
17
3
14
1
3
0.11
1
1
0
0
2
5
2
12
38
7
28
15
25
3
27
15
24
4
1
1
5
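A self-contained sketch of the embedding sum computed in `LxmertEmbeddings.forward`: word, absolute-position, and token-type embeddings are added elementwise, then LayerNorm and dropout are applied. Sizes are small placeholders, not the real config values.

```python
import torch
from torch import nn

vocab, max_pos, types, hidden = 100, 512, 2, 16
word_emb = nn.Embedding(vocab, hidden, padding_idx=0)
pos_emb = nn.Embedding(max_pos, hidden, padding_idx=0)
type_emb = nn.Embedding(types, hidden, padding_idx=0)

input_ids = torch.tensor([[5, 6, 7, 0]])  # trailing 0 is padding
position_ids = torch.arange(input_ids.size(1)).unsqueeze(0).expand_as(input_ids)
token_type_ids = torch.zeros_like(input_ids)

embeddings = word_emb(input_ids) + pos_emb(position_ids) + type_emb(token_type_ids)
embeddings = nn.Dropout(0.1)(nn.LayerNorm(hidden, eps=1e-12)(embeddings))
print(embeddings.shape)  # torch.Size([1, 4, 16])
```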
3,532
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertEncoder
from torch import nn


class LxmertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Object-level image embedding layer
        self.visn_fc = LxmertVisualFeatureEncoder(config)
        self.config = config

        # Number of layers per encoder
        self.num_l_layers = config.l_layers
        self.num_x_layers = config.x_layers
        self.num_r_layers = config.r_layers

        # Language, cross-modality, and visual layer stacks
        self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
        self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
        self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])

    def forward(self, lang_feats, lang_attention_mask, visual_feats, visual_pos, visual_attention_mask=None, output_attentions=None):
        vision_hidden_states = ()
        language_hidden_states = ()
        vision_attentions = () if output_attentions or self.config.output_attentions else None
        language_attentions = () if output_attentions or self.config.output_attentions else None
        cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
        visual_feats = self.visn_fc(visual_feats, visual_pos)

        # Run language-only layers
        for layer_module in self.layer:
            l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
            lang_feats = l_outputs[0]
            language_hidden_states = language_hidden_states + (lang_feats,)
            if language_attentions is not None:
                language_attentions = language_attentions + (l_outputs[1],)

        # Run vision-only layers
        for layer_module in self.r_layers:
            v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
            visual_feats = v_outputs[0]
            vision_hidden_states = vision_hidden_states + (visual_feats,)
            if vision_attentions is not None:
                vision_attentions = vision_attentions + (v_outputs[1],)

        # Run cross-modality layers, updating both streams
        for layer_module in self.x_layers:
            x_outputs = layer_module(lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions=output_attentions)
            lang_feats, visual_feats = x_outputs[:2]
            vision_hidden_states = vision_hidden_states + (visual_feats,)
            language_hidden_states = language_hidden_states + (lang_feats,)
            if cross_encoder_attentions is not None:
                cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)

        visual_encoder_outputs = (vision_hidden_states, vision_attentions if output_attentions else None)
        lang_encoder_outputs = (language_hidden_states, language_attentions if output_attentions else None)
        return (visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None)
class LxmertEncoder(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, lang_feats, lang_attention_mask, visual_feats, visual_pos, visual_attention_mask=None, output_attentions=None):
        pass
3
0
39
4
32
4
7
0.11
1
5
3
0
2
8
2
12
79
8
64
30
53
7
40
22
37
13
1
2
14
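A toy control-flow sketch of the three-stage schedule in `LxmertEncoder.forward`: the language stream passes through `l_layers` self-attention layers, the vision stream through `r_layers`, and then `x_layers` cross-modality layers update both streams jointly. The layer functions here are illustrative placeholders, not the real LXMERT layers.

```python
import torch

def single_modality_layer(x):  # stands in for LxmertLayer
    return x + 1

def cross_modality_layer(lang, vis):  # stands in for LxmertXLayer
    return lang + vis.mean(), vis + lang.mean()

lang, vis = torch.zeros(3), torch.zeros(4)
for _ in range(9):   # config.l_layers
    lang = single_modality_layer(lang)
for _ in range(5):   # config.r_layers
    vis = single_modality_layer(vis)
for _ in range(5):   # config.x_layers
    lang, vis = cross_modality_layer(lang, vis)
```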
3,533
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertForPreTraining
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
import torch
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import warnings
from typing import Optional, Union


@auto_docstring
class LxmertForPreTraining(LxmertPreTrainedModel):
    _tied_weights_keys = ['cls.predictions.decoder.weight']

    def __init__(self, config):
        super().__init__(config)
        # Configuration
        self.config = config
        self.num_qa_labels = config.num_qa_labels
        self.visual_loss_normalizer = config.visual_loss_normalizer

        # Which pretraining tasks are active
        self.task_mask_lm = config.task_mask_lm
        self.task_obj_predict = config.task_obj_predict
        self.task_matched = config.task_matched
        self.task_qa = config.task_qa

        # LXMERT backbone and pretraining heads
        self.lxmert = LxmertModel(config)
        self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
        if self.task_obj_predict:
            self.obj_predict_head = LxmertVisualObjHead(config)
        if self.task_qa:
            self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)

        # Weight initialization
        self.post_init()

        # Loss functions
        self.loss_fcts = {'l2': SmoothL1Loss(reduction='none'), 'visual_ce': CrossEntropyLoss(reduction='none'), 'ce': CrossEntropyLoss()}

        visual_losses = {}
        if config.visual_obj_loss:
            visual_losses['obj'] = {'shape': (-1,), 'num': config.num_object_labels, 'loss': 'visual_ce'}
        if config.visual_attr_loss:
            visual_losses['attr'] = {'shape': (-1,), 'num': config.num_attr_labels, 'loss': 'visual_ce'}
        if config.visual_feat_loss:
            visual_losses['feat'] = {'shape': (-1, config.visual_feat_dim), 'num': config.visual_feat_dim, 'loss': 'l2'}
        self.visual_losses = visual_losses

    def _tie_weights(self):
        self.cls.predictions.decoder.weight = self.lxmert.embeddings.word_embeddings.weight

    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
        self.cls.predictions.bias = self._resize_bias(self.cls.predictions.bias, new_num_tokens)
        return new_embeddings

    def _resize_bias(self, bias, new_num_tokens: int):
        old_num_tokens = bias.shape[0]
        if new_num_tokens <= old_num_tokens:
            new_bias = bias[:new_num_tokens]
        else:
            extra_bias = torch.zeros(new_num_tokens - old_num_tokens, device=bias.device)
            new_bias = torch.cat([bias, extra_bias])
        new_bias = nn.Parameter(new_bias)
        return new_bias

    def resize_num_qa_labels(self, num_labels):
        """
        Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
        will add newly initialized weights. Reducing the size will remove weights from the end.

        Args:
            num_labels (`int`, *optional*):
                New number of labels in the linear layer weight matrix. Increasing the size will add newly
                initialized weights at the end. Reducing the size will remove weights from the end. If not provided
                or `None`, just returns a pointer to the qa labels `torch.nn.Linear` module of the model without
                doing anything.

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
        """
        cur_qa_logit_layer = self.get_qa_logit_layer()
        if num_labels is None or cur_qa_logit_layer is None:
            return
        new_qa_logit_layer = self._resize_qa_labels(num_labels)
        self.config.num_qa_labels = num_labels
        self.num_qa_labels = num_labels
        return new_qa_logit_layer

    def _resize_qa_labels(self, num_labels):
        cur_qa_logit_layer = self.get_qa_logit_layer()
        new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
        self._set_qa_logit_layer(new_qa_logit_layer)
        return self.get_qa_logit_layer()

    def get_qa_logit_layer(self) -> nn.Module:
        """
        Returns the linear layer that produces question answering logits.

        Returns:
            `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
            does not have a visual answering head.
        """
        if hasattr(self, 'answer_head'):
            return self.answer_head.logit_fc[-1]

    def _set_qa_logit_layer(self, qa_logit_layer):
        self.answer_head.logit_fc[-1] = qa_logit_layer

    def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
        if num_labels is None:
            return cur_qa_logit_layer
        cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
        if cur_qa_labels == num_labels:
            return cur_qa_logit_layer

        # Build a new linear output layer, initialize it, then copy over the old labels
        if getattr(cur_qa_logit_layer, 'bias', None) is not None:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
        else:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
        new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
        self._init_weights(new_qa_logit_layer)

        num_labels_to_copy = min(cur_qa_labels, num_labels)
        new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
        if getattr(cur_qa_logit_layer, 'bias', None) is not None:
            new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]

        return new_qa_logit_layer

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, visual_feats: Optional[torch.FloatTensor]=None, visual_pos: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, obj_labels: Optional[dict[str, tuple[torch.FloatTensor, torch.FloatTensor]]]=None, matched_label: Optional[torch.LongTensor]=None, ans: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[LxmertForPreTrainingOutput, tuple[torch.FloatTensor]]:
        """
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features. These are ROI-pooled object features from bounding boxes obtained
            with a faster-RCNN model. They are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0
            to 1. These are currently not provided by the transformers library.
        visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        obj_labels (`dict[str, tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
            Each key is named after one of the visual losses and each element of the tuple is of the shape
            `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label id and
            the label score respectively.
        matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing whether or not the text input matches the image (classification) loss. Input should
            be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates that the sentence does not match the image,
            - 1 indicates that the sentence does match the image.
        ans (`torch.Tensor` of shape `(batch_size)`, *optional*):
            A one-hot representation of the correct answer.
        """
        if 'masked_lm_labels' in kwargs:
            warnings.warn('The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning)
            labels = kwargs.pop('masked_lm_labels')

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        lxmert_output = self.lxmert(input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos, token_type_ids=token_type_ids, attention_mask=attention_mask, visual_attention_mask=visual_attention_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)

        lang_output, visual_output, pooled_output = (lxmert_output[0], lxmert_output[1], lxmert_output[2])
        lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
        if self.task_qa:
            answer_score = self.answer_head(pooled_output)
        else:
            answer_score = pooled_output[0][0]

        total_loss = None if labels is None and matched_label is None and (obj_labels is None) and (ans is None) else torch.tensor(0.0, device=device)
        if labels is not None and self.task_mask_lm:
            masked_lm_loss = self.loss_fcts['ce'](lang_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            total_loss += masked_lm_loss
        if matched_label is not None and self.task_matched:
            matched_loss = self.loss_fcts['ce'](cross_relationship_score.view(-1, 2), matched_label.view(-1))
            total_loss += matched_loss
        if obj_labels is not None and self.task_obj_predict:
            total_visual_loss = torch.tensor(0.0, device=device)
            visual_prediction_scores_dict = self.obj_predict_head(visual_output)
            for key, key_info in self.visual_losses.items():
                label, mask_conf = obj_labels[key]
                output_dim = key_info['num']
                loss_fct_name = key_info['loss']
                label_shape = key_info['shape']
                weight = self.visual_loss_normalizer
                visual_loss_fct = self.loss_fcts[loss_fct_name]
                visual_prediction_scores = visual_prediction_scores_dict[key]
                visual_loss = visual_loss_fct(visual_prediction_scores.view(-1, output_dim), label.view(label_shape))
                if visual_loss.dim() > 1:  # regression losses keep a feature dimension
                    visual_loss = visual_loss.mean(1)
                visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
                total_visual_loss += visual_loss
            total_loss += total_visual_loss
        if ans is not None and self.task_qa:
            answer_loss = self.loss_fcts['ce'](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
            total_loss += answer_loss

        if not return_dict:
            output = (lang_prediction_scores, cross_relationship_score, answer_score) + lxmert_output[3:]
            return (total_loss,) + output if total_loss is not None else output

        return LxmertForPreTrainingOutput(loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions)
@auto_docstring
class LxmertForPreTraining(LxmertPreTrainedModel):
    def __init__(self, config):
        pass

    def _tie_weights(self):
        pass

    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
        pass

    def _resize_bias(self, bias, new_num_tokens: int):
        pass

    def resize_num_qa_labels(self, num_labels):
        '''
        Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
        will add newly initialized weights. Reducing the size will remove weights from the end.

        Args:
            num_labels (`int`, *optional*):
                New number of labels in the linear layer weight matrix. Increasing the size will add newly
                initialized weights at the end. Reducing the size will remove weights from the end. If not provided
                or `None`, just returns a pointer to the qa labels `torch.nn.Linear` module of the model without
                doing anything.

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
        '''
        pass

    def _resize_qa_labels(self, num_labels):
        pass

    def get_qa_logit_layer(self) -> nn.Module:
        '''
        Returns the linear layer that produces question answering logits.

        Returns:
            `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
            does not have a visual answering head.
        '''
        pass

    def _set_qa_logit_layer(self, qa_logit_layer):
        pass

    def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, visual_feats: Optional[torch.FloatTensor]=None, visual_pos: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, obj_labels: Optional[dict[str, tuple[torch.FloatTensor, torch.FloatTensor]]]=None, matched_label: Optional[torch.LongTensor]=None, ans: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[LxmertForPreTrainingOutput, tuple[torch.FloatTensor]]:
        '''
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features. These are ROI-pooled object features from bounding boxes obtained
            with a faster-RCNN model. They are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0
            to 1. These are currently not provided by the transformers library.
        visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        obj_labels (`dict[str, tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
            Each key is named after one of the visual losses and each element of the tuple is of the shape
            `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label id and
            the label score respectively.
        matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing whether or not the text input matches the image (classification) loss. Input should
            be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates that the sentence does not match the image,
            - 1 indicates that the sentence does match the image.
        ans (`torch.Tensor` of shape `(batch_size)`, *optional*):
            A one-hot representation of the correct answer.
        '''
        pass
13
3
30
3
22
5
4
0.24
1
11
5
0
9
13
9
10
280
35
199
77
168
47
116
57
106
14
2
3
34
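A minimal usage sketch for the pretraining record above (not part of the dataset; the dummy shapes and the reliance on stock `LxmertConfig` defaults are assumptions):

import torch
from transformers import LxmertConfig, LxmertForPreTraining

config = LxmertConfig()  # stock hyperparameters
model = LxmertForPreTraining(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 12))   # dummy token ids
visual_feats = torch.randn(1, 10, config.visual_feat_dim)  # dummy ROI features
visual_pos = torch.rand(1, 10, config.visual_pos_dim)      # normalized boxes in [0, 1]

with torch.no_grad():
    out = model(input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos)

# No label arguments were passed, so out.loss is None; the heads still produce scores.
print(out.prediction_logits.shape)         # (1, 12, vocab_size)
print(out.cross_relationship_score.shape)  # (1, 2)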
3,534
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput
from ...utils import ModelOutput, auto_docstring, logging import torch from dataclasses import dataclass from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Output type of [`LxmertForPreTraining`].\n ') class LxmertForPreTrainingOutput(ModelOutput): """ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None cross_relationship_score: Optional[torch.FloatTensor] = None question_answering_score: Optional[torch.FloatTensor] = None language_hidden_states: Optional[tuple[torch.FloatTensor]] = None vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None language_attentions: Optional[tuple[torch.FloatTensor]] = None vision_attentions: Optional[tuple[torch.FloatTensor]] = None cross_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`LxmertForPreTraining`].\n ') class LxmertForPreTrainingOutput(ModelOutput): ''' loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
3
1
0
0
0
0
0
3.2
1
0
0
0
0
0
0
0
45
3
10
10
9
32
10
10
9
0
1
0
0
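Since `LxmertForPreTrainingOutput` subclasses `ModelOutput`, its fields can be read by attribute, key, or position; a small illustration (the zero tensor and its shape are stand-ins, not dataset content):

import torch
from transformers.models.lxmert.modeling_lxmert import LxmertForPreTrainingOutput

out = LxmertForPreTrainingOutput(prediction_logits=torch.zeros(1, 4, 30522))
print(out.prediction_logits.shape)     # attribute access
print(out["prediction_logits"].shape)  # mapping-style access
print(out[0].shape)                    # positional access skips fields left as None
print(len(out.to_tuple()))             # 1 -- only the non-None field survives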
3,535
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnswering
from torch import nn
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
from torch.nn import CrossEntropyLoss, SmoothL1Loss


@auto_docstring(custom_intro='\n Lxmert Model with a visual-answering head on top for downstream QA tasks\n ')
class LxmertForQuestionAnswering(LxmertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.num_qa_labels = config.num_qa_labels
        self.visual_loss_normalizer = config.visual_loss_normalizer
        # Lxmert backbone and answering head
        self.lxmert = LxmertModel(config)
        self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
        # Weight initialization
        self.post_init()
        # Loss function
        self.loss = CrossEntropyLoss()

    def resize_num_qa_labels(self, num_labels):
        """
        Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
        will add newly initialized weights. Reducing the size will remove weights from the end.

        Args:
            num_labels (`int`, *optional*):
                New number of labels in the linear layer weight matrix. Increasing the size will add newly
                initialized weights at the end. Reducing the size will remove weights from the end. If not provided
                or `None`, just returns a pointer to the qa labels `torch.nn.Linear` module of the model without
                doing anything.

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
        """
        cur_qa_logit_layer = self.get_qa_logit_layer()
        if num_labels is None or cur_qa_logit_layer is None:
            return
        new_qa_logit_layer = self._resize_qa_labels(num_labels)
        self.config.num_qa_labels = num_labels
        self.num_qa_labels = num_labels
        return new_qa_logit_layer

    def _resize_qa_labels(self, num_labels):
        cur_qa_logit_layer = self.get_qa_logit_layer()
        new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
        self._set_qa_logit_layer(new_qa_logit_layer)
        return self.get_qa_logit_layer()

    def get_qa_logit_layer(self) -> nn.Module:
        """
        Returns the linear layer that produces question answering logits.

        Returns:
            `nn.Module`: A torch module mapping the question answering prediction hidden states, or `None` if Lxmert
            does not have the visual answering head.
        """
        if hasattr(self, 'answer_head'):
            return self.answer_head.logit_fc[-1]

    def _set_qa_logit_layer(self, qa_logit_layer):
        self.answer_head.logit_fc[-1] = qa_logit_layer

    def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
        if num_labels is None:
            return cur_qa_logit_layer
        cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
        if cur_qa_labels == num_labels:
            return cur_qa_logit_layer
        # Build a new linear layer, keeping a bias only if the current layer has one
        if getattr(cur_qa_logit_layer, 'bias', None) is not None:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
        else:
            new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
        new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
        # Initialize all new labels, then copy over the weights for the labels that are kept
        self._init_weights(new_qa_logit_layer)
        num_labels_to_copy = min(cur_qa_labels, num_labels)
        new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
        if getattr(cur_qa_logit_layer, 'bias', None) is not None:
            new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
        return new_qa_logit_layer

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, visual_feats: Optional[torch.FloatTensor]=None, visual_pos: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[LxmertForQuestionAnsweringOutput, tuple[torch.FloatTensor]]:
        """
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features: ROI pooled object features from bounding boxes, extracted with a
            faster-RCNN model. These are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0
            to 1. These are currently not provided by the transformers library.
        visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        labels (`torch.Tensor` of shape `(batch_size)`, *optional*):
            A one-hot representation of the correct answer
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        lxmert_output = self.lxmert(input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos, token_type_ids=token_type_ids, attention_mask=attention_mask, visual_attention_mask=visual_attention_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
        pooled_output = lxmert_output[2]
        answer_score = self.answer_head(pooled_output)
        loss = None
        if labels is not None:
            loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
        if not return_dict:
            output = (answer_score,) + lxmert_output[3:]
            return (loss,) + output if loss is not None else output
        return LxmertForQuestionAnsweringOutput(loss=loss, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions)
@auto_docstring(custom_intro='\n Lxmert Model with a visual-answering head on top for downstream QA tasks\n ')
class LxmertForQuestionAnswering(LxmertPreTrainedModel):
    def __init__(self, config):
        pass

    def resize_num_qa_labels(self, num_labels):
        '''
        Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
        will add newly initialized weights. Reducing the size will remove weights from the end.

        Args:
            num_labels (`int`, *optional*):
                New number of labels in the linear layer weight matrix. Increasing the size will add newly
                initialized weights at the end. Reducing the size will remove weights from the end. If not provided
                or `None`, just returns a pointer to the qa labels `torch.nn.Linear` module of the model without
                doing anything.

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
        '''
        pass

    def _resize_qa_labels(self, num_labels):
        pass

    def get_qa_logit_layer(self) -> nn.Module:
        '''
        Returns the linear layer that produces question answering logits.

        Returns:
            `nn.Module`: A torch module mapping the question answering prediction hidden states, or `None` if Lxmert
            does not have the visual answering head.
        '''
        pass

    def _set_qa_logit_layer(self, qa_logit_layer):
        pass

    def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, visual_feats: Optional[torch.FloatTensor]=None, visual_pos: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[LxmertForQuestionAnsweringOutput, tuple[torch.FloatTensor]]:
        '''
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features: ROI pooled object features from bounding boxes, extracted with a
            faster-RCNN model. These are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0
            to 1. These are currently not provided by the transformers library.
        visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        labels (`torch.Tensor` of shape `(batch_size)`, *optional*):
            A one-hot representation of the correct answer
        '''
        pass
10
3
20
3
13
4
2
0.31
1
6
3
0
7
6
7
8
150
26
95
40
68
29
56
26
48
5
2
1
17
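The `resize_num_qa_labels` method documented above rebuilds the final linear layer of the answer head in place; a hedged sketch of calling it (the small label counts are illustrative, and the 1536 input width assumes the default hidden_size of 768):

from transformers import LxmertConfig, LxmertForQuestionAnswering

config = LxmertConfig(num_qa_labels=100)  # small label space for the example
model = LxmertForQuestionAnswering(config)

print(model.get_qa_logit_layer())  # final Linear of the answer head: 1536 -> 100
model.resize_num_qa_labels(250)    # keeps the first 100 rows, freshly initializes the remaining 150
print(model.get_qa_logit_layer())  # now 1536 -> 250
print(model.config.num_qa_labels)  # 250 -- the config is updated in place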
3,536
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
from typing import Optional, Union
import torch


@dataclass
@auto_docstring(custom_intro='\n Output type of [`LxmertForQuestionAnswering`].\n ')
class LxmertForQuestionAnsweringOutput(ModelOutput):
    """
    loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification)
        loss.
    question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
        Prediction scores of question answering objective (classification).
    language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    """
    loss: Optional[torch.FloatTensor] = None
    question_answering_score: Optional[torch.FloatTensor] = None
    language_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    language_attentions: Optional[tuple[torch.FloatTensor]] = None
    vision_attentions: Optional[tuple[torch.FloatTensor]] = None
    cross_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(custom_intro='\n Output type of [`LxmertForQuestionAnswering`].\n ')
class LxmertForQuestionAnsweringOutput(ModelOutput):
    '''
    loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification)
        loss.
    question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
        Prediction scores of question answering objective (classification).
    language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    '''
    pass
3
1
0
0
0
0
0
3.38
1
0
0
0
0
0
0
0
37
2
8
8
7
27
8
8
7
0
1
0
0
3,537
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertIntermediate
from torch import nn
from ...activations import ACT2FN, gelu


class LxmertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class LxmertIntermediate(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
3
0
4
0
4
0
1
0
1
1
0
0
2
2
2
12
10
1
9
5
6
0
9
5
6
1
1
0
2
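A tiny shape check for the intermediate layer above (not dataset content; the internal import path matches the file path recorded in this row, and the default sizes are assumptions):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertIntermediate

config = LxmertConfig()  # hidden_size=768, intermediate_size=3072 by default
layer = LxmertIntermediate(config)
hidden = torch.randn(2, 7, config.hidden_size)
print(layer(hidden).shape)  # (2, 7, 3072) -- expanded and activated, before LxmertOutput projects back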
3,538
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertLMPredictionHead
import torch
from torch import nn


class LxmertLMPredictionHead(nn.Module):
    def __init__(self, config, lxmert_model_embedding_weights):
        super().__init__()
        self.transform = LxmertPredictionHeadTransform(config)
        # The decoder shares its weight matrix with the input embeddings; only the output bias is new.
        self.decoder = nn.Linear(lxmert_model_embedding_weights.size(1), lxmert_model_embedding_weights.size(0), bias=False)
        self.decoder.weight = lxmert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
class LxmertLMPredictionHead(nn.Module):
    def __init__(self, config, lxmert_model_embedding_weights):
        pass

    def forward(self, hidden_states):
        pass
3
0
9
1
7
1
1
0.13
1
2
1
0
2
3
2
12
19
2
15
6
12
2
11
6
8
1
1
0
2
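Because the decoder reuses the embedding matrix, resizing the embeddings automatically resizes the LM head; a hedged sketch verifying the tie (the standalone `nn.Embedding` is a stand-in for the model's word embeddings):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertLMPredictionHead

config = LxmertConfig()
embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size)
head = LxmertLMPredictionHead(config, embeddings.weight)

print(head.decoder.weight is embeddings.weight)           # True -- tied, not copied
print(head(torch.randn(1, 5, config.hidden_size)).shape)  # (1, 5, vocab_size)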
3,539
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertLayer
from torch import nn


class LxmertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = LxmertSelfAttentionLayer(config)
        self.intermediate = LxmertIntermediate(config)
        self.output = LxmertOutput(config)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
        attention_output = outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + outputs[1:]
        return outputs
class LxmertLayer(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        pass
3
0
6
0
6
1
1
0.08
1
4
3
0
2
3
2
12
14
1
13
10
10
1
13
10
10
1
1
0
2
3,540
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertModel
from typing import Optional, Union
import torch
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring
class LxmertModel(LxmertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = LxmertEmbeddings(config)
        self.encoder = LxmertEncoder(config)
        self.pooler = LxmertPooler(config)
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, visual_feats: Optional[torch.FloatTensor]=None, visual_pos: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[LxmertModelOutput, tuple[torch.FloatTensor]]:
        """
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features: ROI pooled object features from bounding boxes, extracted with a
            faster-RCNN model. These are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0
            to 1. These are currently not provided by the transformers library.
        visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if visual_feats is None: raise ValueError('`visual_feats` cannot be `None`') if visual_pos is None: raise ValueError('`visual_pos` cannot be `None`') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min if visual_attention_mask is not None: extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2) extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype) extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min else: extended_visual_attention_mask = None embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds) encoder_outputs = self.encoder(embedding_output, extended_attention_mask, visual_feats=visual_feats, visual_pos=visual_pos, visual_attention_mask=extended_visual_attention_mask, output_attentions=output_attentions) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = (language_attentions, vision_attentions, cross_encoder_attentions) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return LxmertModelOutput(pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None)
@auto_docstring
class LxmertModel(LxmertPreTrainedModel):
    def __init__(self, config):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, new_embeddings):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, visual_feats: Optional[torch.FloatTensor]=None, visual_pos: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[LxmertModelOutput, tuple[torch.FloatTensor]]:
        '''
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features: ROI pooled object features from bounding boxes, extracted with a
            faster-RCNN model. These are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features. The
            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0
            to 1. These are currently not provided by the transformers library.
        visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        '''
        pass
7
1
31
4
23
4
6
0.14
1
7
4
0
4
3
4
5
132
18
100
38
77
14
56
25
51
21
2
1
24
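An end-to-end sketch of the backbone above with random weights (not dataset content; the dummy shapes and default-config sizes are assumptions):

import torch
from transformers import LxmertConfig, LxmertModel

config = LxmertConfig()
model = LxmertModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
visual_feats = torch.randn(1, 10, config.visual_feat_dim)
visual_pos = torch.rand(1, 10, config.visual_pos_dim)

with torch.no_grad():
    out = model(input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos)

print(out.language_output.shape)  # (1, 8, 768)
print(out.vision_output.shape)    # (1, 10, 768)
print(out.pooled_output.shape)    # (1, 768) -- Tanh-pooled [CLS] state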
3,541
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertModelOutput
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
import torch
from dataclasses import dataclass


@dataclass
@auto_docstring(custom_intro='\n Lxmert\'s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,\n visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"\n encoder)\n ')
class LxmertModelOutput(ModelOutput):
    """
    language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the language encoder.
    vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the visual encoder.
    pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
        Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
        by a Linear layer and a Tanh activation function.
    language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    """
    language_output: Optional[torch.FloatTensor] = None
    vision_output: Optional[torch.FloatTensor] = None
    pooled_output: Optional[torch.FloatTensor] = None
    language_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    language_attentions: Optional[tuple[torch.FloatTensor]] = None
    vision_attentions: Optional[tuple[torch.FloatTensor]] = None
    cross_encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(custom_intro='\n Lxmert\'s outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,\n visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"\n encoder)\n ')
class LxmertModelOutput(ModelOutput):
    '''
    language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the language encoder.
    vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the visual encoder.
    pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
        Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
        by a Linear layer and a Tanh activation function.
    language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
        shape `(batch_size, sequence_length, hidden_size)`.
    language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    '''
    pass
3
1
0
0
0
0
0
3.44
1
0
0
0
0
0
0
0
43
3
9
9
8
31
9
9
8
0
1
0
0
3,542
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertOutput
from torch import nn


class LxmertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class LxmertOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states, input_tensor):
        pass
3
0
5
0
5
0
1
0
1
1
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
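Note the post-LayerNorm residual ordering above (dropout before the residual add, LayerNorm after); a small sketch under the default config (the tensors are dummies):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertOutput

config = LxmertConfig()
out_layer = LxmertOutput(config)
inter = torch.randn(1, 4, config.intermediate_size)  # what LxmertIntermediate would emit
residual = torch.randn(1, 4, config.hidden_size)     # the attention output being re-added
print(out_layer(inter, residual).shape)              # (1, 4, 768)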
3,543
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertPooler
from torch import nn


class LxmertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Pool the sequence by taking the hidden state of the first token only.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class LxmertPooler(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
3
0
6
0
5
1
1
0.2
1
1
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
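A quick illustration that only the first ([CLS]) position feeds the pooler, and that Tanh bounds the result (dummy tensors, default config assumed):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertPooler

config = LxmertConfig()
pooler = LxmertPooler(config)
hidden = torch.randn(2, 9, config.hidden_size)
pooled = pooler(hidden)         # only hidden[:, 0] is used
print(pooled.shape)             # (2, 768)
print(pooled.abs().max() <= 1)  # tensor(True) -- Tanh keeps values in (-1, 1)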
3,544
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertPreTrainedModel
from torch import nn
from .configuration_lxmert import LxmertConfig
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_utils import PreTrainedModel


@auto_docstring
class LxmertPreTrainedModel(PreTrainedModel):
    config: LxmertConfig
    base_model_prefix = 'lxmert'
    _supports_param_buffer_assignment = False

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, LxmertLMPredictionHead):
            module.bias.data.zero_()
@auto_docstring
class LxmertPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
3
1
15
0
12
3
6
0.41
1
0
0
3
1
0
1
1
26
2
17
6
15
7
15
6
13
6
1
2
6
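`_init_weights` is applied to every submodule via `post_init()`, so a freshly constructed model already follows the scheme above; a hedged check (the inspected submodule is just one convenient Linear layer):

import torch
from transformers import LxmertConfig, LxmertModel

config = LxmertConfig(initializer_range=0.02)
model = LxmertModel(config)  # post_init() walks submodules and applies _init_weights

linear = model.pooler.dense
print(round(float(linear.weight.std()), 3))  # close to 0.02
print(bool(torch.all(linear.bias == 0)))     # True -- Linear biases are zeroed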
3,545
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertPreTrainingHeads
from torch import nn


class LxmertPreTrainingHeads(nn.Module):
    def __init__(self, config, lxmert_model_embedding_weights):
        super().__init__()
        self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return (prediction_scores, seq_relationship_score)
class LxmertPreTrainingHeads(nn.Module):
    def __init__(self, config, lxmert_model_embedding_weights):
        pass

    def forward(self, sequence_output, pooled_output):
        pass
3
0
4
0
4
0
1
0
1
2
1
0
2
2
2
12
10
1
9
7
6
0
9
7
6
1
1
0
2
3,546
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertPredictionHeadTransform
from ...activations import ACT2FN, gelu
from torch import nn


class LxmertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = ACT2FN[config.hidden_act]
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
class LxmertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
3
0
5
0
5
0
1
0
1
1
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
3,547
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertSelfAttentionLayer
from torch import nn


class LxmertSelfAttentionLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = LxmertAttention(config)
        self.output = LxmertAttentionOutput(config)

    def forward(self, input_tensor, attention_mask, output_attentions=False):
        # Self attention: the sequence attends to itself, so the context is input_tensor as well.
        output = self.self(input_tensor, input_tensor, attention_mask, output_attentions=output_attentions)
        if output_attentions:
            attention_probs = output[1]
        attention_output = self.output(output[0], input_tensor)
        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
        return outputs
class LxmertSelfAttentionLayer(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, input_tensor, attention_mask, output_attentions=False):
        pass
3
0
9
0
8
1
2
0.06
1
3
2
0
2
2
2
12
19
1
17
9
14
1
12
9
9
3
1
1
4
3,548
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertVisualAnswerHead
from torch import nn


class LxmertVisualAnswerHead(nn.Module):
    def __init__(self, config, num_labels):
        super().__init__()
        hid_dim = config.hidden_size
        self.logit_fc = nn.Sequential(
            nn.Linear(hid_dim, hid_dim * 2),
            GeLU(),
            nn.LayerNorm(hid_dim * 2, eps=1e-12),
            nn.Linear(hid_dim * 2, num_labels),
        )

    def forward(self, hidden_states):
        return self.logit_fc(hidden_states)
class LxmertVisualAnswerHead(nn.Module):
    def __init__(self, config, num_labels):
        pass

    def forward(self, hidden_states):
        pass
3
0
6
0
6
0
1
0
1
2
1
0
2
1
2
12
13
1
12
5
9
0
7
5
4
1
1
0
2
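This head is what `get_qa_logit_layer` reaches into: `logit_fc[-1]` is the final Linear. A hedged sketch (the 1536 width assumes hidden_size=768, and num_labels=100 is arbitrary):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertVisualAnswerHead

config = LxmertConfig()
head = LxmertVisualAnswerHead(config, num_labels=100)
print(head.logit_fc[-1])  # Linear(in_features=1536, out_features=100, bias=True)
print(head(torch.randn(1, config.hidden_size)).shape)  # (1, 100)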
3,549
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertVisualFeatureEncoder
from torch import nn


class LxmertVisualFeatureEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        feat_dim = config.visual_feat_dim
        pos_dim = config.visual_pos_dim
        # Object feature encoding
        self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
        self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        # Box position encoding
        self.box_fc = nn.Linear(pos_dim, config.hidden_size)
        self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, visual_feats, visual_pos):
        x = self.visn_fc(visual_feats)
        x = self.visn_layer_norm(x)
        y = self.box_fc(visual_pos)
        y = self.box_layer_norm(y)
        # Average the two embeddings rather than summing them
        output = (x + y) / 2
        output = self.dropout(output)
        return output
class LxmertVisualFeatureEncoder(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, visual_feats, visual_pos):
        pass
3
0
12
2
9
1
1
0.11
1
1
0
0
2
5
2
12
25
5
18
13
15
2
18
13
15
1
1
0
2
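A shape check for the encoder above (dummy tensors; the 36-region count and the default feature dimensions are assumptions):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertVisualFeatureEncoder

config = LxmertConfig()  # visual_feat_dim=2048, visual_pos_dim=4 by default
encoder = LxmertVisualFeatureEncoder(config)
feats = torch.randn(1, 36, config.visual_feat_dim)  # e.g. 36 ROI features per image
boxes = torch.rand(1, 36, config.visual_pos_dim)    # normalized box coordinates
print(encoder(feats, boxes).shape)                  # (1, 36, 768): average of feature and box embeddings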
3,550
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertVisualObjHead
from torch import nn


class LxmertVisualObjHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = LxmertPredictionHeadTransform(config)
        # Decide which visual losses to compute from the config flags.
        visual_losses = {}
        if config.visual_obj_loss:
            visual_losses['obj'] = {'shape': (-1,), 'num': config.num_object_labels}
        if config.visual_attr_loss:
            visual_losses['attr'] = {'shape': (-1,), 'num': config.num_attr_labels}
        if config.visual_feat_loss:
            visual_losses['feat'] = {'shape': (-1, config.visual_feat_dim), 'num': config.visual_feat_dim}
        self.visual_losses = visual_losses
        # One decoder per enabled visual loss.
        self.decoder_dict = nn.ModuleDict({key: nn.Linear(config.hidden_size, self.visual_losses[key]['num']) for key in self.visual_losses})

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        output = {}
        for key in self.visual_losses:
            output[key] = self.decoder_dict[key](hidden_states)
        return output
class LxmertVisualObjHead(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
3
0
14
1
12
2
3
0.13
1
2
1
0
2
3
2
12
29
2
24
9
21
3
19
9
16
4
1
1
6
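A hedged sketch of the per-loss decoder dictionary above (dummy input; the default label counts shown in the comments are assumptions about the stock config):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertVisualObjHead

config = LxmertConfig()  # obj/attr/feat visual losses are all enabled by default
head = LxmertVisualObjHead(config)
scores = head(torch.randn(1, 36, config.hidden_size))
for key, value in scores.items():
    print(key, value.shape)  # obj (1, 36, 1600), attr (1, 36, 400), feat (1, 36, 2048)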
3,551
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/modeling_lxmert.py
transformers.models.lxmert.modeling_lxmert.LxmertXLayer
from torch import nn


class LxmertXLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        # The cross-attention layer (shared between the two directions)
        self.visual_attention = LxmertCrossAttentionLayer(config)
        # Per-stream self-attention layers
        self.lang_self_att = LxmertSelfAttentionLayer(config)
        self.visn_self_att = LxmertSelfAttentionLayer(config)
        # Per-stream feed-forward (intermediate + output) layers
        self.lang_inter = LxmertIntermediate(config)
        self.lang_output = LxmertOutput(config)
        self.visn_inter = LxmertIntermediate(config)
        self.visn_output = LxmertOutput(config)

    def cross_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask, output_x_attentions=False):
        # Each stream attends to the other: language queries over visual context, and vice versa.
        lang_att_output = self.visual_attention(lang_input, visual_input, ctx_att_mask=visual_attention_mask, output_attentions=output_x_attentions)
        visual_att_output = self.visual_attention(visual_input, lang_input, ctx_att_mask=lang_attention_mask, output_attentions=False)
        return (lang_att_output, visual_att_output)

    def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
        lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
        visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
        return (lang_att_output[0], visual_att_output[0])

    def output_fc(self, lang_input, visual_input):
        lang_inter_output = self.lang_inter(lang_input)
        visual_inter_output = self.visn_inter(visual_input)
        lang_output = self.lang_output(lang_inter_output, lang_input)
        visual_output = self.visn_output(visual_inter_output, visual_input)
        return (lang_output, visual_output)

    def forward(self, lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions=False):
        lang_att_output, visual_att_output = self.cross_att(lang_input=lang_feats, lang_attention_mask=lang_attention_mask, visual_input=visual_feats, visual_attention_mask=visual_attention_mask, output_x_attentions=output_attentions)
        attention_probs = lang_att_output[1:]
        lang_att_output, visual_att_output = self.self_att(lang_att_output[0], lang_attention_mask, visual_att_output[0], visual_attention_mask)
        lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
        return (lang_output, visual_output, attention_probs[0]) if output_attentions else (lang_output, visual_output)
class LxmertXLayer(nn.Module):
    def __init__(self, config):
        pass

    def cross_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask, output_x_attentions=False):
        pass

    def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
        pass

    def output_fc(self, lang_input, visual_input):
        pass

    def forward(self, lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions=False):
        pass
6
0
17
1
14
1
1
0.1
1
5
4
0
5
7
5
15
89
9
73
38
53
7
30
24
24
2
1
0
6
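The cross-modality layer runs cross-attention, then per-stream self-attention, then per-stream FFNs; a hedged sketch with dummy tensors (passing None masks is an assumption that the attention module tolerates unmasked inputs):

import torch
from transformers import LxmertConfig
from transformers.models.lxmert.modeling_lxmert import LxmertXLayer

config = LxmertConfig()
xlayer = LxmertXLayer(config)
lang = torch.randn(1, 8, config.hidden_size)
visn = torch.randn(1, 36, config.hidden_size)
lang_out, visn_out = xlayer(lang, None, visn, None, output_attentions=False)
print(lang_out.shape, visn_out.shape)  # (1, 8, 768) (1, 36, 768)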
3,552
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/tokenization_lxmert.py
transformers.models.lxmert.tokenization_lxmert.LxmertTokenizer
import os from typing import Optional from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace import collections class LxmertTokenizer(PreTrainedTokenizer): """ Construct a Lxmert tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original Lxmert). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. """ vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Lxmert sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,)
class LxmertTokenizer(PreTrainedTokenizer): ''' Construct a Lxmert tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original Lxmert). clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. ''' def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs): pass @property def do_lower_case(self): pass @property def vocab_size(self): pass def get_vocab(self): pass def _tokenize(self, text, split_special_tokens=False): pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A Lxmert sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
14
6
15
1
10
4
2
0.72
1
9
2
0
12
5
12
101
236
29
121
53
85
87
65
29
52
6
3
3
27
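The `build_inputs_with_special_tokens` / `get_special_tokens_mask` pair in the record above fully determines the `[CLS] A [SEP] B [SEP]` layout. A minimal sketch of that layout, assuming placeholder ids (`cls_id=101`, `sep_id=102` are illustrative, not values read from any real vocab file):

```python
cls_id, sep_id = 101, 102  # placeholder ids, not from a real vocabulary

def build_inputs(token_ids_0, token_ids_1=None):
    # single sequence: [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP]
    if token_ids_1 is None:
        return [cls_id] + token_ids_0 + [sep_id]
    return [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]

def special_tokens_mask(token_ids_0, token_ids_1=None):
    # 1 marks a special token, 0 marks a sequence token
    if token_ids_1 is None:
        return [1] + [0] * len(token_ids_0) + [1]
    return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]

assert build_inputs([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert special_tokens_mask([7, 8], [9]) == [1, 0, 0, 1, 0, 1]
```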
3,553
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/lxmert/tokenization_lxmert_fast.py
transformers.models.lxmert.tokenization_lxmert_fast.LxmertTokenizerFast
from .tokenization_lxmert import LxmertTokenizer import json from typing import Optional from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast class LxmertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original Lxmert). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = LxmertTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Lxmert sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
class LxmertTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original Lxmert). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. ''' def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Lxmert sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
4
2
24
3
14
7
2
1.12
1
4
0
0
4
1
4
92
141
18
58
29
38
65
27
14
22
2
3
1
7
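The `__init__` in the record above compares the serialized normalizer state against the requested options and rebuilds the normalizer only when they disagree. A toy sketch of that reconciliation pattern, with plain dicts standing in for the real `tokenizers` normalizer object (all values here are illustrative):

```python
# Serialized state of an existing normalizer vs. the options the caller asked for.
saved_state = {"type": "BertNormalizer", "lowercase": True,
               "strip_accents": None, "handle_chinese_chars": True}
requested = {"lowercase": False, "strip_accents": None, "handle_chinese_chars": True}

# Rebuild only if any requested option disagrees with the saved state;
# options absent from the saved state are treated as already matching.
needs_rebuild = any(saved_state.get(key, value) != value for key, value in requested.items())
if needs_rebuild:
    new_state = {**saved_state, **requested}  # keep "type", override the options
    print("rebuilding normalizer with", new_state)
```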
3,554
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/configuration_m2m_100.py
transformers.models.m2m_100.configuration_m2m_100.M2M100Config
from ...configuration_utils import PretrainedConfig class M2M100Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`M2M100Model`]. It is used to instantiate an M2M100 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the M2M100 [facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 128112): Vocabulary size of the M2M100 model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`M2M100Model`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.05): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.05): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Example: ```python >>> from transformers import M2M100Config, M2M100Model >>> # Initializing a M2M100 facebook/m2m100_418M style configuration >>> configuration = M2M100Config() >>> # Initializing a model (with random weights) from the facebook/m2m100_418M style configuration >>> model = M2M100Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'm2m_100' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
class M2M100Config(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`M2M100Model`]. It is used to instantiate an M2M100 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the M2M100 [facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 128112): Vocabulary size of the M2M100 model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`M2M100Model`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.05): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.05): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Example: ```python >>> from transformers import M2M100Config, M2M100Model >>> # Initializing a M2M100 facebook/m2m100_418M style configuration >>> configuration = M2M100Config() >>> # Initializing a model (with random weights) from the facebook/m2m100_418M style configuration >>> model = M2M100Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): pass
2
1
55
1
54
1
1
1.03
1
1
0
0
1
19
1
1
128
11
58
50
30
60
25
24
23
1
1
0
1
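The `attribute_map` in the record above routes the generic config names to the model-specific fields. A hedged usage sketch (assumes `transformers` is installed; the sizes 512 and 8 are arbitrary illustrative values):

```python
from transformers import M2M100Config

# attribute_map = {'num_attention_heads': 'encoder_attention_heads',
#                  'hidden_size': 'd_model'} makes the generic names aliases.
config = M2M100Config(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == 512         # resolves to d_model
assert config.num_attention_heads == 8   # resolves to encoder_attention_heads
```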
3,555
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/configuration_m2m_100.py
transformers.models.m2m_100.configuration_m2m_100.M2M100OnnxConfig
from typing import Any from ... import PreTrainedTokenizer from ...onnx.utils import compute_effective_axis_dimension from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast from ...utils import is_torch_available, logging from collections import OrderedDict from collections.abc import Mapping class M2M100OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add) dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors='pt')) return common_inputs def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair) decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch batch, encoder_seq_length = common_inputs['input_ids'].shape decoder_seq_length = common_inputs['decoder_input_ids'].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads) decoder_past_length = decoder_seq_length + 3 decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads) common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1) common_inputs['past_key_values'] = [] num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(min_num_layers): 
common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape))) shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm
class M2M100OnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass
5
0
37
3
31
3
4
0.14
1
8
0
0
3
0
3
3
121
12
96
40
76
13
47
25
42
8
1
2
12
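The dummy `past_key_values` built in `_generate_dummy_inputs_for_default_and_seq2seq_lm` above follow a fixed shape recipe. A sketch that recomputes those shapes with toy sizes (`batch=2`, `heads=4`, `d_model=16` are assumptions, not real model dimensions):

```python
import torch

batch, heads, d_model = 2, 4, 16          # toy sizes for illustration only
encoder_seq_length, decoder_seq_length = 5, 1
head_dim = d_model // heads

encoder_shape = (batch, heads, encoder_seq_length, head_dim)
decoder_past_length = decoder_seq_length + 3   # the "+ 3" from the method above
decoder_shape = (batch, heads, decoder_past_length, head_dim)

# One per-layer entry: (self-attn key, self-attn value, cross-attn key, cross-attn value)
layer_past = (torch.zeros(decoder_shape), torch.zeros(decoder_shape),
              torch.zeros(encoder_shape), torch.zeros(encoder_shape))
assert layer_past[0].shape == (2, 4, 4, 4)
assert layer_past[2].shape == (2, 4, 5, 4)
```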
3,556
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100Attention
from .configuration_m2m_100 import M2M100Config from ...utils.deprecation import deprecate_kwarg from typing import Callable, Optional, Union from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_flash_attention_utils import FlashAttentionKwargs from torch import nn from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack import torch class M2M100Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[M2M100Config]=None, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(*kv_input_shape).transpose(1, 2) value_states = value_states.view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not is_cross_attention else None key_states, 
value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
class M2M100Attention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[M2M100Config]=None, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
4
2
50
7
35
8
5
0.24
1
7
1
2
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
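The head-splitting in `M2M100Attention.forward` above is a `view` followed by a `transpose`, guarded by the divisibility check in `__init__`. A shape walkthrough with toy sizes (`embed_dim=8`, `num_heads=2` are illustrative assumptions):

```python
import torch

bsz, tgt_len, embed_dim, num_heads = 1, 3, 8, 2   # toy sizes
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim          # the divisibility check in __init__
scaling = head_dim ** -0.5                        # 1/sqrt(head_dim), as above

hidden_states = torch.randn(bsz, tgt_len, embed_dim)
# view + transpose, mirroring the q_input_shape manipulation in forward()
query = hidden_states.view(bsz, tgt_len, -1, head_dim).transpose(1, 2)
assert query.shape == (bsz, num_heads, tgt_len, head_dim)
```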
3,557
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100Decoder
from torch import nn from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...integrations.fsdp import is_fsdp_managed_module import math from typing import Callable, Optional, Union from .configuration_m2m_100 import M2M100Config from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput class M2M100Decoder(M2M100PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`] Args: config: M2M100Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = M2M100ScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = M2M100SinusoidalPositionalEmbedding(config.max_position_embeddings, config.d_model, self.padding_idx) self.layers = nn.ModuleList([M2M100DecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. 
Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) batch_size, seq_length = inputs_embeds.size()[:-1] past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values attention_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache) encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds) positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.layerdrop if not skip_the_layer or synced_gpus: layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if skip_the_layer: continue if output_attentions: all_self_attns += (layer_outputs[1],) all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, 
cross_attentions=all_cross_attentions)
class M2M100Decoder(M2M100PreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`] Args: config: M2M100Config embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
3
2
137
20
82
36
23
0.47
1
13
5
0
2
11
2
3
283
42
164
46
147
77
83
32
80
43
2
3
46
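The `cache_position` arithmetic in `M2M100Decoder.forward` above simply continues numbering from the cached length. A minimal sketch (the lengths 4 and 1 are illustrative):

```python
import torch

# With 4 tokens already cached and a 1-token decoding step,
# the new position continues from the cached length.
past_key_values_length, seq_length = 4, 1
cache_position = torch.arange(past_key_values_length,
                              past_key_values_length + seq_length)
assert cache_position.tolist() == [4]
```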
3,558
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100DecoderLayer
from ...utils.deprecation import deprecate_kwarg from .configuration_m2m_100 import M2M100Config from ...modeling_layers import GradientCheckpointingLayer from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...activations import ACT2FN class M2M100DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: M2M100Config, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.d_model self.self_attn = M2M100Attention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = M2M100Attention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
class M2M100DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: M2M100Config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
4
1
58
6
40
13
4
0.31
1
4
1
0
2
11
2
12
118
12
81
32
67
25
44
21
41
6
1
1
7
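`M2M100DecoderLayer.forward` above repeats the same pre-norm residual pattern three times (self-attention, cross-attention, feed-forward). A minimal sketch of one such sub-block, with `nn.Identity` standing in for the real attention/FFN module:

```python
import torch
from torch import nn

d_model = 8                       # toy width
layer_norm = nn.LayerNorm(d_model)
sublayer = nn.Identity()          # stand-in for self-attn / cross-attn / FFN

x = torch.randn(2, 5, d_model)
residual = x
h = layer_norm(x)                 # normalize first (pre-norm)
h = sublayer(h)                   # the real module would go here
x = residual + h                  # then add back the residual
assert x.shape == (2, 5, d_model)
```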
3,559
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100Encoder
from .configuration_m2m_100 import M2M100Config
from typing import Callable, Optional, Union
import torch
import math
from torch import nn
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput

class M2M100Encoder(M2M100PreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`M2M100EncoderLayer`].

    Args:
        config: M2M100Config
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding]=None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.embed_tokens = M2M100ScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale)
        if embed_tokens is not None:
            self.embed_tokens.weight = embed_tokens.weight
        self.embed_positions = M2M100SinusoidalPositionalEmbedding(config.max_position_embeddings, embed_dim, self.padding_idx)
        self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        self.post_init()

    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        embed_pos = self.embed_positions(input_ids, inputs_embeds)
        embed_pos = embed_pos.to(inputs_embeds.device)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        attention_mask = self._update_full_mask(attention_mask, inputs_embeds)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # LayerDrop: skip this layer with probability `self.layerdrop` during training.
            dropout_probability = torch.rand([])
            skip_the_layer = self.training and dropout_probability < self.layerdrop
            if not skip_the_layer or synced_gpus:
                layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                layer_outputs = (None, None)
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class M2M100Encoder(M2M100PreTrainedModel):
    '''
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`M2M100EncoderLayer`].

    Args:
        config: M2M100Config
        embed_tokens (nn.Embedding): output embedding
    '''
    def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding]=None):
        pass

    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
        '''
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
3
2
87
15
53
20
15
0.44
1
12
5
0
2
11
2
3
185
32
106
35
94
47
67
26
64
27
2
3
30
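The encoder's forward pass implements LayerDrop: during training each layer is skipped with probability `config.encoder_layerdrop`, unless GPUs must stay in sync (DeepSpeed ZeRO-3 / FSDP), in which case the layer still runs and only its attention output is dropped. A minimal sketch of the skip decision, with arbitrary toy dimensions:

```python
import torch
from torch import nn

torch.manual_seed(0)
layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(6)])
layerdrop, training, synced_gpus = 0.3, True, False

hidden_states = torch.randn(2, 4, 8)
for layer in layers:
    # One Bernoulli draw per layer per forward pass, as in the encoder loop above.
    skip_the_layer = training and torch.rand([]) < layerdrop
    if not skip_the_layer or synced_gpus:
        # With synced_gpus=True the layer would still run (keeping ZeRO-3/FSDP
        # collectives aligned across ranks) even when it counts as "skipped".
        hidden_states = layer(hidden_states)
print(hidden_states.shape)  # torch.Size([2, 4, 8])
```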
3,560
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100EncoderLayer
from torch import nn
import torch
from ...activations import ACT2FN
from .configuration_m2m_100 import M2M100Config
from ...modeling_layers import GradientCheckpointingLayer

class M2M100EncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: M2M100Config):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = M2M100Attention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        if hidden_states.dtype == torch.float16:
            # Clamp just below the fp16 maximum to avoid inf/nan overflow.
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        return (hidden_states, attn_weights)
class M2M100EncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: M2M100Config):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
        '''
        pass
3
1
33
3
25
6
2
0.22
1
4
1
0
2
9
2
12
68
7
50
22
41
11
32
16
29
3
1
1
4
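The encoder layer ends with a float16 safety clamp: activations are clipped just below the fp16 maximum so that subsequent operations cannot overflow to inf. A standalone illustration of the same arithmetic:

```python
import torch

clamp_value = torch.finfo(torch.float16).max - 1000  # 65504 - 1000
vals = torch.tensor([7.0e4, -7.0e4, 1.5])            # 7e4 would overflow fp16's max of 65504
clamped = torch.clamp(vals, min=-clamp_value, max=clamp_value).to(torch.float16)
print(clamped, torch.isinf(clamped).any())           # no infs after clamping
```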
3,561
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100ForConditionalGeneration
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_m2m_100 import M2M100Config
from torch import nn
from ...generation import GenerationMixin
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from typing import Callable, Optional, Union
from torch.nn import CrossEntropyLoss

@auto_docstring(custom_intro='\n    The M2M100 Model with a language modeling head. Can be used for summarization.\n    ')
class M2M100ForConditionalGeneration(M2M100PreTrainedModel, GenerationMixin):
    base_model_prefix = 'model'
    _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight']

    def __init__(self, config: M2M100Config):
        super().__init__(config)
        self.model = M2M100Model(config)
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]:
        """
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example Translation:

        ```python
        >>> from transformers import AutoTokenizer, M2M100ForConditionalGeneration

        >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")

        >>> text_to_translate = "Life is like a box of chocolates"
        >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")

        >>> # translate to French
        >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
        >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
        outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
        lm_logits = self.lm_head(outputs[0])
        masked_lm_loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return (masked_lm_loss,) + output if masked_lm_loss is not None else output
        return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n    The M2M100 Model with a language modeling head. Can be used for summarization.\n    ')
class M2M100ForConditionalGeneration(M2M100PreTrainedModel, GenerationMixin):
    def __init__(self, config: M2M100Config):
        pass

    def get_encoder(self):
        pass

    def get_decoder(self):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqLMOutput]:
        '''
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example Translation:

        ```python
        >>> from transformers import AutoTokenizer, M2M100ForConditionalGeneration

        >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")

        >>> text_to_translate = "Life is like a box of chocolates"
        >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")

        >>> # translate to French
        >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
        >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
        ```
        '''
        pass
7
1
14
1
12
1
2
0.1
2
7
3
0
6
2
7
8
112
14
89
39
59
9
37
19
29
7
2
2
14
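When `labels` are given without `decoder_input_ids`, the model derives them with `shift_tokens_right`, which is referenced but not defined in this record. A plausible sketch of that helper under the usual seq2seq convention (prepend `decoder_start_token_id`, drop the last label, replace `-100` with the pad id); treat it as an assumption, not the file's exact implementation:

```python
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    """Sketch of the canonical helper: shift labels one position to the right."""
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)  # loss-ignored positions become padding
    return shifted

labels = torch.tensor([[11, 12, 13, -100]])
print(shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2))
# tensor([[ 2, 11, 12, 13]])
```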
3,562
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100Model
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import math
import torch
from typing import Callable, Optional, Union
from .configuration_m2m_100 import M2M100Config
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput

@auto_docstring
class M2M100Model(M2M100PreTrainedModel):
    _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']

    def __init__(self, config: M2M100Config):
        super().__init__(config)
        padding_idx, vocab_size = (config.pad_token_id, config.vocab_size)
        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.shared = M2M100ScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale)
        self.encoder = M2M100Encoder(config, self.shared)
        self.decoder = M2M100Decoder(config, self.shared)
        self.post_init()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def _tie_weights(self):
        if self.config.tie_word_embeddings:
            self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
            self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)

    def get_encoder(self):
        return self.encoder

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
        """
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if encoder_outputs is None:
            encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
            encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
        decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring
class M2M100Model(M2M100PreTrainedModel):
    def __init__(self, config: M2M100Config):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    def _tie_weights(self):
        pass

    def get_encoder(self):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
        '''
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        '''
        pass
9
1
15
1
13
0
3
0.03
1
9
6
0
7
3
7
8
118
15
100
33
69
3
38
15
30
10
2
1
19
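Because `forward` accepts precomputed `encoder_outputs`, the encoder pass can run once and be reused across several decoder calls (which is what generation with beam search does). A minimal sketch with a tiny randomly initialized config; every hyperparameter value below is an arbitrary choice for illustration:

```python
import torch
from transformers import M2M100Config, M2M100Model

config = M2M100Config(vocab_size=64, d_model=16, encoder_layers=1, decoder_layers=1,
                      encoder_attention_heads=2, decoder_attention_heads=2,
                      encoder_ffn_dim=32, decoder_ffn_dim=32)
model = M2M100Model(config).eval()

input_ids = torch.tensor([[5, 6, 7, config.eos_token_id]])
encoder_outputs = model.get_encoder()(input_ids=input_ids)  # run the encoder once

# Two different decoder prefixes reuse the same encoder pass.
for prefix in ([2, 9], [2, 10]):
    out = model(encoder_outputs=encoder_outputs, decoder_input_ids=torch.tensor([prefix]))
    print(out.last_hidden_state.shape)  # torch.Size([1, 2, 16])
```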
3,563
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100PreTrainedModel
from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
import torch
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from torch import nn
from .configuration_m2m_100 import M2M100Config
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel

@auto_docstring
class M2M100PreTrainedModel(PreTrainedModel):
    config: M2M100Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['M2M100EncoderLayer', 'M2M100DecoderLayer']
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = False

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
        if attention_mask is not None:
            if 'flash' in self.config._attn_implementation:
                attention_mask = attention_mask if 0 in attention_mask else None
            elif self.config._attn_implementation == 'sdpa':
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
            elif self.config._attn_implementation == 'flex_attention':
                if isinstance(attention_mask, torch.Tensor):
                    attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
            else:
                attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
        return attention_mask

    def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
        if self.config._attn_implementation == 'flex_attention':
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            elif attention_mask is None:
                # attention_mask is None on this branch, so the all-ones mask must be
                # built on the input tensor's device, not on attention_mask.device.
                attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=input_tensor.device))
            return attention_mask
        if 'flash' in self.config._attn_implementation:
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
        if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
                return None
        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
        if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']):
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
        return causal_mask

    def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            if 'flash' in self.config._attn_implementation:
                encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
            elif self.config._attn_implementation == 'sdpa':
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            elif self.config._attn_implementation == 'flex_attention':
                if isinstance(encoder_attention_mask, torch.Tensor):
                    encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
            else:
                encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        return encoder_attention_mask
@auto_docstring
class M2M100PreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        pass

    def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
        pass

    def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
        pass

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        '''
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        '''
        pass

    def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
        pass
8
1
10
0
10
0
5
0
1
0
0
4
1
0
1
1
18
1
17
9
15
0
16
9
14
5
1
2
5
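The static method above expands a 2D padding mask into the additive 4D mask attention expects: an upper-triangular causal part filled with the dtype minimum, then padding positions folded in. A condensed sketch of the same construction (it drops the `cache_position` machinery; `build_causal_4d` is a hypothetical name):

```python
import torch

def build_causal_4d(attention_mask_2d: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    bsz, seq_len = attention_mask_2d.shape
    min_dtype = torch.finfo(dtype).min
    # Causal part: position i may not attend to j > i (strictly above the diagonal).
    causal = torch.full((seq_len, seq_len), min_dtype, dtype=dtype)
    causal = torch.triu(causal, diagonal=1)
    causal = causal[None, None, :, :].expand(bsz, 1, -1, -1).clone()
    # Fold in padding: where the causal part allows attention (0) but the key
    # token is padding (0 in the 2D mask), the sum is 0, and we block it.
    padding = causal + attention_mask_2d[:, None, None, :].to(dtype)
    return causal.masked_fill(padding == 0, min_dtype)

mask = torch.tensor([[1, 1, 1, 0]])  # last token is padding
print(build_causal_4d(mask)[0, 0])   # min_dtype above the diagonal and in the last column
```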
3,564
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100ScaledWordEmbedding
from torch import nn
import torch
from typing import Callable, Optional, Union

class M2M100ScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embedding's forward by multiplying with the embedding scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale
class M2M100ScaledWordEmbedding(nn.Embedding):
    '''
    This module overrides nn.Embedding's forward by multiplying with the embedding scale.
    '''
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
        pass

    def forward(self, input_ids: torch.Tensor):
        pass
3
1
3
0
3
0
1
0.5
1
4
0
0
2
1
2
2
11
2
6
4
3
3
6
4
3
1
1
0
2
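A quick check of the scaling behavior: with `embed_scale = sqrt(d_model)` the output is exactly the plain lookup times the scale. `ScaledEmbedding` below restates the class under a hypothetical name so the snippet is self-contained:

```python
import math
import torch
from torch import nn

class ScaledEmbedding(nn.Embedding):
    """Minimal restatement of the override shown above."""
    def __init__(self, num_embeddings, embedding_dim, padding_idx, embed_scale=1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids):
        return super().forward(input_ids) * self.embed_scale

emb = ScaledEmbedding(10, 4, padding_idx=0, embed_scale=math.sqrt(4))  # scale = 2.0
ids = torch.tensor([[1, 2, 0]])
assert torch.allclose(emb(ids), nn.functional.embedding(ids, emb.weight, padding_idx=0) * 2.0)
```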
3,565
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/modeling_m2m_100.py
transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
from torch import nn
from typing import Callable, Optional, Union
import math
import torch

class M2M100SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, 'weights'):
            # In forward, put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
        self.register_buffer('weights', emb_weights, persistent=False)

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0):
        if input_ids is not None:
            bsz, seq_len = input_ids.size()
            position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
        else:
            bsz, seq_len = inputs_embeds.size()[:-1]
            position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, self.padding_idx)
        max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()

    @staticmethod
    def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
        return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length

    @staticmethod
    def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            input_ids: torch.Tensor

        Returns: torch.Tensor
        """
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx
class M2M100SinusoidalPositionalEmbedding(nn.Module):
    '''This module produces sinusoidal positional embeddings of any length.'''
    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None):
        pass

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
        pass

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None):
        '''
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        '''
        pass

    @torch.no_grad()
    def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0):
        pass

    @staticmethod
    def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
        '''
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        '''
        pass

    @staticmethod
    def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
        '''
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            input_ids: torch.Tensor

        Returns: torch.Tensor
        '''
        pass
11
4
13
2
9
3
2
0.34
1
3
0
0
4
3
5
15
76
13
47
22
37
16
38
18
32
3
1
1
10
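The padding-aware position ids are worth seeing on a concrete batch: non-pad tokens count up from `padding_idx + 1`, pad tokens keep `padding_idx`, so the zeroed sinusoidal row for padding is always selected for them. The helper below is lifted verbatim from the static method in this record:

```python
import torch

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

ids = torch.tensor([[5, 6, 7, 1, 1]])  # padding_idx = 1, last two tokens are padding
print(create_position_ids_from_input_ids(ids, padding_idx=1))
# tensor([[2, 3, 4, 1, 1]])
```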
3,566
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/m2m_100/tokenization_m2m_100.py
transformers.models.m2m_100.tokenization_m2m_100.M2M100Tokenizer
from typing import Any, Optional, Union
import os
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils.import_utils import requires
from shutil import copyfile
from pathlib import Path

@requires(backends=('sentencepiece',))
class M2M100Tokenizer(PreTrainedTokenizer):
    """
    Construct an M2M100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        spm_file (`str`):
            Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension)
            that contains the vocabulary.
        src_lang (`str`, *optional*):
            A string representing the source language.
        tgt_lang (`str`, *optional*):
            A string representing the target language.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        language_codes (`str`, *optional*, defaults to `"m2m100"`):
            What language codes to use. Should be one of `"m2m100"` or `"wmt21"`.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Examples:

    ```python
    >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
    >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
    >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
    >>> outputs = model(**model_inputs)  # should work
    ```"""
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: list[int] = []
    suffix_tokens: list[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', pad_token='<pad>', unk_token='<unk>', language_codes='m2m100', sp_model_kwargs: Optional[dict[str, Any]]=None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        additional_special_tokens = kwargs.pop('additional_special_tokens', [])
        for lang_code in fairseq_language_code:
            token = self.get_lang_token(lang_code)
            # Add the language token unless the user already supplied it.
            if token not in additional_special_tokens and str(token) not in self.added_tokens_encoder:
                additional_special_tokens.append(token)
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.num_madeup_words = num_madeup_words
        super().__init__(src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, additional_special_tokens=additional_special_tokens, num_madeup_words=num_madeup_words, **kwargs)
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> list[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + [0] * len(token_ids_0) + suffix_ones
        return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def __getstate__(self) -> dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d: dict) -> None:
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory')
        vocab_save_path = save_dir / ((filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'])
        spm_save_path = save_dir / ((filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'])
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='en', tgt_texts: Optional[list[str]]=None, tgt_lang: str='ro', **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
@requires(backends=('sentencepiece',)) class M2M100Tokenizer(PreTrainedTokenizer): ''' Construct an M2M100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. spm_file (`str`): Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that contains the vocabulary. src_lang (`str`, *optional*): A string representing the source language. tgt_lang (`str`, *optional*): A string representing the target language. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. language_codes (`str`, *optional*, defaults to `"m2m100"`): What language codes to use. Should be one of `"m2m100"` or `"wmt21"`. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
    Examples:

    ```python
    >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
    >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
    >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
    >>> outputs = model(**model_inputs)  # should work
    ```'''

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', pad_token='<pad>', unk_token='<unk>', language_codes='m2m100', sp_model_kwargs: Optional[dict[str, Any]]=None, num_madeup_words=8, **kwargs) -> None:
        pass

    @property
    def vocab_size(self) -> int:
        pass

    def get_vocab(self) -> dict:
        pass

    @property
    def src_lang(self) -> str:
        pass

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        pass

    def _tokenize(self, text: str) -> list[str]:
        pass

    def _convert_token_to_id(self, token):
        pass

    def _convert_id_to_token(self, index: int) -> str:
        '''Converts an index (integer) in a token (str) using the decoder.'''
        pass

    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) in a single string.'''
        pass

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        '''
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        '''
        pass

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An M2M100 sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `[src_lang_code] X [eos]`
        - `decoder_input_ids`: (for decoder) `[tgt_lang_code] X [eos]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass

    def __getstate__(self) -> dict:
        pass

    def __setstate__(self, d: dict) -> None:
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass

    def prepare_seq2seq_batch(self, src_texts: list[str], src_lang: str='en', tgt_texts: Optional[list[str]]=None, tgt_lang: str='ro', **kwargs) -> BatchEncoding:
        pass

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        '''Used by the translation pipeline to prepare inputs for the generate function'''
        pass

    def _switch_to_input_mode(self):
        pass

    def _switch_to_target_mode(self):
        pass

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        '''Reset the special tokens to the source lang setting. Prefix=[src_lang_code] and suffix=[eos].'''
        pass

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        '''Reset the special tokens to the target language setting. Prefix=[tgt_lang_code] and suffix=[eos].'''
        pass

    def get_lang_token(self, lang: str) -> str:
        pass

    def get_lang_id(self, lang: str) -> int:
        pass
27
8
10
1
8
2
2
0.47
1
11
1
0
22
17
22
111
317
53
179
93
127
85
127
63
104
6
3
2
40
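The record above shows that M2M100Tokenizer wraps a source sequence as `[src_lang_code] X [eos]`. A minimal runnable sketch of that layout in plain Python; the token ids below are made up for illustration (the real ids come from the loaded vocabulary):

```python
# Hypothetical ids standing in for the tokenizer's real vocabulary.
EOS_ID = 2
LANG_ID = {"en": 128022, "ro": 128076}  # made-up language-code ids

def build_inputs(token_ids, lang):
    # Mirrors set_src_lang_special_tokens + build_inputs_with_special_tokens:
    # prefix is the language code, suffix is EOS.
    prefix, suffix = [LANG_ID[lang]], [EOS_ID]
    return prefix + token_ids + suffix

print(build_inputs([101, 102, 103], "en"))  # [128022, 101, 102, 103, 2]
```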
3,567
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/configuration_mamba.py
transformers.models.mamba.configuration_mamba.MambaConfig
import math
from ...configuration_utils import PretrainedConfig

class MambaConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MAMBA [state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50280):
            Vocabulary size of the MAMBA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MambaModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 16):
            Shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 0):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4):
            Size of the convolution kernel.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`. If set to `False`, residuals will keep the same `dtype` as the rest of the model.
        time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
        time_step_scale (`float`, *optional*, defaults to 1.0):
            Scale used to scale `dt_proj.bias`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
            Init scheme used for `dt_proj.weight`. Should be one of `["random", "uniform"]`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        use_mambapy (`bool`, *optional*, defaults to `False`):
            Determines the fallback strategy during training if the CUDA-based official implementation of Mamba is not available. If `True`, the mamba.py implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.

    Example:

    ```python
    >>> from transformers import MambaConfig, MambaModel

    >>> # Initializing a Mamba configuration
    >>> configuration = MambaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MambaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'mamba'

    def __init__(self, vocab_size=50280, hidden_size=768, state_size=16, num_hidden_layers=32, layer_norm_epsilon=1e-05, pad_token_id=0, bos_token_id=0, eos_token_id=0, expand=2, conv_kernel=4, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_scale=1.0, time_step_min=0.001, time_step_max=0.1, time_step_init_scheme='random', time_step_floor=0.0001, rescale_prenorm_residual=False, use_cache=True, use_mambapy=False, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_epsilon = layer_norm_epsilon
        self.conv_kernel = conv_kernel
        self.expand = expand
        self.intermediate_size = int(expand * self.hidden_size)
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == 'auto' else time_step_rank
        self.time_step_scale = time_step_scale
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_init_scheme = time_step_init_scheme
        self.time_step_floor = time_step_floor
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache
        self.use_mambapy = use_mambapy
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
class MambaConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MAMBA [state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50280):
            Vocabulary size of the MAMBA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MambaModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 16):
            Shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 0):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4):
            Size of the convolution kernel.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`. If set to `False`, residuals will keep the same `dtype` as the rest of the model.
        time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
        time_step_scale (`float`, *optional*, defaults to 1.0):
            Scale used to scale `dt_proj.bias`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
            Init scheme used for `dt_proj.weight`. Should be one of `["random", "uniform"]`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        use_mambapy (`bool`, *optional*, defaults to `False`):
            Determines the fallback strategy during training if the CUDA-based official implementation of Mamba is not available. If `True`, the mamba.py implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.

    Example:

    ```python
    >>> from transformers import MambaConfig, MambaModel

    >>> # Initializing a Mamba configuration
    >>> configuration = MambaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MambaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=50280, hidden_size=768, state_size=16, num_hidden_layers=32, layer_norm_epsilon=1e-05, pad_token_id=0, bos_token_id=0, eos_token_id=0, expand=2, conv_kernel=4, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_scale=1.0, time_step_min=0.001, time_step_max=0.1, time_step_init_scheme='random', time_step_floor=0.0001, rescale_prenorm_residual=False, use_cache=True, use_mambapy=False, **kwargs):
        pass
2
1
55
1
54
0
2
1.14
1
2
0
0
1
25
1
1
132
12
56
55
27
64
29
28
27
2
1
0
2
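MambaConfig's `__init__` above derives `intermediate_size` and resolves `time_step_rank="auto"`. A short sketch recomputing both derived values from the documented defaults:

```python
import math

# Documented defaults: hidden_size=768, expand=2, time_step_rank="auto".
hidden_size, expand, time_step_rank = 768, 2, "auto"

intermediate_size = int(expand * hidden_size)  # 1536
resolved_rank = math.ceil(hidden_size / 16) if time_step_rank == "auto" else time_step_rank  # 48

print(intermediate_size, resolved_rank)  # 1536 48
```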
3,568
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaBlock
from typing import Any, Optional, Union
import torch
from ...modeling_layers import GradientCheckpointingLayer

class MambaBlock(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.residual_in_fp32 = config.residual_in_fp32
        self.norm = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.mixer = MambaMixer(config, layer_idx=layer_idx)

    def forward(self, hidden_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        residual = hidden_states
        hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
        if self.residual_in_fp32:
            residual = residual.to(torch.float32)
        hidden_states = self.mixer(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask)
        hidden_states = residual + hidden_states
        return hidden_states
class MambaBlock(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx):
        pass

    def forward(self, hidden_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        pass
3
0
12
1
12
0
2
0
1
4
3
0
2
5
2
12
26
2
24
15
15
0
16
9
13
2
1
1
3
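MambaBlock above is a pre-norm residual block that optionally keeps the skip connection in fp32. A self-contained sketch of the same pattern, with an `nn.Linear` standing in for the real MambaMixer and `nn.LayerNorm` for MambaRMSNorm (both stand-ins are assumptions for illustration only):

```python
import torch
from torch import nn

class PreNormBlock(nn.Module):
    def __init__(self, hidden_size, residual_in_fp32=True):
        super().__init__()
        self.residual_in_fp32 = residual_in_fp32
        self.norm = nn.LayerNorm(hidden_size)             # stand-in for MambaRMSNorm
        self.mixer = nn.Linear(hidden_size, hidden_size)  # stand-in for MambaMixer

    def forward(self, hidden_states):
        residual = hidden_states
        hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
        if self.residual_in_fp32:
            residual = residual.to(torch.float32)  # keep the skip path in fp32
        return residual + self.mixer(hidden_states)

out = PreNormBlock(8)(torch.randn(2, 4, 8))
print(out.shape)  # torch.Size([2, 4, 8])
```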
3,569
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaForCausalLM
from ...utils import ModelOutput, auto_docstring, logging
import torch
from torch.nn import CrossEntropyLoss
from torch import nn
from ...generation import GenerationMixin
from typing import Any, Optional, Union

@auto_docstring(custom_intro='\n The MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class MambaForCausalLM(MambaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.backbone = MambaModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.backbone.set_input_embeddings(new_embeddings)

    def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], num_new_tokens: int=1, **kwargs) -> dict[str, Any]:
        model_kwargs['cache_params'] = outputs.get('cache_params', None)
        if model_kwargs.get('use_cache', True) and 'cache_position' in model_kwargs and (model_kwargs['cache_position'] is not None):
            model_kwargs['cache_position'] = model_kwargs['cache_position'][-1:] + num_new_tokens
        if 'attention_mask' in model_kwargs:
            attention_mask = model_kwargs['attention_mask']
            model_kwargs['attention_mask'] = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
        return model_kwargs

    def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, **kwargs):
        model_inputs = {'input_ids': input_ids.contiguous()}
        if use_cache and cache_params is None:
            cache_position = torch.arange(0, self.backbone.config.conv_kernel, device=input_ids.device)
            if inputs_embeds is not None:
                model_inputs = {'inputs_embeds': inputs_embeds}
                max_batch_size = inputs_embeds.size(0)
            else:
                max_batch_size = input_ids.size(0)
            cache_params = MambaCache(self.backbone.config, max_batch_size, device=self.device, dtype=self.dtype)
        if use_cache and cache_position[0] > 0:
            model_inputs['input_ids'] = input_ids[:, -1].unsqueeze(-1).contiguous()
            attention_mask = None
        if not use_cache and inputs_embeds is not None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        model_inputs.update({'cache_params': cache_params, 'use_cache': use_cache, 'cache_position': cache_position, 'attention_mask': attention_mask})
        for key, value in kwargs.items():
            if key not in model_inputs:
                model_inputs[key] = value
        return model_inputs

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[MambaCache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, MambaCausalLMOutput]:
        """
        cache_params (`MambaCache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        mamba_outputs = self.backbone(input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, cache_position=cache_position, attention_mask=attention_mask)
        hidden_states = mamba_outputs[0]
        logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            output = (logits,) + mamba_outputs[1:]
            return (loss,) + output if loss is not None else output
        return MambaCausalLMOutput(loss=loss, logits=logits, cache_params=mamba_outputs.cache_params, hidden_states=mamba_outputs.hidden_states)
@auto_docstring(custom_intro='\n The MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class MambaForCausalLM(MambaPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, new_embeddings):
        pass

    def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], num_new_tokens: int=1, **kwargs) -> dict[str, Any]:
        pass

    def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, **kwargs):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[MambaCache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, MambaCausalLMOutput]:
        '''
        cache_params (`MambaCache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        '''
        pass
9
1
17
2
13
2
2
0.15
2
10
3
0
8
2
8
9
151
20
115
46
77
17
53
22
44
6
2
3
19
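MambaForCausalLM above shifts the labels internally, so `labels = input_ids` is valid. A small runnable sketch of exactly that shift-and-flatten loss computation, on random tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

# Logits at position t are scored against the token at position t + 1.
batch, seq_len, vocab = 2, 5, 11
logits = torch.randn(batch, seq_len, vocab)
labels = torch.randint(0, vocab, (batch, seq_len))

shift_logits = logits[..., :-1, :].contiguous()  # drop the last position
shift_labels = labels[..., 1:].contiguous()      # drop the first token
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
print(loss.item())
```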
3,570
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaMixer
from typing import Any, Optional, Union
import torch
from ...utils.import_utils import is_causal_conv1d_available, is_kernels_available, is_mamba_ssm_available, is_mambapy_available
from .configuration_mamba import MambaConfig
from torch import nn
from ...activations import ACT2FN

class MambaMixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)
    """

    def __init__(self, config: MambaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = config.intermediate_size
        self.time_step_rank = int(config.time_step_rank)
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.conv1d = nn.Conv1d(in_channels=self.intermediate_size, out_channels=self.intermediate_size, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.intermediate_size, padding=config.conv_kernel - 1)
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]
        self.use_mambapy = config.use_mambapy
        self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
        self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
        self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
        A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
        A = A.expand(self.intermediate_size, -1).contiguous()
        self.A_log = nn.Parameter(torch.log(A))
        self.D = nn.Parameter(torch.ones(self.intermediate_size))
        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias
        self.warn_slow_implementation()

    def warn_slow_implementation(self):
        causal_conv1d_update, causal_conv1d_fn = _lazy_load_causal_conv1d()
        is_fast_path_available = all((selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn))
        if not is_fast_path_available:
            if self.use_mambapy:
                if is_mambapy_available():
                    logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. Falling back to the mamba.py backend. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and install the kernels library using `pip install kernels` or https://github.com/Dao-AILab/causal-conv1d for causal-conv1d')
                else:
                    raise ImportError('use_mambapy is set to True but the mambapy package is not installed. To install it follow https://github.com/alxndrTL/mamba.py.')
            else:
                logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. Falling back to the sequential implementation of Mamba, as use_mambapy is set to False. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and install the kernels library using `pip install kernels` or https://github.com/Dao-AILab/causal-conv1d for causal-conv1d. For the mamba.py backend, follow https://github.com/alxndrTL/mamba.py.')

    def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        projected_states = self.in_proj(hidden_states).transpose(1, 2)
        if self.training and cache_params is None:
            contextualized_states = mamba_inner_fn(projected_states, self.conv1d.weight, self.conv1d.bias if self.use_conv_bias else None, self.x_proj.weight, self.dt_proj.weight, self.out_proj.weight, self.out_proj.bias.float() if self.use_bias else None, -torch.exp(self.A_log.float()), None, None, self.D.float(), delta_bias=self.dt_proj.bias.float(), delta_softplus=True)
        else:
            causal_conv1d_update, causal_conv1d_fn = _lazy_load_causal_conv1d()
            hidden_states, gate = projected_states.chunk(2, dim=1)
            if attention_mask is not None:
                hidden_states = hidden_states * attention_mask.unsqueeze(1)
            conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
            if cache_params is not None and cache_position[0] > 0:
                hidden_states = causal_conv1d_update(hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation)
                hidden_states = hidden_states.unsqueeze(-1)
            else:
                if cache_params is not None:
                    conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
                    cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
                hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
            if attention_mask is not None:
                hidden_states = hidden_states * attention_mask.unsqueeze(1)
            ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
            time_step, B, C = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1)
            discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)
            A = -torch.exp(self.A_log.float())
            time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, 'bias') else None
            if cache_params is not None and cache_position[0] > 0:
                scan_outputs = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states[..., 0], discrete_time_step[..., 0], A, B[:, 0], C[:, 0], self.D, gate[..., 0], time_proj_bias, dt_softplus=True).unsqueeze(-1)
            else:
                scan_outputs, ssm_state = selective_scan_fn(hidden_states, discrete_time_step, A, B.transpose(1, 2), C.transpose(1, 2), self.D.float(), gate, time_proj_bias, delta_softplus=True, return_last_state=True)
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(self.layer_idx, ssm_state)
            contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
        return contextualized_states

    def slow_forward(self, input_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype
        projected_states = self.in_proj(input_states).transpose(1, 2)
        hidden_states, gate = projected_states.chunk(2, dim=1)
        if attention_mask is not None:
            hidden_states = hidden_states * attention_mask.unsqueeze(1)
        if cache_params is not None:
            ssm_state = cache_params.ssm_states[self.layer_idx].clone()
            ssm_state = ssm_state.to(hidden_states.device)
            if cache_position.shape[0] == self.conv_kernel_size:
                conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
                cache_params.update_conv_state(self.layer_idx, conv_state, cache_position)
                hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
            else:
                conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position)
                conv_state = conv_state.to(self.conv1d.weight.device)
                hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
                if self.use_conv_bias:
                    hidden_states += self.conv1d.bias
                hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
        else:
            ssm_state = torch.zeros((batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype)
            hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
        if attention_mask is not None:
            hidden_states = hidden_states * attention_mask.unsqueeze(1)
        ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
        time_step, B, C = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1)
        discrete_time_step = self.dt_proj(time_step)
        discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2)
        A = -torch.exp(self.A_log.float())
        discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None])
        discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
        deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
        if self.use_mambapy and self.training and (cache_params is None):
            hs = pscan(discrete_A.transpose(1, 2), deltaB_u.transpose(1, 2))
            scan_output = (hs @ C.unsqueeze(-1)).squeeze(3).transpose(1, 2)
            scan_output = scan_output + hidden_states * self.D[None, :, None]
            scan_output = scan_output * self.act(gate)
        else:
            scan_outputs = []
            for i in range(seq_len):
                ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
                scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1))
                scan_outputs.append(scan_output[:, :, 0])
            scan_output = torch.stack(scan_outputs, dim=-1)
            scan_output = scan_output + hidden_states * self.D[None, :, None]
            scan_output = scan_output * self.act(gate)
            if cache_params is not None:
                cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
        contextualized_states = self.out_proj(scan_output.transpose(1, 2))
        return contextualized_states

    def forward(self, hidden_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        causal_conv1d_update, causal_conv1d_fn = _lazy_load_causal_conv1d()
        is_fast_path_available = all((selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn))
        if is_fast_path_available and 'cuda' in self.x_proj.weight.device.type and (not torch._dynamo.is_compiling()):
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
        return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask)
class MambaMixer(nn.Module):
    '''
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)
    '''

    def __init__(self, config: MambaConfig, layer_idx: int):
        pass

    def warn_slow_implementation(self):
        pass

    def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        pass

    def slow_forward(self, input_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        pass

    def forward(self, hidden_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        pass
6
1
62
6
51
10
7
0.24
1
7
2
0
4
19
4
14
261
28
204
67
187
49
111
55
106
11
1
3
26
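The heart of `MambaMixer.slow_forward` above is a per-timestep recurrence over the SSM state. A toy version with tiny random tensors, mirroring the update `ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]` and the per-step readout against C (shapes are arbitrary illustrations):

```python
import torch

batch, channels, seq_len, state = 1, 3, 4, 5
discrete_A = torch.rand(batch, channels, seq_len, state)  # per-step decay
deltaB_u = torch.randn(batch, channels, seq_len, state)   # driven input
C = torch.randn(batch, seq_len, state)                    # readout vectors

ssm_state = torch.zeros(batch, channels, state)
outputs = []
for i in range(seq_len):
    ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
    # (batch, channels, state) @ (batch, state, 1) -> (batch, channels, 1)
    outputs.append(torch.matmul(ssm_state, C[:, i, :].unsqueeze(-1))[:, :, 0])
scan_output = torch.stack(outputs, dim=-1)
print(scan_output.shape)  # torch.Size([1, 3, 4])
```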
3,571
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaModel
import torch
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging
from typing import Any, Optional, Union

@auto_docstring
class MambaModel(MambaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([MambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        self.norm_f = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self._register_load_state_dict_pre_hook(self.load_hook)
        self.post_init()

    def load_hook(self, state_dict, prefix, *args):
        for k in state_dict:
            if 'embedding.' in k:
                state_dict[k.replace('embedding.', 'embeddings.')] = state_dict.pop(k)
                break

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[MambaCache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> Union[tuple, MambaOutput]:
        """
        cache_params (`MambaCache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        """
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        if self.gradient_checkpointing and self.training and use_cache:
            use_cache = False
        if use_cache:
            if cache_params is None:
                cache_params = MambaCache(self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype)
                cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
            elif cache_position is None:
                raise ValueError("You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will be initialized for you automatically")
        else:
            cache_params = None
        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        for mixer_block in self.layers:
            hidden_states = mixer_block(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        hidden_states = self.norm_f(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, cache_params, all_hidden_states] if v is not None))
        return MambaOutput(last_hidden_state=hidden_states, cache_params=cache_params if use_cache else None, hidden_states=all_hidden_states)
@auto_docstring
class MambaModel(MambaPreTrainedModel):
    def __init__(self, config):
        pass

    def load_hook(self, state_dict, prefix, *args):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, new_embeddings):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[MambaCache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> Union[tuple, MambaOutput]:
        '''
        cache_params (`MambaCache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        '''
        pass
8
1
19
2
16
1
5
0.06
1
9
4
0
5
5
5
6
106
16
86
26
64
5
48
14
42
18
2
2
24
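`MambaModel.forward` above validates its inputs with an XOR so that exactly one of `input_ids` / `inputs_embeds` must be given. Enumerating the four cases shows why the expression works:

```python
# The check raises when (input_ids is None) ^ (inputs_embeds is not None) is True.
for input_ids, inputs_embeds in [(None, None), ("ids", None), (None, "emb"), ("ids", "emb")]:
    invalid = (input_ids is None) ^ (inputs_embeds is not None)
    print(input_ids, inputs_embeds, "-> raises" if invalid else "-> ok")
# Only ("ids", None) and (None, "emb") pass the check.
```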
3,572
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaOutput
from typing import Any, Optional, Union
import torch
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging

@dataclass
@auto_docstring(custom_intro='\n Class for the MAMBA model outputs.\n ')
class MambaOutput(ModelOutput):
    """
    cache_params (`MambaCache`):
        The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`.
        Includes both the State space model state matrices after the selective scan, and the Convolutional states.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    cache_params: Optional[MambaCache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(custom_intro='\n Class for the MAMBA model outputs.\n ')
class MambaOutput(ModelOutput):
    '''
    cache_params (`MambaCache`):
        The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`.
        Includes both the State space model state matrices after the selective scan, and the Convolutional states.
    '''
    pass
3
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
22
4
4
4
3
14
4
4
3
0
1
0
0
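MambaOutput above only declares fields; the useful behaviour comes from its ModelOutput base. A rough stand-in (a plain dataclass, explicitly not the transformers implementation) showing the idea that None-valued fields drop out of the tuple form:

```python
from dataclasses import dataclass, fields
from typing import Optional

@dataclass
class TinyOutput:
    last_hidden_state: Optional[str] = None
    cache_params: Optional[str] = None
    hidden_states: Optional[tuple] = None

    def to_tuple(self):
        # Keep only the fields that were actually populated.
        return tuple(getattr(self, f.name) for f in fields(self) if getattr(self, f.name) is not None)

out = TinyOutput(last_hidden_state="h", hidden_states=("h0", "h1"))
print(out.last_hidden_state, out.to_tuple())  # h ('h', ('h0', 'h1'))
```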
3,573
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaPreTrainedModel
from .configuration_mamba import MambaConfig
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging
import math
import torch
from ...modeling_utils import PreTrainedModel

@auto_docstring
class MambaPreTrainedModel(PreTrainedModel):
    config: MambaConfig
    base_model_prefix = 'backbone'
    _no_split_modules = ['MambaBlock', 'MambaMixer']
    supports_gradient_checkpointing = True
    _is_stateful = True

    def _init_weights(self, module):
        """Initialize the weights."""
        std = self.config.initializer_range
        if isinstance(module, MambaMixer):
            A = torch.arange(1, module.ssm_state_size + 1, dtype=torch.float32)[None, :]
            A = A.expand(module.intermediate_size, -1).contiguous()
            module.A_log.copy_(torch.log(A))
            module.D.data.fill_(1.0)
            dt_init_std = self.config.time_step_rank ** (-0.5) * self.config.time_step_scale
            if self.config.time_step_init_scheme == 'constant':
                nn.init.constant_(module.dt_proj.weight, dt_init_std)
            elif self.config.time_step_init_scheme == 'random':
                nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)
            dt = torch.exp(torch.rand(self.config.intermediate_size) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor)
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            module.dt_proj.bias.copy_(inv_dt)
            module.dt_proj.bias._no_reinit = True
            nn.init.kaiming_uniform_(module.conv1d.weight, a=math.sqrt(5))
            if module.conv1d.bias is not None:
                if not getattr(module.conv1d.bias, '_no_reinit', False):
                    nn.init.zeros_(module.conv1d.bias)
            nn.init.kaiming_uniform_(module.out_proj.weight, a=math.sqrt(5))
            if self.config.rescale_prenorm_residual:
                p = module.out_proj.weight
                p /= math.sqrt(self.config.num_hidden_layers)
        if isinstance(module, nn.Linear):
            if not getattr(module.weight, '_no_reinit', False):
                nn.init.normal_(module.weight, std=std)
            if module.bias is not None:
                if not getattr(module.bias, '_no_reinit', False):
                    nn.init.zeros_(module.bias)
        elif isinstance(module, MambaRMSNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, std=std)
@auto_docstring
class MambaPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights.'''
        pass
3
1
46
4
30
12
11
0.44
1
1
1
2
1
0
1
1
58
6
36
11
34
16
30
11
28
11
1
4
11
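The `dt_proj.bias` initialisation in `_init_weights` above stores the inverse of softplus: `dt + log(-expm1(-dt))` equals `log(exp(dt) - 1)`, so `softplus(bias)` reproduces the sampled `dt` at the first forward pass. A quick numerical check of that identity with the config defaults:

```python
import math
import torch

time_step_min, time_step_max, time_step_floor = 0.001, 0.1, 1e-4
# Sample dt log-uniformly in [time_step_min, time_step_max], then clamp.
dt = torch.exp(
    torch.rand(8) * (math.log(time_step_max) - math.log(time_step_min)) + math.log(time_step_min)
).clamp(min=time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))  # inverse softplus of dt

print(torch.allclose(torch.nn.functional.softplus(inv_dt), dt, atol=1e-5))  # True
```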
3,574
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba/modeling_mamba.py
transformers.models.mamba.modeling_mamba.MambaRMSNorm
from torch import nn
import torch

class MambaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-06):
        """
        MambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f'{self.weight.shape[0]}, eps={self.variance_epsilon}'
class MambaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-06):
        '''
        MambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
        '''
        pass

    def forward(self, hidden_states):
        pass

    def extra_repr(self):
        pass
4
1
5
0
4
1
1
0.23
1
1
0
0
3
2
3
13
18
2
13
8
9
3
13
8
9
1
1
0
3
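MambaRMSNorm above normalises by the root-mean-square of the last dimension. Recomputing it by hand, with the weight taken as ones (matching the module's initialisation):

```python
import torch

eps = 1e-6
x = torch.randn(2, 4, 8)
variance = x.pow(2).mean(-1, keepdim=True)
normed = x * torch.rsqrt(variance + eps)
# With weight initialised to ones, the module output equals `normed`.
print(normed.pow(2).mean(-1))  # each entry is ~1.0 after normalisation
```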
3,575
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/configuration_mamba2.py
transformers.models.mamba2.configuration_mamba2.Mamba2Config
from ...configuration_utils import PretrainedConfig
import math

class Mamba2Config(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MAMBA2 [state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        num_heads (`int`, *optional*, defaults to 128):
            Number of heads for the evolution matrices of mamba 2.
        head_dim (`int`, *optional*, defaults to 64):
            Dimension of each head.
        vocab_size (`int`, *optional*, defaults to 32768):
            Vocabulary size of the MAMBA2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Mamba2Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 128):
            Shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 64):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4):
            Size of the convolution kernel.
        n_groups (`int`, *optional*, defaults to 8):
            Number of groups for the evolution matrices of mamba 2.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`. If set to `False`, residuals will keep the same `dtype` as the rest of the model.
        time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
            Accepted range of time step values.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        rms_norm (`bool`, *optional*, defaults to `True`):
            Whether to use RMS norm or not.
        chunk_size (`int`, *optional*, defaults to 256):
            Size of the chunks that will comprise the sequence.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie word embeddings or not.

    Example:

    ```python
    >>> from transformers import Mamba2Config, Mamba2Model

    >>> # Initializing a Mamba2 configuration
    >>> configuration = Mamba2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = Mamba2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'mamba2'

    def __init__(self, num_heads=128, head_dim=64, vocab_size=32768, hidden_size=4096, state_size=128, num_hidden_layers=64, layer_norm_epsilon=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, expand=2, conv_kernel=4, n_groups=8, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, time_step_limit=(0.0, float('inf')), rescale_prenorm_residual=False, use_cache=True, rms_norm=True, chunk_size=256, tie_word_embeddings=False, **kwargs):
        if hidden_size * expand != num_heads * head_dim:
            raise ValueError(f'Inconsistent configuration: hidden_size * expand ({hidden_size * expand}) must equal num_heads * head_dim ({num_heads * head_dim}).')
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.state_size = state_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_epsilon = layer_norm_epsilon
        self.conv_kernel = conv_kernel
        self.expand = expand
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.use_bias = use_bias
        self.use_conv_bias = use_conv_bias
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == 'auto' else time_step_rank
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_floor = time_step_floor
        self.rescale_prenorm_residual = rescale_prenorm_residual
        self.residual_in_fp32 = residual_in_fp32
        self.use_cache = use_cache
        self.n_groups = n_groups
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.rms_norm = rms_norm
        self.chunk_size = chunk_size
        self.time_step_limit = time_step_limit
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class Mamba2Config(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MAMBA2 [state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        num_heads (`int`, *optional*, defaults to 128):
            Number of heads for the evolution matrices of mamba 2.
        head_dim (`int`, *optional*, defaults to 64):
            Dimension of each head.
        vocab_size (`int`, *optional*, defaults to 32768):
            Vocabulary size of the MAMBA2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Mamba2Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        state_size (`int`, *optional*, defaults to 128):
            Shape of the state space latents.
        num_hidden_layers (`int`, *optional*, defaults to 64):
            Number of hidden layers in the model.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end of sentence token in the vocabulary.
        expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the intermediate size.
        conv_kernel (`int`, *optional*, defaults to 4):
            Size of the convolution kernel.
        n_groups (`int`, *optional*, defaults to 8):
            Number of groups for the evolution matrices of mamba 2.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.1):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        residual_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether or not residuals should be in `float32`. If set to `False`, residuals will keep the same `dtype` as the rest of the model.
        time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
            Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
            Accepted range of time step values.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
            Whether or not to rescale `out_proj` weights when initializing.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the cache should be used.
        rms_norm (`bool`, *optional*, defaults to `True`):
            Whether to use RMS norm or not.
        chunk_size (`int`, *optional*, defaults to 256):
            Size of the chunks that will comprise the sequence.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie word embeddings or not.

    Example:

    ```python
    >>> from transformers import Mamba2Config, Mamba2Model

    >>> # Initializing a Mamba2 configuration
    >>> configuration = Mamba2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = Mamba2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, num_heads=128, head_dim=64, vocab_size=32768, hidden_size=4096, state_size=128, num_hidden_layers=64, layer_norm_epsilon=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, expand=2, conv_kernel=4, n_groups=8, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, time_step_limit=(0.0, float('inf')), rescale_prenorm_residual=False, use_cache=True, rms_norm=True, chunk_size=256, tie_word_embeddings=False, **kwargs):
        pass
2
1
70
2
68
0
2
1.03
1
2
0
0
1
28
1
1
155
13
70
62
37
72
33
31
31
2
1
0
2
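Mamba2Config's first act above is a consistency check between the expanded hidden size and the head layout. Evaluating it with the documented defaults:

```python
# hidden_size * expand must equal num_heads * head_dim, or __init__ raises.
hidden_size, expand, num_heads, head_dim = 4096, 2, 128, 64
assert hidden_size * expand == num_heads * head_dim  # 8192 == 8192
print(hidden_size * expand)  # 8192
```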
3,576
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2Block
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
import torch

class Mamba2Block(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.residual_in_fp32 = config.residual_in_fp32
        self.norm = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.mixer = Mamba2Mixer(config, layer_idx=layer_idx)

    def forward(self, hidden_states, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        residual = hidden_states
        hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
        if self.residual_in_fp32:
            residual = residual.to(torch.float32)
        hidden_states = self.mixer(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask)
        hidden_states = residual + hidden_states
        return hidden_states
class Mamba2Block(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx):
        pass

    def forward(self, hidden_states, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        pass
3
0
12
1
12
0
2
0
1
5
3
0
2
5
2
12
26
2
24
15
15
0
16
9
13
2
1
1
3
3,577
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2Cache
from typing import Optional, Union
import torch
from .configuration_mamba2 import Mamba2Config


class Mamba2Cache:
    """
    Arguments:
        config: Mamba2Config
        batch_size: int
        dtype: torch.dtype
        device: torch.device

    Attributes:
        dtype: (`torch.dtype`):
            The default `dtype` used when initializing the cache.
        conv_kernel_size: (`int`):
            Model's convolution kernel size taken from config.
        n_groups: (`int`):
            Model's number of groups taken from the config - similar to tensor parallel in Transformer.
        state_size: (`int`):
            Model's SSM state size taken from config.
        num_heads: (`int`):
            The number of heads used in the linear attention / SSM.
        head_dim: (`int`):
            The respective dimension of the heads used in the linear attention / SSM.
        intermediate_size: (`int`):
            Model's intermediate_size based on (expand * hidden_dim) from config.
        conv_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, intermediate_size + 2 * n_groups * state_size, conv_kernel_size]` that holds convolutional states.
        ssm_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, num_heads, head_dim, state_size]` that holds ssm states.
    """

    def __init__(self, config: Mamba2Config, batch_size: int, dtype: torch.dtype=torch.float16, device: Optional[str]=None):
        self.dtype = dtype
        self.conv_kernel_size = config.conv_kernel
        self.n_groups = config.n_groups
        self.state_size = config.state_size
        self.num_heads = config.num_heads
        self.head_dim = config.head_dim
        self.intermediate_size = int(config.expand * config.hidden_size)
        self.conv_states = torch.zeros(config.num_hidden_layers, batch_size, self.intermediate_size + 2 * self.n_groups * self.state_size, self.conv_kernel_size, device=device, dtype=dtype)
        self.ssm_states = torch.zeros(config.num_hidden_layers, batch_size, self.num_heads, self.head_dim, self.state_size, device=device, dtype=dtype)

    def update_conv_state(self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool=False) -> torch.Tensor:
        if cache_init:
            self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device)
        else:
            self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1)
            self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device)
        return self.conv_states[layer_idx]

    def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
        self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
        return self.ssm_states[layer_idx]

    def reset(self):
        self.conv_states.zero_()
        self.ssm_states.zero_()
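The rolling-window semantics of `update_conv_state` are easiest to see on a toy cache. A hedged sketch, assuming the classes above are importable and reusing the tiny config from earlier in this file:

```python
import torch
from transformers import Mamba2Config
from transformers.models.mamba2.modeling_mamba2 import Mamba2Cache

config = Mamba2Config(hidden_size=64, num_heads=2, head_dim=64, state_size=16, n_groups=1, num_hidden_layers=1)
cache = Mamba2Cache(config, batch_size=1, dtype=torch.float32)
print(cache.conv_states.shape)  # (num_layers, batch, conv_dim, conv_kernel) = (1, 1, 160, 4)
print(cache.ssm_states.shape)   # (num_layers, batch, num_heads, head_dim, state_size) = (1, 1, 2, 64, 16)

# During decoding, update_conv_state(..., cache_init=False) shifts the window
# left by one position and writes the newest column at index -1.
new_token = torch.ones(1, 1, 160)  # (batch, 1, conv_dim) slice for one step
cache.update_conv_state(layer_idx=0, new_conv_state=new_token, cache_init=False)
print(cache.conv_states[0, 0, 0])  # tensor([0., 0., 0., 1.])
```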
class Mamba2Cache:
    '''
    Arguments:
        config: Mamba2Config
        batch_size: int
        dtype: torch.dtype
        device: torch.device

    Attributes:
        dtype: (`torch.dtype`):
            The default `dtype` used when initializing the cache.
        conv_kernel_size: (`int`):
            Model's convolution kernel size taken from config.
        n_groups: (`int`):
            Model's number of groups taken from the config - similar to tensor parallel in Transformer.
        state_size: (`int`):
            Model's SSM state size taken from config.
        num_heads: (`int`):
            The number of heads used in the linear attention / SSM.
        head_dim: (`int`):
            The respective dimension of the heads used in the linear attention / SSM.
        intermediate_size: (`int`):
            Model's intermediate_size based on (expand * hidden_dim) from config.
        conv_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, intermediate_size + 2 * n_groups * state_size, conv_kernel_size]` that holds convolutional states.
        ssm_states: (`torch.Tensor`):
            A tensor of shape `[num_layers, batch_size, num_heads, head_dim, state_size]` that holds ssm states.
    '''

    def __init__(self, config: Mamba2Config, batch_size: int, dtype: torch.dtype=torch.float16, device: Optional[str]=None):
        pass

    def update_conv_state(self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool=False) -> torch.Tensor:
        pass

    def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
        pass

    def reset(self):
        pass
5
1
11
0
11
0
1
0.6
0
5
1
0
4
9
4
4
75
6
43
18
34
26
23
14
18
2
0
1
5
3578
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2ForCausalLM
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Optional, Union
from ...generation import GenerationMixin
from torch import nn


@auto_docstring(custom_intro='\n    The MAMBA2 Model transformer with a language modeling head on top (linear layer with weights not tied to the input\n    embeddings).\n    ')
class Mamba2ForCausalLM(Mamba2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = []

    def __init__(self, config):
        super().__init__(config)
        self.backbone = Mamba2Model(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        return self.backbone.set_input_embeddings(new_embeddings)

    def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs):
        model_inputs = {'input_ids': input_ids.contiguous()}
        if use_cache and cache_params is None:
            cache_position = torch.arange(0, self.backbone.config.conv_kernel, device=input_ids.device)
            if inputs_embeds is not None:
                model_inputs = {'inputs_embeds': inputs_embeds}
                max_batch_size = inputs_embeds.size(0)
            else:
                max_batch_size = input_ids.size(0)
            cache_params = Mamba2Cache(self.backbone.config, max_batch_size, device=self.device, dtype=self.dtype)
        if use_cache and cache_position[0] > 0:
            model_inputs['input_ids'] = input_ids[:, -1].unsqueeze(-1).contiguous()
            attention_mask = None
        if not use_cache and inputs_embeds is not None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        model_inputs.update({'cache_params': cache_params, 'use_cache': use_cache, 'cache_position': cache_position, 'attention_mask': attention_mask})
        for key, value in kwargs.items():
            if key not in model_inputs:
                model_inputs[key] = value
        return model_inputs

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[Mamba2Cache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, Mamba2CausalLMOutput]:
        """
        cache_params (`Mamba2Cache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
            If `cache_params` is passed, `cache_position` should also be passed.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        mamba2_outputs = self.backbone(input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, cache_position=cache_position, attention_mask=attention_mask)
        hidden_states = mamba2_outputs[0]
        logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        if not return_dict:
            output = (logits,) + mamba2_outputs[1:]
            return (loss,) + output if loss is not None else output
        return Mamba2CausalLMOutput(loss=loss, logits=logits, cache_params=mamba2_outputs.cache_params, hidden_states=mamba2_outputs.hidden_states)
@auto_docstring(custom_intro='\n    The MAMBA2 Model transformer with a language modeling head on top (linear layer with weights not tied to the input\n    embeddings).\n    ')
class Mamba2ForCausalLM(Mamba2PreTrainedModel, GenerationMixin):

    def __init__(self, config):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, new_embeddings):
        pass

    def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[Mamba2Cache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, Mamba2CausalLMOutput]:
        '''
        cache_params (`Mamba2Cache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
            If `cache_params` is passed, `cache_position` should also be passed.
        '''
        pass
8
1
17
1
13
2
2
0.17
2
7
3
0
7
2
7
8
131
16
99
42
64
17
45
20
37
6
2
3
16
3579
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2Mixer
from .configuration_mamba2 import Mamba2Config
from ...activations import ACT2FN
import torch
from typing import Optional, Union
from torch import nn


class Mamba2Mixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. A, D are input
    independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are
    input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is
    called **selective** state spaces)
    """

    def __init__(self, config: Mamba2Config, layer_idx: int):
        super().__init__()
        self.num_heads = config.num_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = int(config.expand * self.hidden_size)
        self.time_step_rank = int(config.time_step_rank)
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]
        self.layer_norm_epsilon = config.layer_norm_epsilon
        self.rms_norm = config.rms_norm
        self.n_groups = config.n_groups
        self.head_dim = config.head_dim
        self.chunk_size = config.chunk_size
        self.time_step_limit = config.time_step_limit
        self.time_step_min = config.time_step_min
        self.time_step_max = config.time_step_max
        # Depthwise causal convolution over the x, B, C channels
        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(in_channels=self.conv_dim, out_channels=self.conv_dim, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.conv_dim, padding=config.conv_kernel - 1)
        # Single input projection producing gate, conv channels, and dt
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(self.hidden_size, projection_size, bias=config.use_bias)
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        self.norm = MambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon)
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias
        if not is_fast_path_available:
            logger.warning_once('The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d')

    def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size
        d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
        # Single-token decoding with an initialized cache: fused update kernels
        if cache_params is not None and cache_position is not None and (cache_position[0] > 0):
            _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split([d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
            # 2. Convolution sequence transformation
            hidden_states_B_C = causal_conv1d_update(hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation)
            hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
            # 3. SSM transformation
            A = -torch.exp(self.A_log.float())
            A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
            C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
            hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
            hidden_states = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True)
            hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
            hidden_states = self.norm(hidden_states, gate)
            # 4. Final linear projection
            out = self.out_proj(hidden_states)[:, None, ...]
        else:
            A = -torch.exp(self.A_log.float())
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float('inf')) else {'dt_limit': self.time_step_limit}
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=None, activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=False, **dt_limit_kwargs)
            else:
                _, _, gate, hidden_states_B_C, dt = projected_states.split([d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
                if cache_params is not None:
                    hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                    conv_states = nn.functional.pad(hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0))
                    cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)
                if self.activation not in ['silu', 'swish']:
                    hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
                else:
                    hidden_states_B_C = causal_conv1d_fn(x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation).transpose(1, 2)
                hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
                hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
                scan_output, ssm_state = mamba_chunk_scan_combined(hidden_states.view(batch_size, seq_len, -1, self.head_dim), dt, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=None, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs)
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
                scan_output = scan_output.view(batch_size, seq_len, -1)
                scan_output = self.norm(scan_output, gate)
                out = self.out_proj(scan_output)
        return out

    def torch_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        batch_size, seq_len, _ = hidden_states.shape
        dtype = hidden_states.dtype
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)
        d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
        _, _, gate, hidden_states_B_C, dt = projected_states.split([d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
        # 2. Convolution sequence transformation
        if cache_params is not None and cache_position is not None and (cache_position[0] > 0):
            cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)
            conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
            hidden_states_B_C = torch.sum(conv_states * self.conv1d.weight.squeeze(1), dim=-1)
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)
        else:
            if cache_params is not None:
                hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                conv_states = nn.functional.pad(hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0))
                cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)
            hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
        hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
        hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
        # 3. SSM transformation
        A = -torch.exp(self.A_log.float())
        if cache_params is not None and cache_position is not None and (cache_position[0] > 0):
            # Single-step recurrent update against the cached SSM state
            cache_device = cache_params.ssm_states.device
            dt = dt[:, 0, :][:, None, ...]
            dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
            dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
            dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dA = torch.exp(dt[..., None] * A).to(device=cache_device)
            B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
            B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
            B = B.reshape(batch_size, -1, B.shape[-1])
            dB = dt[..., None] * B[..., None, :]
            hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
            dBx = (dB * hidden_states[..., None]).to(device=cache_device)
            cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx)
            C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
            C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
            C = C.reshape(batch_size, -1, C.shape[-1])
            ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)
            ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)
            C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)
            y = torch.bmm(ssm_states_reshaped, C_reshaped)
            y = y.view(batch_size, self.num_heads, self.head_dim)
            D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
            y = (y + hidden_states * D).to(y.dtype)
            y = y.reshape(batch_size, -1)[:, None, ...]
        else:
            # Chunked (SSD) scan over the whole sequence
            dt = nn.functional.softplus(dt + self.dt_bias)
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
            B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
            C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
            pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
            D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
            hidden_states = hidden_states * dt[..., None]
            A = A.to(hidden_states.dtype) * dt
            hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
            A = A.permute(0, 3, 1, 2)
            A_cumsum = torch.cumsum(A, dim=-1)
            # Intra-chunk (diagonal block) outputs
            L = torch.exp(segment_sum(A))
            G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]
            G = G_intermediate.sum(dim=-1)
            M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
            M = M_intermediate.sum(dim=-1)
            Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
            # Inter-chunk states and low-rank (off-diagonal) contribution
            decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
            B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
            states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
            if cache_params is not None and cache_position is not None and (cache_position[0] > 0):
                previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
            else:
                previous_states = torch.zeros_like(states[:, :1])
            states = torch.cat([previous_states, states], dim=1)
            decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
            decay_chunk = decay_chunk.transpose(1, 3)
            new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
            states, ssm_state = (new_states[:, :-1], new_states[:, -1])
            state_decay_out = torch.exp(A_cumsum)
            C_times_states = C[..., None, :] * states[:, :, None, ...]
            state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
            Y_off = C_times_states.sum(-1) * state_decay_out_permuted[..., None]
            y = Y_diag + Y_off
            y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
            y = y + D_residual
            if pad_size > 0:
                y = y[:, :seq_len, :, :]
            y = y.reshape(batch_size, seq_len, -1)
            if ssm_state is not None and cache_params is not None:
                cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
        scan_output = self.norm(y, gate)
        # 4. Final linear projection
        contextualized_states = self.out_proj(scan_output.to(dtype))
        return contextualized_states

    def forward(self, hidden_states, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        if is_fast_path_available and 'cuda' in self.in_proj.weight.device.type:
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
        return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
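The chunked scan above is an efficient reformulation of a simple per-head linear recurrence. A step-by-step reference of what the mixer computes (illustrative only: one head, toy sizes, scalar A per head as in Mamba2):

```python
# Reference recurrence, not the chunked algorithm:
#     h_t = exp(dt_t * A) * h_{t-1} + dt_t * B_t * x_t
#     y_t = C_t · h_t + D * x_t,   with dt_t = softplus(dt_raw_t + dt_bias)
import torch

T, P, N = 6, 4, 8             # seq_len, head_dim, state_size (toy values)
A = -torch.rand(())           # one negative scalar per head in Mamba2
x = torch.randn(T, P)
B = torch.randn(T, N)
C = torch.randn(T, N)
dt = torch.nn.functional.softplus(torch.randn(T))
D = torch.ones(())

h = torch.zeros(P, N)         # SSM state for this head
ys = []
for t in range(T):
    dA = torch.exp(dt[t] * A)                              # scalar decay
    h = dA * h + dt[t] * x[t, :, None] * B[t][None, :]     # (P, N) state update
    ys.append(h @ C[t] + D * x[t])                         # (P,) readout
y = torch.stack(ys)
print(y.shape)  # torch.Size([6, 4])
```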
class Mamba2Mixer(nn.Module):
    '''
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. A, D are input
    independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are
    input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is
    called **selective** state spaces)
    '''

    def __init__(self, config: Mamba2Config, layer_idx: int):
        pass

    def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        pass

    def torch_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        pass

    def forward(self, hidden_states, cache_params: Optional[Mamba2Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        pass
5
1
108
15
76
19
5
0.28
1
7
3
0
4
27
4
14
443
63
306
103
289
85
181
91
176
8
1
3
20
3580
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2Model
import torch
from typing import Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring
class Mamba2Model(Mamba2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([Mamba2Block(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        self.norm_f = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self._register_load_state_dict_pre_hook(self.load_hook)
        self.post_init()

    def load_hook(self, state_dict, prefix, *args):
        for k in state_dict:
            if 'embedding.' in k:
                state_dict[k.replace('embedding.', 'embeddings.')] = state_dict.pop(k)
                break

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[Mamba2Cache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, Mamba2Output]:
        """
        cache_params (`Mamba2Cache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
            If `cache_params` is passed, `cache_position` should also be passed.
        """
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        if self.gradient_checkpointing and self.training and use_cache:
            use_cache = False
        if use_cache:
            if cache_params is None:
                cache_params = Mamba2Cache(self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype)
                cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
            elif cache_position is None:
                raise ValueError("You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will be initialized for you automatically")
        else:
            cache_params = None
        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        for mixer_block in self.layers:
            hidden_states = mixer_block(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        hidden_states = self.norm_f(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, cache_params, all_hidden_states] if v is not None))
        return Mamba2Output(last_hidden_state=hidden_states, cache_params=cache_params if use_cache else None, hidden_states=all_hidden_states)
@auto_docstring
class Mamba2Model(Mamba2PreTrainedModel):

    def __init__(self, config):
        pass

    def load_hook(self, state_dict, prefix, *args):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, new_embeddings):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[Mamba2Cache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, Mamba2Output]:
        '''
        cache_params (`Mamba2Cache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
            If `cache_params` is passed, `cache_position` should also be passed.
        '''
        pass
8
1
19
2
16
1
5
0.06
1
10
4
0
5
5
5
6
107
16
87
27
64
5
48
14
42
18
2
2
24
3581
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2Output
from dataclasses import dataclass
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging


@dataclass
@auto_docstring(custom_intro='\n    Class for the MAMBA2 model outputs.\n    ')
class Mamba2Output(ModelOutput):
    """
    cache_params (`Mamba2Cache`):
        The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
        avoid providing the old `input_ids`.

        Includes both the State space model state matrices after the selective scan, and the Convolutional states
    """
    last_hidden_state: Optional[torch.FloatTensor] = None
    cache_params: Optional[Mamba2Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(custom_intro='\n    Class for the MAMBA2 model outputs.\n    ')
class Mamba2Output(ModelOutput):
    '''
    cache_params (`Mamba2Cache`):
        The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
        avoid providing the old `input_ids`.

        Includes both the State space model state matrices after the selective scan, and the Convolutional states
    '''
    pass
3
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
22
4
4
4
3
14
4
4
3
0
1
0
0
3582
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2PreTrainedModel
import torch
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
import math
from .configuration_mamba2 import Mamba2Config
from torch import nn


@auto_docstring
class Mamba2PreTrainedModel(PreTrainedModel):
    config: Mamba2Config
    base_model_prefix = 'backbone'
    _no_split_modules = ['Mamba2Block']
    supports_gradient_checkpointing = True
    _is_stateful = True

    def _init_weights(self, module):
        """Initialize the weights."""
        std = self.config.initializer_range
        if isinstance(module, Mamba2Mixer):
            A = torch.arange(1, self.config.num_heads + 1)
            module.A_log.copy_(torch.log(A))
            module.D.data.fill_(1.0)
            dt = torch.exp(torch.rand(self.config.num_heads) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor)
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            module.dt_bias.copy_(inv_dt)
            module.dt_bias._no_reinit = True
            nn.init.kaiming_uniform_(module.conv1d.weight, a=math.sqrt(5))
            if module.conv1d.bias is not None:
                if not getattr(module.conv1d.bias, '_no_reinit', False):
                    nn.init.zeros_(module.conv1d.bias)
            nn.init.kaiming_uniform_(module.out_proj.weight, a=math.sqrt(5))
            if self.config.rescale_prenorm_residual:
                p = module.out_proj.weight
                p /= math.sqrt(self.config.num_hidden_layers)
        if isinstance(module, nn.Linear):
            if not getattr(module.weight, '_no_reinit', False):
                nn.init.normal_(module.weight, std=std)
            if module.bias is not None:
                if not getattr(module.bias, '_no_reinit', False):
                    nn.init.zeros_(module.bias)
        elif isinstance(module, (Mamba2RMSNorm, MambaRMSNormGated)):
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, std=std)
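The `inv_dt = dt + torch.log(-torch.expm1(-dt))` line is the inverse of softplus (algebraically, `dt + log(1 - exp(-dt)) = log(exp(dt) - 1)`), so the mixer's `softplus(dt + dt_bias)` reproduces the sampled, log-uniform time steps at initialization. A quick numerical check (illustrative):

```python
import torch
import torch.nn.functional as F

lo, hi = torch.tensor(0.001), torch.tensor(0.1)
dt = torch.exp(torch.rand(8) * (torch.log(hi) - torch.log(lo)) + torch.log(lo)).clamp(min=1e-4)
inv_dt = dt + torch.log(-torch.expm1(-dt))   # inverse softplus of dt
print(torch.allclose(F.softplus(inv_dt), dt, atol=1e-6))  # True
```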
@auto_docstring
class Mamba2PreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        '''Initialize the weights.'''
        pass
3
1
41
4
25
12
9
0.52
1
1
1
2
1
0
1
1
53
6
31
10
29
16
26
10
24
9
1
4
9
3583
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.Mamba2RMSNorm
from torch import nn
import torch


class Mamba2RMSNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        """
        Mamba2RMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
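The forward pass reduces to `weight * x / sqrt(mean(x**2, dim=-1) + eps)` computed in float32. An equivalence check, assuming the class above is in scope:

```python
import torch

norm = Mamba2RMSNorm(16, eps=1e-6)
x = torch.randn(2, 5, 16)
manual = norm.weight * (x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6))
print(torch.allclose(norm(x), manual, atol=1e-6))  # True
```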
class Mamba2RMSNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        '''
        Mamba2RMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
        '''
        pass

    def forward(self, hidden_states):
        pass
3
1
7
0
5
2
1
0.27
1
1
0
0
2
2
2
12
15
1
11
7
8
3
11
7
8
1
1
0
2
3584
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/mamba2/modeling_mamba2.py
transformers.models.mamba2.modeling_mamba2.MambaRMSNormGated
import torch
from torch import nn


class MambaRMSNormGated(torch.nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states, gate=None):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        if gate is not None:
            hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
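Same RMS statistics as the ungated norm above, but the input is first gated by `silu(gate)`; with `gate=None` it reduces to the plain RMS norm. Illustrative check, assuming the class is in scope:

```python
import torch

norm = MambaRMSNormGated(16, eps=1e-6)
x, gate = torch.randn(2, 5, 16), torch.randn(2, 5, 16)
gated = x * torch.nn.functional.silu(gate)
manual = norm.weight * (gated / torch.sqrt(gated.pow(2).mean(-1, keepdim=True) + 1e-6))
print(torch.allclose(norm(x, gate), manual, atol=1e-6))  # True
```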
class MambaRMSNormGated(torch.nn.Module):

    def __init__(self, hidden_size, eps=1e-06):
        pass

    def forward(self, hidden_states, gate=None):
        pass
3
0
7
1
6
0
2
0
1
1
0
0
2
2
2
12
16
3
13
7
10
0
13
7
10
2
1
1
3
3585
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/configuration_marian.py
transformers.models.marian.configuration_marian.MarianConfig
from ...configuration_utils import PretrainedConfig


class MarianConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MarianModel`]. It is used to instantiate a
    Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Marian
    [Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 58101):
            Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MarianModel`] or [`TFMarianModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop
            paper](https://huggingface.co/papers/1909.11556) for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop
            paper](https://huggingface.co/papers/1909.11556) for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 0):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Examples:

    ```python
    >>> from transformers import MarianModel, MarianConfig

    >>> # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration
    >>> configuration = MarianConfig()

    >>> # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration
    >>> model = MarianModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
class MarianConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`MarianModel`]. It is used to instantiate a
    Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Marian
    [Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 58101):
            Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MarianModel`] or [`TFMarianModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop
            paper](https://huggingface.co/papers/1909.11556) for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop
            paper](https://huggingface.co/papers/1909.11556) for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 0):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Examples:

    ```python
    >>> from transformers import MarianModel, MarianConfig

    >>> # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration
    >>> configuration = MarianConfig()

    >>> # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration
    >>> model = MarianModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        pass
2
1
58
0
58
1
1
1.02
1
1
0
0
1
21
1
1
134
10
62
54
32
63
27
26
25
1
1
0
1
3586
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/configuration_marian.py
transformers.models.marian.configuration_marian.MarianOnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from typing import Any
from collections import OrderedDict
from collections.abc import Mapping
from ... import PreTrainedTokenizer
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...utils import is_torch_available, logging


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ['default', 'seq2seq-lm']:
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == 'causal-lm':
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ['default', 'seq2seq-lm']:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(tokenizer, batch_size, seq_length, is_pair)
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(tokenizer, batch_size, decoder_seq_length, is_pair)
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads)
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads)
            common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(tokenizer, batch_size, seq_length, is_pair)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads)
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors='pt'))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        if self.task in ['default', 'seq2seq-lm']:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ['default', 'seq2seq-lm']:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)

    @property
    def atol_for_validation(self) -> float:
        return 0.0001
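Hedged usage sketch: building ONNX dummy inputs for a seq2seq export. The checkpoint name is only an example of a Marian tokenizer (downloading it requires network access and `sentencepiece`):

```python
from transformers import AutoTokenizer, MarianConfig
from transformers.models.marian.configuration_marian import MarianOnnxConfig

config = MarianConfig()
onnx_config = MarianOnnxConfig(config, task='seq2seq-lm')
tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de')
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
print(sorted(dummy))  # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']
print(onnx_config.inputs.keys())
```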
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        pass

    @property
    def atol_for_validation(self) -> float:
        pass
12
0
26
2
23
1
4
0.08
1
10
0
0
8
1
8
8
226
21
190
75
148
15
90
43
79
8
1
3
28
3587
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py
transformers.models.marian.convert_marian_tatoeba_to_pytorch.TatoebaConverter
import datetime
import yaml
from transformers.models.marian.convert_marian_to_pytorch import FRONT_MATTER_TEMPLATE, convert, convert_opus_name_to_hf_name, download_and_unzip, get_system_metadata
import os
from tqdm import tqdm
import re
import json
from pathlib import Path


class TatoebaConverter:
    """
    Convert Tatoeba-Challenge models to huggingface format.

    Steps:

        1. Convert numpy state dict to hf format (same code as OPUS-MT-Train conversion).
        2. Rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a
           unique one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en
        3. Select the best model for a particular pair, parse the yml for it and write a model card. By default the
           best model is the one listed first in released-model-results, but it's also possible to specify the most
           recent one.
    """

    def __init__(self, save_dir='marian_converted'):
        assert Path(DEFAULT_REPO).exists(), 'need git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git'
        self.download_lang_info()
        self.model_results = json.load(open('Tatoeba-Challenge/models/released-model-results.json'))
        self.alpha3_to_alpha2 = {}
        for line in open(ISO_PATH):
            parts = line.split('\t')
            if len(parts[0]) == 3 and len(parts[3]) == 2:
                self.alpha3_to_alpha2[parts[0]] = parts[3]
        for line in open(LANG_CODE_PATH):
            parts = line.split(',')
            if len(parts[0]) == 3 and len(parts[1]) == 2:
                self.alpha3_to_alpha2[parts[0]] = parts[1]
        self.model_card_dir = Path(save_dir)
        self.tag2name = {}
        for key, value in GROUP_MEMBERS.items():
            self.tag2name[key] = value[0]

    def convert_models(self, tatoeba_ids, dry_run=False):
        models_to_convert = [self.parse_metadata(x) for x in tatoeba_ids]
        save_dir = Path('marian_ckpt')
        dest_dir = Path(self.model_card_dir)
        dest_dir.mkdir(exist_ok=True)
        for model in tqdm(models_to_convert):
            if 'SentencePiece' not in model['pre-processing']:
                print(f"Skipping {model['release']} because it doesn't appear to use SentencePiece")
                continue
            if not os.path.exists(save_dir / model['_name']):
                download_and_unzip(f"{TATOEBA_MODELS_URL}/{model['release']}", save_dir / model['_name'])
            opus_language_groups_to_hf = convert_opus_name_to_hf_name
            pair_name = opus_language_groups_to_hf(model['_name'])
            convert(save_dir / model['_name'], dest_dir / f'opus-mt-{pair_name}')
            self.write_model_card(model, dry_run=dry_run)

    def expand_group_to_two_letter_codes(self, grp_name):
        return [self.alpha3_to_alpha2.get(x, x) for x in GROUP_MEMBERS[grp_name][1]]

    def is_group(self, code, name):
        return 'languages' in name or len(GROUP_MEMBERS.get(code, [])) > 1

    def get_tags(self, code, name):
        if len(code) == 2:
            assert 'languages' not in name, f'{code}: {name}'
            return [code]
        elif self.is_group(code, name):
            group = self.expand_group_to_two_letter_codes(code)
            group.append(code)
            return group
        else:
            print(f'Three letter monolingual code: {code}')
            return [code]

    def resolve_lang_code(self, src, tgt) -> tuple[str, str]:
        src_tags = self.get_tags(src, self.tag2name[src])
        tgt_tags = self.get_tags(tgt, self.tag2name[tgt])
        return (src_tags, tgt_tags)

    @staticmethod
    def model_type_info_from_model_name(name):
        info = {'_has_backtranslated_data': False}
        if '1m' in name:
            info['_data_per_pair'] = str(1000000.0)
        if '2m' in name:
            info['_data_per_pair'] = str(2000000.0)
        if '4m' in name:
            info['_data_per_pair'] = str(4000000.0)
        if '+bt' in name:
            info['_has_backtranslated_data'] = True
        if 'tuned4' in name:
            info['_tuned'] = re.search('tuned4[^-]+', name).group()
        return info

    def write_model_card(self, model_dict, dry_run=False) -> str:
        """
        Construct card from data parsed from YAML and the model's name.

        upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
        """
        model_dir_url = f"{TATOEBA_MODELS_URL}/{model_dict['release']}"
        long_pair = model_dict['_name'].split('-')
        assert len(long_pair) == 2, f"got a translation pair {model_dict['_name']} that doesn't appear to be a pair"
        short_src = self.alpha3_to_alpha2.get(long_pair[0], long_pair[0])
        short_tgt = self.alpha3_to_alpha2.get(long_pair[1], long_pair[1])
        model_dict['_hf_model_id'] = f'opus-mt-{short_src}-{short_tgt}'
        a3_src, a3_tgt = model_dict['_name'].split('-')
        resolved_src_tags, resolved_tgt_tags = self.resolve_lang_code(a3_src, a3_tgt)
        a2_src_tags, a2_tgt_tags = ([], [])
        for tag in resolved_src_tags:
            if tag not in self.alpha3_to_alpha2:
                a2_src_tags.append(tag)
        for tag in resolved_tgt_tags:
            if tag not in self.alpha3_to_alpha2:
                a2_tgt_tags.append(tag)
        lang_tags = dedup(a2_src_tags + a2_tgt_tags)
        src_multilingual, tgt_multilingual = (len(a2_src_tags) > 1, len(a2_tgt_tags) > 1)
        s, t = (','.join(a2_src_tags), ','.join(a2_tgt_tags))
        metadata = {'hf_name': model_dict['_name'], 'source_languages': s, 'target_languages': t, 'opus_readme_url': f'{model_dir_url}/README.md', 'original_repo': 'Tatoeba-Challenge', 'tags': ['translation'], 'languages': lang_tags}
        lang_tags = l2front_matter(lang_tags)
        metadata['src_constituents'] = list(GROUP_MEMBERS[a3_src][1])
        metadata['tgt_constituents'] = list(GROUP_MEMBERS[a3_tgt][1])
        metadata['src_multilingual'] = src_multilingual
        metadata['tgt_multilingual'] = tgt_multilingual
        backtranslated_data = ''
        if model_dict['_has_backtranslated_data']:
            backtranslated_data = ' with backtranslations'
        multilingual_data = ''
        if '_data_per_pair' in model_dict:
            multilingual_data = f"* data per pair in multilingual model: {model_dict['_data_per_pair']}\n"
        tuned = ''
        if '_tuned' in model_dict:
            tuned = f"* multilingual model tuned for: {model_dict['_tuned']}\n"
        model_base_filename = model_dict['release'].split('/')[-1]
        download = f"* download original weights: [{model_base_filename}]({model_dir_url}/{model_dict['release']})\n"
        langtoken = ''
        if tgt_multilingual:
            langtoken = '* a sentence-initial language token is required in the form of >>id<<(id = valid, usually three-letter target language ID)\n'
        metadata.update(get_system_metadata(DEFAULT_REPO))
        scorestable = ''
        for k, v in model_dict.items():
            if 'scores' in k:
                this_score_table = f'* {k}\n|Test set|score|\n|---|---|\n'
                pairs = sorted(v.items(), key=lambda x: x[1], reverse=True)
                for pair in pairs:
                    this_score_table += f'|{pair[0]}|{pair[1]}|\n'
                scorestable += this_score_table
        datainfo = ''
        if 'training-data' in model_dict:
            datainfo += '* Training data: \n'
            for k, v in model_dict['training-data'].items():
                datainfo += f' * {str(k)}: {str(v)}\n'
        if 'validation-data' in model_dict:
            datainfo += '* Validation data: \n'
            for k, v in model_dict['validation-data'].items():
                datainfo += f' * {str(k)}: {str(v)}\n'
        if 'test-data' in model_dict:
            datainfo += '* Test data: \n'
            for k, v in model_dict['test-data'].items():
                datainfo += f' * {str(k)}: {str(v)}\n'
        testsetfilename = model_dict['release'].replace('.zip', '.test.txt')
        testscoresfilename = model_dict['release'].replace('.zip', '.eval.txt')
        testset = f'* test set translations file: [test.txt]({model_dir_url}/{testsetfilename})\n'
        testscores = f'* test set scores file: [eval.txt]({model_dir_url}/{testscoresfilename})\n'
        readme_url = f"{TATOEBA_MODELS_URL}/{model_dict['_name']}/README.md"
        extra_markdown = f"\n### {model_dict['_name']}\n\n* source language name: {self.tag2name[a3_src]}\n* target language name: {self.tag2name[a3_tgt]}\n* OPUS readme: [README.md]({readme_url})\n"
        content = f"\n* model: {model_dict['modeltype']}\n* source language code{src_multilingual * 's'}: {', '.join(a2_src_tags)}\n* target language code{tgt_multilingual * 's'}: {', '.join(a2_tgt_tags)}\n* dataset: opus {backtranslated_data}\n* release date: {model_dict['release-date']}\n* pre-processing: {model_dict['pre-processing']}\n" + multilingual_data + tuned + download + langtoken + datainfo + testset + testscores + scorestable
        content = FRONT_MATTER_TEMPLATE.format(lang_tags) + extra_markdown + content
        items = '\n'.join([f'* {k}: {v}' for k, v in metadata.items()])
        sec3 = '\n### System Info: \n' + items
        content += sec3
        if dry_run:
            print('CONTENT:')
            print(content)
            print('METADATA:')
            print(metadata)
            return
        sub_dir = self.model_card_dir / model_dict['_hf_model_id']
        sub_dir.mkdir(exist_ok=True)
        dest = sub_dir / 'README.md'
        dest.open('w').write(content)
        for k, v in metadata.items():
            if isinstance(v, datetime.date):
                metadata[k] = datetime.datetime.strftime(v, '%Y-%m-%d')
        with open(sub_dir / 'metadata.json', 'w', encoding='utf-8') as writeobj:
            json.dump(metadata, writeobj)

    def download_lang_info(self):
        global LANG_CODE_PATH
        Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True)
        import wget
        from huggingface_hub import hf_hub_download
        if not os.path.exists(ISO_PATH):
            wget.download(ISO_URL, ISO_PATH)
        if not os.path.exists(LANG_CODE_PATH):
            LANG_CODE_PATH = hf_hub_download(repo_id='huggingface/language_codes_marianMT', filename='language-codes-3b2.csv', repo_type='dataset')

    def parse_metadata(self, model_name, repo_path=DEFAULT_MODEL_DIR, method='best'):
        p = Path(repo_path) / model_name

        def url_to_name(url):
            return url.split('/')[-1].split('.')[0]
        if model_name not in self.model_results:
            method = 'newest'
        if method == 'best':
            results = [url_to_name(model['download']) for model in self.model_results[model_name]]
            ymls = [f for f in os.listdir(p) if f.endswith('.yml') and f[:-4] in results]
            ymls.sort(key=lambda x: results.index(x[:-4]))
            metadata = yaml.safe_load(open(p / ymls[0]))
            metadata.update(self.model_type_info_from_model_name(ymls[0][:-4]))
        elif method == 'newest':
            ymls = [f for f in os.listdir(p) if f.endswith('.yml')]
            ymls.sort(key=lambda x: datetime.datetime.strptime(re.search('\\d\\d\\d\\d-\\d\\d?-\\d\\d?', x).group(), '%Y-%m-%d'))
            metadata = yaml.safe_load(open(p / ymls[-1]))
            metadata.update(self.model_type_info_from_model_name(ymls[-1][:-4]))
        else:
            raise NotImplementedError(f"Don't know argument method='{method}' to parse_metadata()")
        metadata['_name'] = model_name
        return metadata
class TatoebaConverter:
    '''
    Convert Tatoeba-Challenge models to huggingface format.

    Steps:
        1. Convert numpy state dict to hf format (same code as OPUS-MT-Train conversion).
        2. Rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a
           unique one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en
        3. Select the best model for a particular pair, parse the yml for it and write a model card. By default the
           best model is the one listed first in released-model-results, but it's also possible to specify the most
           recent one.
    '''

    def __init__(self, save_dir='marian_converted'):
        pass

    def convert_models(self, tatoeba_ids, dry_run=False):
        pass

    def expand_group_to_two_letter_codes(self, grp_name):
        pass

    def is_group(self, code, name):
        pass

    def get_tags(self, code, name):
        pass

    def resolve_lang_code(self, src, tgt) -> tuple[str, str]:
        pass

    @staticmethod
    def model_type_info_from_model_name(name):
        pass

    def write_model_card(self, model_dict, dry_run=False) -> str:
        '''
        Construct card from data parsed from YAML and the model's name.

        upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
        '''
        pass

    def download_lang_info(self):
        pass

    def parse_metadata(self, model_name, repo_path=DEFAULT_MODEL_DIR, method='best'):
        pass

    def url_to_name(url):
        pass
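The renaming step (step 2 in the docstring) is a per-code table lookup with a fallback to the original code. A minimal sketch of that idea, with a hand-picked mapping standing in for the ISO tables the converter downloads (the mapping values here are illustrative, not the converter's actual data):

# Sketch of the alpha3 -> alpha2 renaming used by TatoebaConverter.
ALPHA3_TO_ALPHA2 = {"eng": "en", "heb": "he", "fra": "fr"}  # illustrative subset

def rename_opus_pair(opus_name: str) -> str:
    src, tgt = opus_name.split("-")
    src = ALPHA3_TO_ALPHA2.get(src, src)  # fall back to alpha3 when no unique alpha2 exists
    tgt = ALPHA3_TO_ALPHA2.get(tgt, tgt)
    return f"opus-mt-{src}-{tgt}"

assert rename_opus_pair("heb-eng") == "opus-mt-he-en"
assert rename_opus_pair("aav-eng") == "opus-mt-aav-en"  # 'aav' is a language group, kept as-is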
13
2
23
2
20
1
5
0.12
0
7
0
0
9
4
10
10
277
35
220
73
204
26
178
71
163
21
0
3
51
3,588
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/convert_marian_to_pytorch.py
transformers.models.marian.convert_marian_to_pytorch.OpusState
import warnings

import numpy as np
import torch
from torch import nn

from transformers import MarianConfig, MarianMTModel, MarianTokenizer


class OpusState:
    def __init__(self, source_dir, eos_token_id=0):
        npz_path = find_model_file(source_dir)
        self.state_dict = np.load(npz_path)
        cfg = load_config_from_state_dict(self.state_dict)
        if cfg["dim-vocabs"][0] != cfg["dim-vocabs"][1]:
            raise ValueError("dim-vocabs for source and target must match")
        if "Wpos" in self.state_dict:
            raise ValueError("Wpos key in state dictionary")
        self.state_dict = dict(self.state_dict)
        if cfg["tied-embeddings-all"]:
            cfg["tied-embeddings-src"] = True
            cfg["tied-embeddings"] = True
        self.share_encoder_decoder_embeddings = cfg["tied-embeddings-src"]

        # create the tokenizer first so the EOS token id can be read off it
        self.source_dir = source_dir
        self.tokenizer = self.load_tokenizer()
        tokenizer_has_eos_token_id = (
            hasattr(self.tokenizer, "eos_token_id") and self.tokenizer.eos_token_id is not None
        )
        eos_token_id = self.tokenizer.eos_token_id if tokenizer_has_eos_token_id else 0

        if cfg["tied-embeddings-src"]:
            self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1)
            self.pad_token_id = self.wemb.shape[0] - 1
            cfg["vocab_size"] = self.pad_token_id + 1
        else:
            self.wemb, _ = add_emb_entries(self.state_dict["encoder_Wemb"], self.state_dict[BIAS_KEY], 1)
            self.dec_wemb, self.final_bias = add_emb_entries(
                self.state_dict["decoder_Wemb"], self.state_dict[BIAS_KEY], 1
            )
            self.pad_token_id = self.wemb.shape[0] - 1
            cfg["vocab_size"] = self.pad_token_id + 1
            cfg["decoder_vocab_size"] = self.pad_token_id + 1

        if cfg["vocab_size"] != self.tokenizer.vocab_size:
            raise ValueError(
                f"Original vocab size {cfg['vocab_size']} and new vocab size {len(self.tokenizer.encoder)} mismatched."
            )

        self.state_keys = list(self.state_dict.keys())
        if "Wtype" in self.state_dict:
            raise ValueError("Wtype key in state dictionary")
        self._check_layer_entries()
        self.cfg = cfg
        hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape
        # use the hyphenated Marian key here; cfg['dim_emb'] would raise a KeyError
        if hidden_size != cfg["dim-emb"]:
            raise ValueError(f"Hidden size {hidden_size} and configured size {cfg['dim-emb']} mismatched")

        decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml"))
        check_marian_cfg_assumptions(cfg)
        self.hf_config = MarianConfig(
            vocab_size=cfg["vocab_size"],
            decoder_vocab_size=cfg.get("decoder_vocab_size", cfg["vocab_size"]),
            share_encoder_decoder_embeddings=cfg["tied-embeddings-src"],
            decoder_layers=cfg["dec-depth"],
            encoder_layers=cfg["enc-depth"],
            decoder_attention_heads=cfg["transformer-heads"],
            encoder_attention_heads=cfg["transformer-heads"],
            decoder_ffn_dim=cfg["transformer-dim-ffn"],
            encoder_ffn_dim=cfg["transformer-dim-ffn"],
            d_model=cfg["dim-emb"],
            activation_function=cfg["transformer-ffn-activation"],
            pad_token_id=self.pad_token_id,
            eos_token_id=eos_token_id,
            forced_eos_token_id=eos_token_id,
            bos_token_id=0,
            max_position_embeddings=cfg["dim-emb"],
            scale_embedding=True,
            normalize_embedding="n" in cfg["transformer-preprocess"],
            static_position_embeddings=not cfg["transformer-train-position-embeddings"],
            tie_word_embeddings=cfg["tied-embeddings"],
            dropout=0.1,
            num_beams=decoder_yml["beam-size"],
            decoder_start_token_id=self.pad_token_id,
            bad_words_ids=[[self.pad_token_id]],
            max_length=512,
        )

    def _check_layer_entries(self):
        self.encoder_l1 = self.sub_keys("encoder_l1")
        self.decoder_l1 = self.sub_keys("decoder_l1")
        self.decoder_l2 = self.sub_keys("decoder_l2")
        if len(self.encoder_l1) != 16:
            warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}")
        if len(self.decoder_l1) != 26:
            warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
        if len(self.decoder_l2) != 26:
            # report decoder_l2's own length (the original message reused decoder_l1)
            warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l2)}")

    @property
    def extra_keys(self):
        extra = []
        for k in self.state_keys:
            if (
                k.startswith("encoder_l")
                or k.startswith("decoder_l")
                or k in [CONFIG_KEY, "Wemb", "encoder_Wemb", "decoder_Wemb", "Wpos", "decoder_ff_logit_out_b"]
            ):
                continue
            else:
                extra.append(k)
        return extra

    def sub_keys(self, layer_prefix):
        return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)]

    def load_tokenizer(self):
        add_special_tokens_to_vocab(self.source_dir, not self.share_encoder_decoder_embeddings)
        return MarianTokenizer.from_pretrained(str(self.source_dir))

    def load_marian_model(self) -> MarianMTModel:
        state_dict, cfg = self.state_dict, self.hf_config

        if not cfg.static_position_embeddings:
            raise ValueError("config.static_position_embeddings should be True")
        model = MarianMTModel(cfg)

        if "hidden_size" in cfg.to_dict():
            raise ValueError("hidden_size is in config")
        load_layers_(model.model.encoder.layers, state_dict, BART_CONVERTER)
        load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True)

        if self.cfg["tied-embeddings-src"]:
            wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
            bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
            model.model.shared.weight = wemb_tensor
            model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared
        else:
            wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
            model.model.encoder.embed_tokens.weight = wemb_tensor

            decoder_wemb_tensor = nn.Parameter(torch.FloatTensor(self.dec_wemb))
            bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
            model.model.decoder.embed_tokens.weight = decoder_wemb_tensor

            if self.cfg["tied-embeddings"]:
                model.lm_head.weight.data = model.model.decoder.embed_tokens.weight.data.clone()

        model.final_logits_bias = bias_tensor

        if "Wpos" in state_dict:
            print("Unexpected: got Wpos")
            wpos_tensor = torch.tensor(state_dict["Wpos"])
            model.model.encoder.embed_positions.weight = wpos_tensor
            model.model.decoder.embed_positions.weight = wpos_tensor

        if cfg.normalize_embedding:
            if "encoder_emb_ln_scale_pre" not in state_dict:
                raise ValueError("encoder_emb_ln_scale_pre is not in state dictionary")
            raise NotImplementedError("Need to convert layernorm_embedding")

        if self.extra_keys:
            raise ValueError(f"Failed to convert {self.extra_keys}")

        if model.get_input_embeddings().padding_idx != self.pad_token_id:
            raise ValueError(
                f"Padding tokens {model.get_input_embeddings().padding_idx} and {self.pad_token_id} mismatched"
            )
        return model
class OpusState:
    def __init__(self, source_dir, eos_token_id=0):
        pass

    def _check_layer_entries(self):
        pass

    @property
    def extra_keys(self):
        pass

    def sub_keys(self, layer_prefix):
        pass

    def load_tokenizer(self):
        pass

    def load_marian_model(self) -> MarianMTModel:
        pass
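The conversion hinges on appending one extra row for the new pad token to both the embedding matrix and the output bias before sizing the vocab, which is why pad_token_id ends up as wemb.shape[0] - 1. A plain-numpy sketch of that step; add_emb_entries is assumed to behave roughly like this, and the names below are illustrative:

import numpy as np

def add_pad_row(wemb: np.ndarray, bias: np.ndarray, n_extra: int = 1):
    # Append n_extra zero rows to the embedding matrix and bias vector;
    # the new last row plays the role of the pad token.
    vocab, dim = wemb.shape
    new_wemb = np.concatenate([wemb, np.zeros((n_extra, dim), dtype=wemb.dtype)], axis=0)
    new_bias = np.concatenate([bias.reshape(-1), np.zeros(n_extra, dtype=bias.dtype)])
    return new_wemb, new_bias

wemb = np.random.randn(8, 4).astype(np.float32)
bias = np.zeros(8, dtype=np.float32)
wemb2, bias2 = add_pad_row(wemb, bias)
assert wemb2.shape == (9, 4) and bias2.shape == (9,)
pad_token_id = wemb2.shape[0] - 1  # == 8, mirroring OpusState.__init__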
8
0
28
3
24
2
5
0.07
0
8
0
0
6
14
6
6
173
20
144
35
136
10
98
34
91
10
0
2
28
3,589
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianAttention
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_marian import MarianConfig

logger = logging.get_logger(__name__)  # needed by the layer_idx warning below


class MarianAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[MarianConfig] = None,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended "
                "and will lead to errors during the forward call, if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided, this layer is used as a cross-attention layer for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len

        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)

        # get query proj
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)

        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    curr_past_key_value = past_key_values.cross_attention_cache
                else:
                    curr_past_key_value = past_key_values.self_attention_cache
            else:
                curr_past_key_value = past_key_values

        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k, v from the cross-attention cache
            key_states = curr_past_key_value.layers[self.layer_idx].keys
            value_states = curr_past_key_value.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(*kv_input_shape).transpose(1, 2)
            value_states = value_states.view(*kv_input_shape).transpose(1, 2)

            if past_key_values is not None:
                # save all key/value states to the cache for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # flag this layer's cross-attention cache as filled so it can be reused
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            head_mask=layer_head_mask,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
class MarianAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[MarianConfig]=None, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
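Non-eager configs dispatch to ALL_ATTENTION_FUNCTIONS; the eager path boils down to scaled dot-product attention over (batch, heads, seq, head_dim) tensors. A minimal self-contained sketch of that computation (an illustration, not the library's exact eager_attention_forward):

import torch

def eager_attention(q, k, v, mask=None, scaling=None):
    # Plain scaled dot-product attention over (bsz, heads, seq, head_dim) tensors.
    if scaling is None:
        scaling = q.shape[-1] ** -0.5
    scores = torch.matmul(q, k.transpose(-1, -2)) * scaling  # (bsz, heads, tgt, src)
    if mask is not None:
        scores = scores + mask  # additive mask: 0 to keep, large negative to drop
    weights = scores.softmax(dim=-1)
    return torch.matmul(weights, v), weights

bsz, heads, tgt, src, hd = 2, 4, 5, 7, 8
q = torch.randn(bsz, heads, tgt, hd)
k = torch.randn(bsz, heads, src, hd)
v = torch.randn(bsz, heads, src, hd)
out, w = eager_attention(q, k, v)
assert out.shape == (bsz, heads, tgt, hd) and w.shape == (bsz, heads, tgt, src)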
4
2
50
7
35
8
5
0.24
1
7
1
0
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
3,590
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianDecoder
import math
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from .configuration_marian import MarianConfig

logger = logging.get_logger(__name__)  # needed by the warnings below


class MarianDecoder(MarianPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`]

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.decoder_vocab_size, config.d_model, self.padding_idx)

        self.embed_positions = MarianSinusoidalPositionalEmbedding(
            config.max_position_embeddings, config.d_model, self.padding_idx
        )
        self.layers = nn.ModuleList([MarianDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                It is a [`~cache_utils.Cache`] instance. For more details, see our
                [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
                of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # retrieve input_ids and inputs_embeds
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1]  # dummy (batch, seq) tensor; only its shape is used below

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input)

        inputs_embeds = inputs_embeds * self.embed_scale

        if use_cache and past_key_values is None:
            past_key_values = (
                EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
                if encoder_hidden_states is not None
                else DynamicCache(config=self.config)
            )

        if use_cache and isinstance(past_key_values, tuple):
            logger.warning_once(
                "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
                "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
                "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
            )
            past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)

        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        self_attn_cache = (
            past_key_values.self_attention_cache
            if isinstance(past_key_values, EncoderDecoderCache)
            else past_key_values
        )

        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)
        encoder_attention_mask = self._update_cross_attn_mask(
            encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds
        )

        # embed positions
        position_ids = self.embed_positions(
            (batch_size, seq_length), past_key_values_length, position_ids=cache_position
        )

        hidden_states = inputs_embeds + position_ids
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                assert attn_mask.size()[0] == len(self.layers), (
                    f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            # LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_outputs = decoder_layer(
                hidden_states,
                causal_mask,
                encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
class MarianDecoder(MarianPreTrainedModel):
    '''
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`]

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    '''

    def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding]=None):
        pass

    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        '''
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                It is a [`~cache_utils.Cache`] instance. For more details, see our
                [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
                of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        '''
        pass
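The decoder's self-attention relies on the additive causal mask that `_update_causal_mask` produces internally. The core of that mask is simple to sketch in isolation (a generic illustration, not the library's exact routine, which also folds in the padding mask and cache offsets):

import torch

def make_causal_mask(tgt_len: int, dtype=torch.float32):
    # Additive causal mask: 0 on and below the diagonal, a huge negative value above it,
    # shaped (1, 1, tgt_len, tgt_len) so it broadcasts over batch and heads.
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, dtype=dtype)
    mask = torch.triu(mask, diagonal=1)  # keep only the strict upper triangle negative
    return mask[None, None, :, :]

m = make_causal_mask(4)
assert m[0, 0, 2, 1].item() == 0.0   # position 2 may attend to position 1
assert m[0, 0, 1, 2].item() < -1e30  # but not the other way around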
3
2
59
8
35
16
10
0.5
1
12
4
0
4
9
4
6
246
38
139
42
120
69
74
28
69
36
2
3
41
3,591
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianDecoderLayer
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
from .configuration_marian import MarianConfig


class MarianDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MarianConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = MarianAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = MarianAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        cache_position: Optional[torch.Tensor] = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross-Attention Block
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states

            hidden_states, cross_attn_weights = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                cache_position=cache_position,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        return outputs
class MarianDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MarianConfig, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        '''
        pass
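Each of the three sublayers above (self-attention, cross-attention, feed-forward) follows the same post-layer-norm residual pattern: y = LayerNorm(x + Dropout(Sublayer(x))). A minimal sketch of that pattern in isolation (the class name and wiring here are illustrative):

import torch
from torch import nn

class PostLNResidual(nn.Module):
    # Wrap any sublayer in the residual + dropout + LayerNorm pattern
    # that MarianDecoderLayer applies three times per layer.
    def __init__(self, sublayer: nn.Module, dim: int, dropout: float = 0.1):
        super().__init__()
        self.sublayer = sublayer
        self.norm = nn.LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.norm(x + self.dropout(self.sublayer(x)))

block = PostLNResidual(nn.Linear(16, 16), dim=16)
y = block(torch.randn(2, 5, 16))
assert y.shape == (2, 5, 16)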
4
1
58
6
40
13
4
0.31
1
4
1
0
2
11
2
12
118
12
81
32
67
25
44
21
41
6
1
1
7
3,592
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianDecoderWrapper
class MarianDecoderWrapper(MarianPreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """

    def __init__(self, config):
        super().__init__(config)
        self.decoder = MarianDecoder(config)

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)
class MarianDecoderWrapper(MarianPreTrainedModel):
    '''
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    '''

    def __init__(self, config):
        pass

    def forward(self, *args, **kwargs):
        pass
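The point of the wrapper is purely structural: nesting the decoder under an attribute named `decoder` makes the module's state-dict keys line up with checkpoints saved under a `decoder.` prefix, and forwarding delegates everything else. A toy demonstration of that effect (the inner module here is a stand-in):

import torch
from torch import nn

class Inner(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)

class Wrapper(nn.Module):
    def __init__(self):
        super().__init__()
        self.decoder = Inner()  # the attribute name becomes the state-dict prefix

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)

print(list(Inner().state_dict()))    # ['proj.weight', 'proj.bias']
print(list(Wrapper().state_dict()))  # ['decoder.proj.weight', 'decoder.proj.bias']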
3
1
3
0
3
0
1
0.67
1
2
1
0
2
1
2
4
12
2
6
4
3
4
6
4
3
1
2
0
2
3,593
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianEncoder
import math
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from .configuration_marian import MarianConfig


class MarianEncoder(MarianPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`MarianEncoderLayer`].

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)

        self.embed_positions = MarianSinusoidalPositionalEmbedding(
            config.max_position_embeddings, embed_dim, self.padding_idx
        )
        self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)])

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple[torch.Tensor], BaseModelOutput]:
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(input_shape)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        attention_mask = self._update_full_mask(attention_mask, inputs_embeds)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == len(self.layers), (
                f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                f" {head_mask.size()[0]}."
            )

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True

            if to_drop:
                layer_outputs = (None, None)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    layer_head_mask=head_mask[idx] if head_mask is not None else None,
                    output_attentions=output_attentions,
                )
                hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class MarianEncoder(MarianPreTrainedModel):
    '''
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`MarianEncoderLayer`].

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    '''

    def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding]=None):
        pass

    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutput]:
        '''
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
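LayerDrop (arXiv:1909.11556), as used in the encoder loop above, is just a per-layer coin flip at training time: a dropped layer acts as the identity because the residual stream passes through untouched. A standalone sketch:

import torch
from torch import nn

def layerdrop_forward(x, layers, p_drop: float, training: bool):
    # Run x through a stack of layers, randomly skipping each whole layer
    # with probability p_drop during training (LayerDrop).
    for layer in layers:
        if training and torch.rand([]) < p_drop:
            continue  # identity: skip this layer entirely
        x = layer(x)
    return x

layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(6)])
out = layerdrop_forward(torch.randn(2, 8), layers, p_drop=0.5, training=True)
assert out.shape == (2, 8)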
3
2
39
6
24
9
7
0.45
1
11
4
0
4
9
4
6
168
28
97
33
83
44
63
24
58
23
2
3
28
3,594
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianEncoderLayer
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from .configuration_marian import MarianConfig


class MarianEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MarianConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = MarianAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
            config=config,
            layer_idx=layer_idx,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        layer_head_mask: torch.FloatTensor,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
class MarianEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MarianConfig, layer_idx: Optional[int]=None):
        pass

    def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        '''
        pass
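The clamp at the end of forward is a common fp16 overflow guard: if any activation has gone inf/NaN, the whole tensor is clipped just inside the representable range. The same guard in isolation:

import torch

def clamp_fp16_overflow(x: torch.Tensor) -> torch.Tensor:
    # Clip fp16 activations just inside the finite range, as in
    # MarianEncoderLayer.forward, so inf/NaN values do not propagate.
    if x.dtype == torch.float16 and (torch.isinf(x).any() or torch.isnan(x).any()):
        clamp_value = torch.finfo(x.dtype).max - 1000
        x = torch.clamp(x, min=-clamp_value, max=clamp_value)
    return x

x = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
y = clamp_fp16_overflow(x)
assert torch.isfinite(y).all()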
3
1
33
3
25
6
2
0.22
1
3
1
0
2
9
2
12
68
7
50
22
41
11
32
16
29
3
1
1
4
3,595
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianForCausalLM
from typing import Callable, Optional, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging


class MarianForCausalLM(MarianPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        self.model = MarianDecoderWrapper(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
        """
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en")
        >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        logits = self.lm_head(outputs[0])

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
class MarianForCausalLM(MarianPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, MarianForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en") >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```''' pass
8
1
19
3
9
8
2
0.84
2
6
2
0
8
2
9
11
186
33
83
37
56
70
42
20
32
7
2
1
16
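The docstring example in the MarianForCausalLM record above covers inference only; the sketch below adds the loss path. Per the docstring, `labels` drives a token-level cross-entropy and `-100` marks ignored positions. The checkpoint and `add_cross_attention=False` argument follow the docstring's own example; the sample sentence is an assumption.

```python
# Hedged sketch: loss computation with MarianForCausalLM.
from transformers import AutoTokenizer, MarianForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en")
model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False)

inputs = tokenizer("Bonjour, comment ça va ?", return_tensors="pt")
# In this forward pass, labels align one-to-one with input positions;
# set individual entries to -100 to exclude them from the loss.
outputs = model(**inputs, labels=inputs.input_ids)
print(float(outputs.loss))
```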
3596
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianMTModel
from typing import Callable, Optional, Union from torch.nn import CrossEntropyLoss from torch import nn from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput import torch from .configuration_marian import MarianConfig from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache @auto_docstring(custom_intro='\n The Marian Model with a language modeling head. Can be used for summarization.\n ') class MarianMTModel(MarianPreTrainedModel, GenerationMixin): base_model_prefix = 'model' _keys_to_ignore_on_load_missing = ['final_logits_bias', 'encoder.embed_positions.weight', 'decoder.embed_positions.weight'] _keys_to_ignore_on_save = ['model.encoder.embed_positions.weight', 'model.decoder.embed_positions.weight'] _tied_weights_keys = ['model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: MarianConfig): super().__init__(config) self.model = MarianModel(config) target_vocab_size = config.vocab_size if config.share_encoder_decoder_embeddings else config.decoder_vocab_size self.register_buffer('final_logits_bias', torch.zeros((1, target_vocab_size))) self.lm_head = nn.Linear(config.d_model, target_vocab_size, bias=False) self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) if self.config.share_encoder_decoder_embeddings: self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of=None, *args) -> nn.Embedding: old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) self.set_input_embeddings(new_embeddings) new_num_tokens = new_embeddings.weight.shape[0] if self.config.share_encoder_decoder_embeddings: self.config.decoder_vocab_size = new_num_tokens if self.config.share_encoder_decoder_embeddings and self.get_output_embeddings() is not None and (not self.config.tie_word_embeddings): old_lm_head = self.get_output_embeddings() new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings() def resize_decoder_token_embeddings(self, new_num_tokens): if self.config.share_encoder_decoder_embeddings: raise ValueError('`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` is `True`. 
Please use `resize_token_embeddings` instead.') old_embeddings = self.model.get_decoder_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.model.set_decoder_input_embeddings(new_embeddings) if self.get_output_embeddings() is not None and (not self.config.tie_word_embeddings): old_lm_head = self.get_output_embeddings() new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) self.set_output_embeddings(new_lm_head) model_embeds = self.model.get_decoder_input_embeddings() if new_num_tokens is None: return model_embeds self.config.decoder_vocab_size = new_num_tokens self.tie_weights() self._resize_final_logits_bias(new_num_tokens) return model_embeds def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) def set_output_embeddings(self, new_embeddings: nn.Embedding): self.lm_head = new_embeddings def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ output_embeddings = self.get_output_embeddings() if output_embeddings is not None and getattr(self.config, 'tie_word_embeddings', True): word_embeddings = self.get_decoder().get_input_embeddings() self._tie_or_clone_weights(output_embeddings, word_embeddings) if getattr(self.config, 'is_encoder_decoder', False) and getattr(self.config, 'tie_encoder_decoder', False): if hasattr(self, self.base_model_prefix): self = getattr(self, self.base_model_prefix) tied_weights = self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix, 'encoder') self._dynamic_tied_weights_keys = tied_weights for module in self.modules(): if hasattr(module, '_tie_weights'): module._tie_weights() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple[torch.Tensor], BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Seq2SeqLMOutput: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. 
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, MarianMTModel >>> src = "fr" # source language >>> trg = "en" # target language >>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}" >>> model = MarianMTModel.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) >>> sample_text = "où est l'arrêt de bus ?" >>> batch = tokenizer([sample_text], return_tensors="pt") >>> generated_ids = model.generate(**batch) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] "Where's the bus stop?" ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.decoder_vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@auto_docstring(custom_intro='\n The Marian Model with a language modeling head. Can be used for summarization.\n ') class MarianMTModel(MarianPreTrainedModel, GenerationMixin): def __init__(self, config: MarianConfig): pass def get_encoder(self): pass def get_decoder(self): pass def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: pass def _resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of=None, *args) -> nn.Embedding: pass def resize_decoder_token_embeddings(self, new_num_tokens): pass def _resize_final_logits_bias(self, new_num_tokens: int) -> None: pass def set_output_embeddings(self, new_embeddings: nn.Embedding): pass def tie_weights(self): ''' Tie the weights between the input embeddings and the output embeddings. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple[torch.Tensor], BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Seq2SeqLMOutput: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example: ```python >>> from transformers import AutoTokenizer, MarianMTModel >>> src = "fr" # source language >>> trg = "en" # target language >>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}" >>> model = MarianMTModel.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) >>> sample_text = "où est l'arrêt de bus ?" >>> batch = tokenizer([sample_text], return_tensors="pt") >>> generated_ids = model.generate(**batch) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] "Where's the bus stop?" ``` ''' pass def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): pass
14
2
15
2
12
2
3
0.14
2
10
4
0
12
4
13
15
228
36
168
69
130
24
99
46
85
8
2
2
34
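The translation example in the MarianMTModel docstring above is inference-only; the sketch below shows the teacher-forced training path. As the forward code shows, when `labels` is given and `decoder_input_ids` is not, the labels are shifted right internally (see `prepare_decoder_input_ids_from_labels`). The `text_target` tokenizer argument is standard transformers API; the sentence pair reuses the docstring's own sample.

```python
# Hedged sketch: one supervised fine-tuning step with MarianMTModel.
from transformers import AutoTokenizer, MarianMTModel

model_name = "Helsinki-NLP/opus-mt-fr-en"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(
    ["où est l'arrêt de bus ?"],
    text_target=["Where's the bus stop?"],
    return_tensors="pt",
)
outputs = model(input_ids=batch.input_ids, attention_mask=batch.attention_mask, labels=batch.labels)
outputs.loss.backward()  # gradients flow into lm_head and the shared embeddings;
                         # final_logits_bias is a buffer and stays fixed
```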
3597
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianModel
from .configuration_marian import MarianConfig from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from torch import nn import copy import torch @auto_docstring class MarianModel(MarianPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: MarianConfig): super().__init__(config) padding_idx, vocab_size = (config.pad_token_id, config.vocab_size) self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) if self.config.share_encoder_decoder_embeddings: encoder_embed_tokens = decoder_embed_tokens = self.shared else: encoder_embed_tokens = copy.deepcopy(self.shared) decoder_embed_tokens = copy.deepcopy(self.shared) self.shared = None self.encoder = MarianEncoder(config, encoder_embed_tokens) self.decoder = MarianDecoder(config, decoder_embed_tokens) self.post_init() def get_input_embeddings(self): return self.get_encoder().get_input_embeddings() def set_input_embeddings(self, value): if self.config.share_encoder_decoder_embeddings: self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared else: self.encoder.embed_tokens = value def get_decoder_input_embeddings(self): if self.config.share_encoder_decoder_embeddings: raise ValueError('`get_decoder_input_embeddings` should not be called if `config.share_encoder_decoder_embeddings` is `True`. Please use `get_input_embeddings` instead.') return self.get_decoder().get_input_embeddings() def set_decoder_input_embeddings(self, value): if self.config.share_encoder_decoder_embeddings: raise ValueError('`config.share_encoder_decoder_embeddings` is set to `True` meaning the decoder input embeddings are shared with the encoder. In order to set the decoder input embeddings, you should simply set the encoder input embeddings by calling `set_input_embeddings` with the appropriate embeddings.') self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def resize_decoder_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: if self.config.share_encoder_decoder_embeddings: raise ValueError('`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` is `True`. 
Please use `resize_token_embeddings` instead.') old_embeddings = self.get_decoder_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.set_decoder_input_embeddings(new_embeddings) model_embeds = self.get_decoder_input_embeddings() if new_num_tokens is None: return model_embeds self.config.decoder_vocab_size = new_num_tokens self.tie_weights() return model_embeds @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple[torch.Tensor], BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Seq2SeqModelOutput: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, MarianModel >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") >>> decoder_inputs = tokenizer( ... "<pad> Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen", ... return_tensors="pt", ... add_special_tokens=False, ... 
) >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 26, 512] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class MarianModel(MarianPreTrainedModel): def __init__(self, config: MarianConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_decoder_input_embeddings(self): pass def set_decoder_input_embeddings(self, value): pass def get_encoder(self): pass def resize_decoder_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple[torch.Tensor], BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Seq2SeqModelOutput: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, MarianModel >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") >>> decoder_inputs = tokenizer( ... "<pad> Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen", ... return_tensors="pt", ... add_special_tokens=False, ... ) >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 26, 512] ```''' pass
11
1
19
2
14
3
3
0.22
1
10
5
0
9
3
9
11
181
28
126
38
97
28
58
20
48
10
2
1
24
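A sketch of the `encoder_outputs` path in `MarianModel.forward` above: encode once, then feed the cached encoder states into a decoder call, which skips re-running the encoder. The checkpoint name matches the class docstring; the input sentence and single-step decoder input are assumptions.

```python
# Hedged sketch: reuse precomputed encoder states via `encoder_outputs`.
import torch
from transformers import AutoTokenizer, MarianModel

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

enc_in = tokenizer("Studies have shown that owning a dog is good for you", return_tensors="pt")
encoder_outputs = model.get_encoder()(**enc_in)  # BaseModelOutput

decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])
out = model(
    encoder_outputs=encoder_outputs,
    attention_mask=enc_in.attention_mask,
    decoder_input_ids=decoder_input_ids,
)
print(out.last_hidden_state.shape)  # (1, 1, d_model)
```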
3598
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianPreTrainedModel
from typing import Callable, Optional, Union from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa from torch import nn from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging import torch from .configuration_marian import MarianConfig from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel @auto_docstring class MarianPreTrainedModel(PreTrainedModel): config: MarianConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True def _init_weights(self, module: Union[nn.Linear, nn.Embedding, MarianSinusoidalPositionalEmbedding]): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, MarianSinusoidalPositionalEmbedding): module._init_weight() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids, 'decoder_input_ids': input_ids} return dummy_inputs def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache): if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) elif attention_mask is None: attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=input_tensor.device)) return attention_mask if 'flash' in self.config._attn_implementation: if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: 
target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. 
""" if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): if encoder_hidden_states is not None and encoder_attention_mask is not None: if 'flash' in self.config._attn_implementation: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == 'sdpa': encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) elif self.config._attn_implementation == 'flex_attention': if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False) else: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) return encoder_attention_mask
@auto_docstring class MarianPreTrainedModel(PreTrainedModel): def _init_weights(self, module: Union[nn.Linear, nn.Embedding, MarianSinusoidalPositionalEmbedding]): pass @property def dummy_inputs(self): pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass
10
1
11
0
11
0
4
0
1
1
1
6
2
0
2
2
28
2
26
11
22
0
19
10
16
6
1
2
7
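The static mask helper in the MarianPreTrainedModel record above can be exercised without any model weights. A sketch with a toy 4-token batch whose last position is padding; the arguments follow the helper's own docstring.

```python
# Hedged sketch: build the 4D causal mask from a 2D padding mask.
import torch
from transformers.models.marian.modeling_marian import MarianPreTrainedModel

mask_2d = torch.tensor([[1, 1, 1, 0]])  # last position is padding
causal = MarianPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=mask_2d,
    sequence_length=4,
    target_length=4,
    dtype=torch.float32,
    cache_position=torch.arange(4),
    batch_size=1,
)
print(causal.shape)  # torch.Size([1, 1, 4, 4])
print((causal == torch.finfo(torch.float32).min).any())  # masked slots hold dtype-min
```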
3599
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/marian/modeling_marian.py
transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding
from typing import Callable, Optional, Union import numpy as np from torch import nn import torch class MarianSinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None: super().__init__(num_positions, embedding_dim) def _init_weight(self): """ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:] """ n_pos, dim = self.weight.shape position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False) sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) self.weight = nn.Parameter(out, requires_grad=False) @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" if position_ids is None: bsz, seq_len = input_ids_shape[:2] position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device) return super().forward(position_ids)
class MarianSinusoidalPositionalEmbedding(nn.Embedding): '''This module produces sinusoidal positional embeddings of any length.''' def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None: pass def _init_weight(self): ''' Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:] ''' pass @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor: '''`input_ids_shape` is expected to be [bsz x seqlen].''' pass
5
3
8
0
7
2
1
0.3
1
4
0
0
2
1
3
3
32
3
23
12
17
7
17
10
13
2
1
0
4
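A sketch of the sinusoidal table in the record above: `_init_weight` fills sin features into the first half of each vector and cos into the second, as its docstring states. Standalone use (outside `MarianPreTrainedModel._init_weights`, which normally triggers initialization) requires calling `_init_weight` manually; the sizes below are illustrative.

```python
# Hedged sketch: inspect MarianSinusoidalPositionalEmbedding standalone.
import torch
from transformers.models.marian.modeling_marian import MarianSinusoidalPositionalEmbedding

emb = MarianSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=8)
emb._init_weight()  # normally invoked via MarianPreTrainedModel._init_weights

positions = emb(torch.Size([2, 6]))  # returns embeddings for positions 0..5
print(positions.shape)               # torch.Size([6, 8]) — (seq_len, dim), not batched
assert torch.isclose(emb.weight[0, 0], torch.tensor(0.0))  # sin(0) == 0 at position 0
```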