Dataset schema (column name, dtype, and value range). Each example below is shown as one record per row, with the two code-valued columns in fenced blocks and the metric columns collected on a single `metrics` line.

| column | dtype | value range |
|---|---|---|
| id | int64 | 0–328k |
| repository_name | string | length 7–58 |
| file_path | string | length 9–302 |
| class_name | string | length 5–256 |
| human_written_code | string | length 16–2.16M |
| class_skeleton | string | length 18–1.49M (nullable) |
| total_program_units | int64 | 1–1.76k |
| total_doc_str | int64 | 0–771 |
| AvgCountLine | float64 | 0–7.89k |
| AvgCountLineBlank | float64 | 0–297 |
| AvgCountLineCode | float64 | 0–7.89k |
| AvgCountLineComment | float64 | 0–7.89k |
| AvgCyclomatic | float64 | 0–130 |
| CommentToCodeRatio | float64 | 0–168 |
| CountClassBase | float64 | 0–40 |
| CountClassCoupled | float64 | 0–583 |
| CountClassCoupledModified | float64 | 0–575 |
| CountClassDerived | float64 | 0–5.35k |
| CountDeclInstanceMethod | float64 | 0–529 |
| CountDeclInstanceVariable | float64 | 0–296 |
| CountDeclMethod | float64 | 0–599 |
| CountDeclMethodAll | float64 | 0–1.12k |
| CountLine | float64 | 1–40.4k |
| CountLineBlank | float64 | 0–8.16k |
| CountLineCode | float64 | 1–25.7k |
| CountLineCodeDecl | float64 | 1–8.15k |
| CountLineCodeExe | float64 | 0–24.2k |
| CountLineComment | float64 | 0–16.5k |
| CountStmt | float64 | 1–9.71k |
| CountStmtDecl | float64 | 1–8.15k |
| CountStmtExe | float64 | 0–9.69k |
| MaxCyclomatic | float64 | 0–759 |
| MaxInheritanceTree | float64 | 0–16 |
| MaxNesting | float64 | 0–34 |
| SumCyclomatic | float64 | 0–2.9k |
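A minimal sketch of loading and inspecting an example from a dataset with this schema using the `datasets` library. The dataset identifier below is a placeholder (the published name is not given in this excerpt), so treat it as an assumption:

```python
# Sketch only: "<org>/<dataset-name>" is a placeholder for the actual dataset id.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset-name>", split="train")

# Column names and dtypes should line up with the schema table above.
print(ds.features)

# Inspect one record: repository/class metadata, the raw class source,
# the signature-only skeleton, and a couple of the Understand-style metrics.
row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])
print(row["class_skeleton"][:200])
print(row["CountLineCode"], row["MaxCyclomatic"])
```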
id: 2,400
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieEncoder

human_written_code:
```python
from ...cache_utils import Cache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch

class ErnieEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ErnieLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_values, cache_position=cache_position, **kwargs)
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
```

class_skeleton:
```python
class ErnieEncoder(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        pass
```

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=45, AvgCountLineBlank=4, AvgCountLineCode=41, AvgCountLineComment=0, AvgCyclomatic=9, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=91, CountLineBlank=8, CountLineCode=83, CountLineCodeDecl=26, CountLineCodeExe=68, CountLineComment=0, CountStmt=35, CountStmtDecl=14, CountStmtExe=32, MaxCyclomatic=17, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=18
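In each record, `class_skeleton` keeps the class and method signatures (plus decorators and docstrings) and replaces every body with `pass`. A minimal sketch of one way such a skeleton could be derived from `human_written_code` with Python's `ast` module; this is an illustration under that assumption, not necessarily the dataset's actual extraction pipeline:

```python
# Illustration only: derive a "skeleton" (signatures + docstrings, bodies replaced
# by `pass`) from full class source, similar in spirit to the class_skeleton column.
import ast

def make_skeleton(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            kept = []
            # Keep a leading docstring if the function has one.
            if (node.body and isinstance(node.body[0], ast.Expr)
                    and isinstance(node.body[0].value, ast.Constant)
                    and isinstance(node.body[0].value.value, str)):
                kept.append(node.body[0])
            kept.append(ast.Pass())
            node.body = kept
    # Keep only class (and function) definitions at module level, dropping imports.
    tree.body = [n for n in tree.body if isinstance(n, (ast.ClassDef, ast.FunctionDef))]
    return ast.unparse(tree)

example = '''
class Greeter:
    def __init__(self, name):
        self.name = name

    def greet(self):
        """Say hello."""
        return f"hello {self.name}"
'''
print(make_skeleton(example))
```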
id: 2,401
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForCausalLM

human_written_code:
```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from ...processing_utils import Unpack
from ...generation import GenerationMixin
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch

@auto_docstring(custom_intro='\n Ernie Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class ErnieForCausalLM(ErniePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight']
    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning('If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True.`')
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.cls = ErnieOnlyMLMHead(config)
        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.Tensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        if labels is not None:
            use_cache = False
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
```

class_skeleton:
```python
@auto_docstring(custom_intro='\n Ernie Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class ErnieForCausalLM(ErniePreTrainedModel, GenerationMixin):
    def __init__(self, config):
        pass
    def get_output_embeddings(self):
        pass
    def set_output_embeddings(self, new_embeddings):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.Tensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        '''
        pass
```

metrics: total_program_units=8, total_doc_str=1, AvgCountLine=22, AvgCountLineBlank=2, AvgCountLineCode=16, AvgCountLineComment=5, AvgCyclomatic=2, CommentToCodeRatio=0.31, CountClassBase=2, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=2, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=128, CountLineBlank=15, CountLineCode=86, CountLineCodeDecl=35, CountLineCodeExe=56, CountLineComment=27, CountStmt=33, CountStmtDecl=16, CountStmtExe=27, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=12
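The metric columns in this row also illustrate how the aggregates relate: assuming `CommentToCodeRatio` is comment lines divided by code lines, the reported 0.31 matches `CountLineComment / CountLineCode` = 27 / 86. A quick sanity check:

```python
# Sanity check (assumption: CommentToCodeRatio = CountLineComment / CountLineCode).
row = {"CountLineComment": 27, "CountLineCode": 86, "CommentToCodeRatio": 0.31}

ratio = row["CountLineComment"] / row["CountLineCode"]
print(round(ratio, 2))                          # 0.31
assert round(ratio, 2) == row["CommentToCodeRatio"]
```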
id: 2,402
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForMaskedLM

human_written_code:
```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch

@auto_docstring
class ErnieForMaskedLM(ErniePreTrainedModel):
    _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight']
    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            logger.warning('If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.cls = ErnieOnlyMLMHead(config)
        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        if self.config.pad_token_id is None:
            raise ValueError('The PAD token should be defined for generation')
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device)
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {'input_ids': input_ids, 'attention_mask': attention_mask}
    @classmethod
    def can_generate(cls) -> bool:
        """
        Legacy correction: ErnieForMaskedLM can't call `generate()` from `GenerationMixin`, even though it has a
        `prepare_inputs_for_generation` method.
        """
        return False
```

class_skeleton:
```python
@auto_docstring
class ErnieForMaskedLM(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    def get_output_embeddings(self):
        pass
    def set_output_embeddings(self, new_embeddings):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        '''
        pass
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        pass
    @classmethod
    def can_generate(cls) -> bool:
        '''
        Legacy correction: ErnieForMaskedLM can't call `generate()` from `GenerationMixin`, even though it has a
        `prepare_inputs_for_generation` method.
        '''
        pass
```

metrics: total_program_units=11, total_doc_str=2, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=14, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.16, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=2, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=111, CountLineBlank=17, CountLineCode=82, CountLineCodeDecl=34, CountLineCodeExe=53, CountLineComment=13, CountStmt=36, CountStmtDecl=18, CountStmtExe=30, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=11
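A minimal usage sketch for a masked-LM head of this shape. The checkpoint name is taken from the docstrings elsewhere in this file (`nghuyong/ernie-1.0-base-zh`); whether its hub weights include a trained MLM head is an assumption here, so the predicted token is illustrative only:

```python
# Sketch only: fill a masked token with ErnieForMaskedLM
# (assumes the nghuyong/ernie-1.0-base-zh checkpoint and its [MASK] token).
import torch
from transformers import AutoTokenizer, ErnieForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForMaskedLM.from_pretrained("nghuyong/ernie-1.0-base-zh")

inputs = tokenizer("巴黎是[MASK]国的首都。", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring vocabulary token at the masked position.
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```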
id: 2,403
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForMultipleChoice

human_written_code:
```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch

@auto_docstring
class ErnieForMultipleChoice(ErniePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.ernie = ErnieModel(config)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        task_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
```

class_skeleton:
```python
@auto_docstring
class ErnieForMultipleChoice(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        task_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        '''
        pass
```

metrics: total_program_units=6, total_doc_str=1, AvgCountLine=40, AvgCountLineBlank=5, AvgCountLineCode=32, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=88, CountLineBlank=10, CountLineCode=70, CountLineCodeDecl=29, CountLineCodeExe=48, CountLineComment=8, CountStmt=29, CountStmtDecl=15, CountStmtExe=26, MaxCyclomatic=11, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=13
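As the docstring above describes, multiple-choice inputs carry an extra `num_choices` dimension that `forward` flattens before the encoder pass and reshapes into per-choice logits. A minimal sketch of preparing such inputs, again assuming the `nghuyong/ernie-1.0-base-zh` checkpoint (its multiple-choice head is newly initialized, so the scores are illustrative):

```python
# Sketch only: shape (batch_size, num_choices, seq_len) inputs for ErnieForMultipleChoice.
import torch
from transformers import AutoTokenizer, ErnieForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForMultipleChoice.from_pretrained("nghuyong/ernie-1.0-base-zh")

prompt = "天空是什么颜色的？"
choices = ["蓝色", "绿色"]

# Pair the prompt with each choice, then add the num_choices dimension in front.
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # (1, num_choices, seq_len)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_choices)
print(logits.argmax(dim=-1))
```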
id: 2,404
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForNextSentencePrediction

human_written_code:
````python
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
import warnings
from ...processing_utils import Unpack
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging

@auto_docstring(custom_intro='\n Ernie Model with a `next sentence prediction (classification)` head on top.\n ')
class ErnieForNextSentencePrediction(ErniePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.ernie = ErnieModel(config)
        self.cls = ErnieOnlyNSPHead(config)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], NextSentencePredictorOutput]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        ```
        """
        if 'next_sentence_label' in kwargs:
            warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning)
            labels = kwargs.pop('next_sentence_label')
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)
        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
        return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
````

class_skeleton:
````python
@auto_docstring(custom_intro='\n Ernie Model with a `next sentence prediction (classification)` head on top.\n ')
class ErnieForNextSentencePrediction(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], NextSentencePredictorOutput]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        ```
        '''
        pass
````

metrics: total_program_units=6, total_doc_str=1, AvgCountLine=47, AvgCountLineBlank=9, AvgCountLineCode=28, AvgCountLineComment=11, AvgCyclomatic=4, CommentToCodeRatio=0.4, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=99, CountLineBlank=18, CountLineCode=58, CountLineCodeDecl=26, CountLineCodeExe=39, CountLineComment=23, CountStmt=22, CountStmtDecl=11, CountStmtExe=19, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=7
id: 2,405
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForPreTraining

human_written_code:
````python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
import torch.nn as nn
import torch

@auto_docstring(custom_intro='\n Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ')
class ErnieForPreTraining(ErniePreTrainedModel):
    _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight']
    def __init__(self, config):
        super().__init__(config)
        self.ernie = ErnieModel(config)
        self.cls = ErniePreTrainingHeads(config)
        self.post_init()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], ErnieForPreTrainingOutput]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
            pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, ErnieForPreTraining
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        """
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        return ErnieForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
````

class_skeleton:
````python
@auto_docstring(custom_intro='\n Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ')
class ErnieForPreTraining(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    def get_output_embeddings(self):
        pass
    def set_output_embeddings(self, new_embeddings):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], ErnieForPreTrainingOutput]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
            pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, ErnieForPreTraining
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        '''
        pass
````

metrics: total_program_units=8, total_doc_str=1, AvgCountLine=24, AvgCountLineBlank=4, AvgCountLineCode=14, AvgCountLineComment=7, AvgCyclomatic=2, CommentToCodeRatio=0.48, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=2, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=107, CountLineBlank=18, CountLineCode=60, CountLineCodeDecl=31, CountLineCodeExe=39, CountLineComment=29, CountStmt=27, CountStmtDecl=16, CountStmtExe=22, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
id: 2,406
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForPreTrainingOutput

human_written_code:
```python
import torch
import torch.nn as nn
from dataclasses import dataclass
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging

@dataclass
@auto_docstring(custom_intro='\n Output type of [`ErnieForPreTraining`].\n ')
class ErnieForPreTrainingOutput(ModelOutput):
    """
    loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the masked language modeling loss and the next sequence prediction
        (classification) loss.
    prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
        Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
        before SoftMax).
    """
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: Optional[torch.FloatTensor] = None
    seq_relationship_logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
```

class_skeleton:
```python
@dataclass
@auto_docstring(custom_intro='\n Output type of [`ErnieForPreTraining`].\n ')
class ErnieForPreTrainingOutput(ModelOutput):
    '''
    loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the masked language modeling loss and the next sequence prediction
        (classification) loss.
    prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
        Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
        before SoftMax).
    '''
    pass
```

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=3.5, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=31, CountLineBlank=4, CountLineCode=6, CountLineCodeDecl=6, CountLineCodeExe=5, CountLineComment=21, CountStmt=6, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
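`ErnieForPreTrainingOutput` subclasses `ModelOutput`, so its fields can be read by attribute, by string key, or positionally via `to_tuple()` (which skips `None` fields). A small sketch with dummy tensors:

```python
# Sketch only: field access on the ModelOutput dataclass shown above.
import torch
from transformers.models.ernie.modeling_ernie import ErnieForPreTrainingOutput

out = ErnieForPreTrainingOutput(
    prediction_logits=torch.zeros(1, 4, 100),   # (batch, seq_len, vocab)
    seq_relationship_logits=torch.zeros(1, 2),  # (batch, 2)
)

print(out.loss)                             # None: no labels were provided
print(out.prediction_logits.shape)          # torch.Size([1, 4, 100])
print(out["seq_relationship_logits"].shape) # dict-style access by field name
print(len(out.to_tuple()))                  # 2: only the non-None fields
```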
id: 2,407
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForQuestionAnswering

human_written_code:
```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch

@auto_docstring
class ErnieForQuestionAnswering(ErniePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        """
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
```

class_skeleton:
```python
@auto_docstring
class ErnieForQuestionAnswering(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        '''
        pass
```

metrics: total_program_units=6, total_doc_str=1, AvgCountLine=42, AvgCountLineBlank=5, AvgCountLineCode=31, AvgCountLineComment=7, AvgCyclomatic=4, CommentToCodeRatio=0.22, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=88, CountLineBlank=10, CountLineCode=64, CountLineCodeDecl=31, CountLineCodeExe=46, CountLineComment=14, CountStmt=32, CountStmtDecl=16, CountStmtExe=29, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=8
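A minimal sketch of turning the `start_logits`/`end_logits` above into an answer span, assuming the `nghuyong/ernie-1.0-base-zh` checkpoint; its QA head is newly initialized, so the decoded span is illustrative rather than a real answer:

```python
# Sketch only: greedy span extraction from start/end logits
# (assumes the nghuyong/ernie-1.0-base-zh checkpoint; the QA head is untrained).
import torch
from transformers import AutoTokenizer, ErnieForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForQuestionAnswering.from_pretrained("nghuyong/ernie-1.0-base-zh")

question, context = "首都是哪里？", "法国的首都是巴黎。"
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    out = model(**inputs)

# Most likely start/end positions; a fine-tuned checkpoint would give meaningful spans.
start = out.start_logits.argmax(dim=-1).item()
end = out.end_logits.argmax(dim=-1).item()
answer_ids = inputs["input_ids"][0, start : end + 1]
print(tokenizer.decode(answer_ids))
```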
id: 2,408
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForSequenceClassification

human_written_code:
```python
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
from ...processing_utils import Unpack
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging

@auto_docstring(custom_intro='\n Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class ErnieForSequenceClassification(ErniePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.ernie = ErnieModel(config)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
```

class_skeleton:
```python
@auto_docstring(custom_intro='\n Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class ErnieForSequenceClassification(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        '''
        pass
```

metrics: total_program_units=6, total_doc_str=1, AvgCountLine=43, AvgCountLineBlank=4, AvgCountLineCode=36, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=90, CountLineBlank=9, CountLineCode=73, CountLineCodeDecl=29, CountLineCodeExe=56, CountLineComment=8, CountStmt=36, CountStmtDecl=15, CountStmtExe=33, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=14
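The `problem_type` branch above picks the loss from the label dtype and `num_labels`. A self-contained sketch of the three cases with dummy tensors:

```python
# Sketch only: how the problem_type branch selects a loss, shown with dummy tensors.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

num_labels = 3
logits = torch.randn(4, num_labels)

# Integer labels with num_labels > 1  ->  single_label_classification (cross-entropy).
int_labels = torch.tensor([0, 2, 1, 2])
print(CrossEntropyLoss()(logits.view(-1, num_labels), int_labels.view(-1)))

# Float multi-hot labels  ->  multi_label_classification (BCE with logits).
multi_hot = torch.tensor([[1.0, 0.0, 1.0]] * 4)
print(BCEWithLogitsLoss()(logits, multi_hot))

# num_labels == 1  ->  regression (MSE on squeezed logits).
reg_logits = torch.randn(4, 1)
reg_targets = torch.randn(4)
print(MSELoss()(reg_logits.squeeze(), reg_targets.squeeze()))
```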
id: 2,409
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieForTokenClassification

human_written_code:
```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch

@auto_docstring
class ErnieForTokenClassification(ErniePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        """
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
```

class_skeleton:
```python
@auto_docstring
class ErnieForTokenClassification(ErniePreTrainedModel):
    def __init__(self, config):
        pass
    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        '''
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        '''
        pass
```

metrics: total_program_units=6, total_doc_str=1, AvgCountLine=33, AvgCountLineBlank=4, AvgCountLineCode=27, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=70, CountLineBlank=9, CountLineCode=55, CountLineCodeDecl=28, CountLineCodeExe=38, CountLineComment=6, CountStmt=23, CountStmtDecl=14, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=7
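A minimal sketch of reading per-token predictions from logits of shape `(batch, seq_len, num_labels)`, assuming the `nghuyong/ernie-1.0-base-zh` checkpoint with a hypothetical 5-label head (untrained, so the label ids are illustrative):

```python
# Sketch only: per-token label ids from ErnieForTokenClassification
# (assumes nghuyong/ernie-1.0-base-zh; num_labels=5 is an arbitrary example).
import torch
from transformers import AutoTokenizer, ErnieForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = ErnieForTokenClassification.from_pretrained("nghuyong/ernie-1.0-base-zh", num_labels=5)

inputs = tokenizer("巴黎是法国的首都。", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # (batch, seq_len, num_labels)

predictions = logits.argmax(dim=-1)[0]     # one label id per token
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, label_id in zip(tokens, predictions.tolist()):
    print(token, label_id)
```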
id: 2,410
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieIntermediate

human_written_code:
```python
import torch.nn as nn
from ...activations import ACT2FN
import torch

class ErnieIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
```

class_skeleton:
```python
class ErnieIntermediate(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
```

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=5, CountLineCodeExe=9, CountLineComment=0, CountStmt=11, CountStmtDecl=5, CountStmtExe=8, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 2,411
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
class_name: transformers.models.ernie.modeling_ernie.ErnieLMPredictionHead

human_written_code:
```python
import torch.nn as nn
import torch

class ErnieLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = ErniePredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias
    def _tie_weights(self):
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
```

class_skeleton:
```python
class ErnieLMPredictionHead(nn.Module):
    def __init__(self, config):
        pass
    def _tie_weights(self):
        pass
    def forward(self, hidden_states):
        pass
```

metrics: total_program_units=4, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.23, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=21, CountLineBlank=5, CountLineCode=13, CountLineCodeDecl=7, CountLineCodeExe=9, CountLineComment=3, CountStmt=13, CountStmtDecl=7, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
2,412
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieLayer
|
from ...cache_utils import Cache, EncoderDecoderCache
import torch
import torch.nn as nn
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_layers import GradientCheckpointingLayer
class ErnieLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ErnieAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = ErnieAttention(config, position_embedding_type='absolute', is_causal=False, layer_idx=layer_idx, is_cross_attention=True)
self.intermediate = ErnieIntermediate(config)
self.output = ErnieOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(hidden_states, attention_mask, head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_output, _ = self.crossattention(self_attention_output, None, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value=past_key_value, **kwargs)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class ErnieLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 27
| 2
| 23
| 2
| 4
| 0.1
| 1
| 7
| 3
| 0
| 3
| 8
| 3
| 13
| 84
| 9
| 70
| 32
| 57
| 7
| 41
| 23
| 37
| 7
| 1
| 2
| 11
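The ErnieLayer record above routes its feed-forward through `apply_chunking_to_forward`. The sketch below is a simplified stand-in (not the library implementation) that shows the idea: because the feed-forward is position-wise, running it chunk by chunk along the sequence dimension gives the same result with lower peak memory:

```python
import torch
import torch.nn as nn

def chunked_feed_forward(ff, x, chunk_size, dim=1):
    # Simplified stand-in for apply_chunking_to_forward: run the position-wise
    # feed-forward over sequence chunks, then concatenate the results.
    if chunk_size == 0:
        return ff(x)
    return torch.cat([ff(chunk) for chunk in x.split(chunk_size, dim=dim)], dim=dim)

ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
x = torch.randn(2, 10, 16)
out = chunked_feed_forward(ff, x, chunk_size=4)
assert torch.allclose(out, ff(x), atol=1e-6)   # chunking does not change the output
```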
|
2,413
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieModel
|
from ...utils.generic import can_return_tuple, check_model_inputs
from ...masking_utils import create_causal_mask
from ...processing_utils import Unpack
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
@auto_docstring(custom_intro='\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in [Attention is\n all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n ')
class ErnieModel(ErniePreTrainedModel):
_no_split_modules = ['ErnieLayer']
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = ErnieEmbeddings(config)
self.encoder = ErnieEncoder(config)
self.pooler = ErniePooler(config) if add_pooling_layer else None
self.position_embedding_type = config.position_embedding_type
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
"""
task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
config.task_type_vocab_size-1]`.
"""
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
return_legacy_cache = False
if use_cache and (not isinstance(past_key_values, Cache)):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
return_legacy_cache = True
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, task_type_ids=task_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values)
else:
attention_mask = self._update_full_mask(attention_mask, embedding_output)
elif attention_mask is not None and attention_mask.dim() == 3:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, embedding_output.shape[:2], embedding_output)
else:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if return_legacy_cache:
encoder_outputs.past_key_values = encoder_outputs.past_key_values.to_legacy_cache()
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values)
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values)
else:
attention_mask = self._update_full_mask(attention_mask, embedding_output)
elif attention_mask is not None and attention_mask.dim() == 3:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, embedding_output.shape[:2], embedding_output)
else:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
return (attention_mask, encoder_attention_mask)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
| null
| 12
| 3
| 31
| 3
| 21
| 7
| 5
| 0.42
| 1
| 8
| 4
| 0
| 5
| 4
| 5
| 6
| 180
| 24
| 110
| 42
| 82
| 46
| 55
| 25
| 49
| 18
| 2
| 2
| 24
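The ErnieModel record above states that the model acts as a decoder when `is_decoder=True` and consumes `encoder_hidden_states` when `add_cross_attention=True`. A hedged sketch of that wiring with deliberately tiny, made-up config sizes (a real setup would load a pretrained checkpoint instead):

```python
import torch
from transformers import ErnieConfig, ErnieModel

config = ErnieConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=2, intermediate_size=64,
    is_decoder=True, add_cross_attention=True,
)
decoder = ErnieModel(config)   # randomly initialized, for illustration only

input_ids = torch.randint(0, 100, (1, 6))
encoder_hidden_states = torch.randn(1, 10, 32)   # output of some upstream encoder
outputs = decoder(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states)
print(outputs.last_hidden_state.shape)           # torch.Size([1, 6, 32])
```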
|
2,414
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieOnlyMLMHead
|
import torch.nn as nn
import torch
class ErnieOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = ErnieLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class ErnieOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
2,415
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieOnlyNSPHead
|
import torch.nn as nn
class ErnieOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
|
class ErnieOnlyNSPHead(nn.Module):
def __init__(self, config):
pass
def forward(self, pooled_output):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
2,416
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieOutput
|
import torch.nn as nn
import torch
class ErnieOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class ErnieOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
2,417
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErniePooler
|
import torch.nn as nn
import torch
class ErniePooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class ErniePooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
2,418
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErniePreTrainedModel
|
import torch.nn as nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_ernie import ErnieConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring
class ErniePreTrainedModel(PreTrainedModel):
config_class = ErnieConfig
base_model_prefix = 'ernie'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': ErnieLayer, 'attentions': ErnieSelfAttention, 'cross_attentions': ErnieCrossAttention}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, ErnieLMPredictionHead):
module.bias.data.zero_()
|
@auto_docstring
class ErniePreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.44
| 1
| 0
| 0
| 9
| 1
| 0
| 1
| 1
| 25
| 2
| 16
| 5
| 14
| 7
| 14
| 5
| 12
| 6
| 1
| 2
| 6
|
2,419
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErniePreTrainingHeads
|
import torch.nn as nn
class ErniePreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = ErnieLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return (prediction_scores, seq_relationship_score)
|
class ErniePreTrainingHeads(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output, pooled_output):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 7
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
2,420
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErniePredictionHeadTransform
|
import torch.nn as nn
from ...activations import ACT2FN
import torch
class ErniePredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class ErniePredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 15
| 1
| 14
| 6
| 11
| 0
| 13
| 6
| 10
| 2
| 1
| 1
| 3
|
2,421
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieSelfAttention
|
from ...cache_utils import Cache, EncoderDecoderCache
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
import torch.nn as nn
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
class ErnieSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size ** (-0.5)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_value is not None:
current_past_key_value = past_key_value
if isinstance(past_key_value, EncoderDecoderCache):
current_past_key_value = past_key_value.self_attention_cache
key_layer, value_layer = current_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
if self.position_embedding_type != 'absolute':
raise ValueError(f'You are using {self.config._attn_implementation} as attention type. However, non-absolute positional embeddings can not work with them. Please load the model with `attn_implementation="eager"`.')
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, head_mask=head_mask, use_cache=past_key_value is not None, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return (attn_output, attn_weights)
|
class ErnieSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 43
| 7
| 31
| 6
| 6
| 0.19
| 1
| 5
| 0
| 0
| 3
| 11
| 3
| 13
| 132
| 22
| 93
| 44
| 80
| 18
| 72
| 35
| 68
| 13
| 1
| 2
| 17
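For the eager attention path referenced in the ErnieSelfAttention record above: the sketch below illustrates the underlying scaled-dot-product math (with `scaling = head_dim ** -0.5` and an additive mask); it is not the library's `eager_attention_forward`:

```python
import torch

def eager_attention(q, k, v, mask=None, scaling=None):
    # q, k, v: (batch, heads, seq, head_dim); mask, if given, is additive.
    scaling = scaling if scaling is not None else q.shape[-1] ** -0.5
    scores = torch.matmul(q, k.transpose(-1, -2)) * scaling
    if mask is not None:
        scores = scores + mask
    return torch.matmul(scores.softmax(dim=-1), v)

q = k = v = torch.randn(1, 2, 5, 16)
print(eager_attention(q, k, v).shape)   # torch.Size([1, 2, 5, 16])
```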
|
2,422
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py
|
transformers.models.ernie.modeling_ernie.ErnieSelfOutput
|
import torch
import torch.nn as nn
class ErnieSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class ErnieSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
2,423
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/configuration_esm.py
|
transformers.models.esm.configuration_esm.EsmConfig
|
from ...configuration_utils import PretrainedConfig
class EsmConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of an [`ESMModel`]. It is used to instantiate an ESM model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ESM
[facebook/esm-1b](https://huggingface.co/facebook/esm-1b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*):
Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ESMModel`].
mask_token_id (`int`, *optional*):
The index of the mask token in the vocabulary. This must be included in the config because of the
"mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens.
pad_token_id (`int`, *optional*):
The index of the padding token in the vocabulary. This must be included in the config because certain parts
of the ESM code use this instead of the attention mask.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1026):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`, `"rotary"`.
For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
emb_layer_norm_before (`bool`, *optional*):
Whether to apply layer normalization after embeddings but before the main stem of the network.
token_dropout (`bool`, defaults to `False`):
When this is enabled, masked tokens are treated as if they had been dropped out by input dropout.
Examples:
```python
>>> from transformers import EsmModel, EsmConfig
>>> # Initializing a ESM facebook/esm-1b style configuration
>>> configuration = EsmConfig(vocab_size=33)
>>> # Initializing a model from the configuration
>>> model = EsmModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'esm'
def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.emb_layer_norm_before = emb_layer_norm_before
self.token_dropout = token_dropout
self.is_folding_model = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.')
esmfold_config = EsmFoldConfig()
elif isinstance(esmfold_config, dict):
esmfold_config = EsmFoldConfig(**esmfold_config)
self.esmfold_config = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
self.vocab_list = get_default_vocab_list()
else:
self.vocab_list = vocab_list
else:
self.esmfold_config = None
self.vocab_list = None
if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = super().to_dict()
if isinstance(self.esmfold_config, EsmFoldConfig):
output['esmfold_config'] = self.esmfold_config.to_dict()
return output
|
class EsmConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of an [`ESMModel`]. It is used to instantiate an ESM model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ESM
[facebook/esm-1b](https://huggingface.co/facebook/esm-1b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*):
Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ESMModel`].
mask_token_id (`int`, *optional*):
The index of the mask token in the vocabulary. This must be included in the config because of the
"mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens.
pad_token_id (`int`, *optional*):
The index of the padding token in the vocabulary. This must be included in the config because certain parts
of the ESM code use this instead of the attention mask.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1026):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`, `"rotary"`.
For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
emb_layer_norm_before (`bool`, *optional*):
Whether to apply layer normalization after embeddings but before the main stem of the network.
token_dropout (`bool`, defaults to `False`):
When this is enabled, masked tokens are treated as if they had been dropped out by input dropout.
Examples:
```python
>>> from transformers import EsmModel, EsmConfig
>>> # Initializing a ESM facebook/esm-1b style configuration
>>> configuration = EsmConfig(vocab_size=33)
>>> # Initializing a model from the configuration
>>> model = EsmModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
pass
def to_dict(self):
'''
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
'''
pass
| 3
| 2
| 34
| 1
| 31
| 3
| 4
| 1.05
| 1
| 4
| 1
| 0
| 2
| 17
| 2
| 2
| 142
| 13
| 63
| 44
| 38
| 66
| 38
| 22
| 35
| 6
| 1
| 2
| 8
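One detail of the EsmConfig record above worth spelling out: when `is_folding_model=True` and `esmfold_config` is passed as a plain dict, `__init__` converts it into an `EsmFoldConfig` (and falls back to the default vocab list with a warning). A hedged usage sketch, assuming the constructor shown above:

```python
from transformers import EsmConfig

config = EsmConfig(
    vocab_size=33,
    is_folding_model=True,
    esmfold_config={"trunk": {"num_blocks": 4}},   # plain dict, converted on init
)
print(type(config.esmfold_config).__name__)     # EsmFoldConfig
print(config.esmfold_config.trunk.num_blocks)   # 4
```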
|
2,424
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/configuration_esm.py
|
transformers.models.esm.configuration_esm.EsmFoldConfig
|
from dataclasses import asdict, dataclass
from typing import Optional
@dataclass
class EsmFoldConfig:
esm_type: Optional[str] = None
fp16_esm: bool = True
use_esm_attn_map: bool = False
esm_ablate_pairwise: bool = False
esm_ablate_sequence: bool = False
esm_input_dropout: float = 0
embed_aa: bool = True
bypass_lm: bool = False
lddt_head_hid_dim: int = 128
trunk: 'TrunkConfig' = None
def __post_init__(self):
if self.trunk is None:
self.trunk = TrunkConfig()
elif isinstance(self.trunk, dict):
self.trunk = TrunkConfig(**self.trunk)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = asdict(self)
output['trunk'] = self.trunk.to_dict()
return output
|
@dataclass
class EsmFoldConfig:
def __post_init__(self):
pass
def to_dict(self):
'''
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
'''
pass
| 4
| 1
| 8
| 1
| 5
| 3
| 2
| 0.25
| 0
| 2
| 1
| 0
| 2
| 0
| 2
| 2
| 30
| 5
| 20
| 14
| 17
| 5
| 19
| 14
| 16
| 3
| 0
| 1
| 4
|
2,425
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/configuration_esm.py
|
transformers.models.esm.configuration_esm.StructureModuleConfig
|
from dataclasses import asdict, dataclass
@dataclass
class StructureModuleConfig:
"""
Args:
sequence_dim:
Single representation channel dimension
pairwise_dim:
Pair representation channel dimension
ipa_dim:
IPA hidden channel dimension
resnet_dim:
Angle resnet (Alg. 23 lines 11-14) hidden channel dimension
num_heads_ipa:
Number of IPA heads
num_qk_points:
Number of query/key points to generate during IPA
num_v_points:
Number of value points to generate during IPA
dropout_rate:
Dropout rate used throughout the layer
num_blocks:
Number of structure module blocks
num_transition_layers:
Number of layers in the single representation transition (Alg. 23 lines 8-9)
num_resnet_blocks:
Number of blocks in the angle resnet
num_angles:
Number of angles to generate in the angle resnet
trans_scale_factor:
Scale of single representation transition hidden dimension
epsilon:
Small number used in angle resnet normalization
inf:
Large number used for attention masking
"""
sequence_dim: int = 384
pairwise_dim: int = 128
ipa_dim: int = 16
resnet_dim: int = 128
num_heads_ipa: int = 12
num_qk_points: int = 4
num_v_points: int = 8
dropout_rate: float = 0.1
num_blocks: int = 8
num_transition_layers: int = 1
num_resnet_blocks: int = 2
num_angles: int = 7
trans_scale_factor: int = 10
epsilon: float = 1e-08
inf: float = 100000.0
def to_dict(self):
return asdict(self)
|
@dataclass
class StructureModuleConfig:
'''
Args:
sequence_dim:
Single representation channel dimension
pairwise_dim:
Pair representation channel dimension
ipa_dim:
IPA hidden channel dimension
resnet_dim:
Angle resnet (Alg. 23 lines 11-14) hidden channel dimension
num_heads_ipa:
Number of IPA heads
num_qk_points:
Number of query/key points to generate during IPA
num_v_points:
Number of value points to generate during IPA
dropout_rate:
Dropout rate used throughout the layer
num_blocks:
Number of structure module blocks
num_transition_layers:
Number of layers in the single representation transition (Alg. 23 lines 8-9)
num_resnet_blocks:
Number of blocks in the angle resnet
num_angles:
Number of angles to generate in the angle resnet
trans_scale_factor:
Scale of single representation transition hidden dimension
epsilon:
Small number used in angle resnet normalization
inf:
Large number used for attention masking
'''
def to_dict(self):
pass
| 3
| 1
| 2
| 0
| 2
| 0
| 1
| 1.83
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 53
| 2
| 18
| 17
| 16
| 33
| 18
| 17
| 16
| 1
| 0
| 0
| 1
|
2,426
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/configuration_esm.py
|
transformers.models.esm.configuration_esm.TrunkConfig
|
from typing import Optional
from dataclasses import asdict, dataclass
@dataclass
class TrunkConfig:
num_blocks: int = 48
sequence_state_dim: int = 1024
pairwise_state_dim: int = 128
sequence_head_width: int = 32
pairwise_head_width: int = 32
position_bins: int = 32
dropout: float = 0
layer_drop: float = 0
cpu_grad_checkpoint: bool = False
max_recycles: int = 4
chunk_size: Optional[int] = 128
structure_module: 'StructureModuleConfig' = None
def __post_init__(self):
if self.structure_module is None:
self.structure_module = StructureModuleConfig()
elif isinstance(self.structure_module, dict):
self.structure_module = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(f'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got {self.sequence_state_dim} and {self.sequence_head_width}.')
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(f'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got {self.pairwise_state_dim} and {self.pairwise_head_width}.')
sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(f'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(f'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = asdict(self)
output['structure_module'] = self.structure_module.to_dict()
return output
|
@dataclass
class TrunkConfig:
def __post_init__(self):
pass
def to_dict(self):
'''
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
'''
pass
| 4
| 1
| 24
| 3
| 19
| 3
| 6
| 0.1
| 0
| 3
| 1
| 0
| 2
| 0
| 2
| 2
| 62
| 7
| 50
| 18
| 47
| 5
| 37
| 18
| 34
| 10
| 0
| 1
| 11
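The TrunkConfig record above enforces that the head widths tile the state dimensions. Spelled out with the default values (the numbers only illustrate the existing checks, they add no new constraints):

```python
sequence_state_dim, sequence_head_width = 1024, 32
pairwise_state_dim, pairwise_head_width = 128, 32

sequence_num_heads = sequence_state_dim // sequence_head_width   # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width   # 4 heads

assert sequence_state_dim == sequence_num_heads * sequence_head_width
assert pairwise_state_dim == pairwise_num_heads * pairwise_head_width
assert pairwise_state_dim % 2 == 0
```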
|
2,427
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmAttention
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class EsmAttention(nn.Module):
def __init__(self, config, layer_idx=None, is_cross_attention=False):
super().__init__()
self.self = EsmSelfAttention(config, layer_idx=layer_idx, is_cross_attention=is_cross_attention)
self.output = EsmSelfOutput(config)
self.pruned_heads = set()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
hidden_states_ln = self.LayerNorm(hidden_states)
attn_output, _ = self.self(hidden_states_ln, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
attn_output = self.output(attn_output, hidden_states)
return attn_output
|
class EsmAttention(nn.Module):
def __init__(self, config, layer_idx=None, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
pass
| 4
| 0
| 15
| 1
| 14
| 1
| 1
| 0.07
| 1
| 4
| 2
| 0
| 3
| 4
| 3
| 13
| 49
| 4
| 43
| 22
| 30
| 3
| 24
| 13
| 20
| 2
| 1
| 1
| 4
|
2,428
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmClassificationHead
|
from torch import nn
import torch
class EsmClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
|
class EsmClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
| 3
| 1
| 7
| 0
| 7
| 1
| 1
| 0.14
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 17
| 2
| 14
| 7
| 11
| 2
| 14
| 7
| 11
| 1
| 1
| 0
| 2
|
2,429
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmContactPredictionHead
|
from torch import nn
class EsmContactPredictionHead(nn.Module):
"""Performs symmetrization, apc, and computes a logistic regression on the output features"""
def __init__(self, in_features: int, bias=True, eos_idx: int=2):
super().__init__()
self.in_features = in_features
self.eos_idx = eos_idx
self.regression = nn.Linear(in_features, 1, bias)
self.activation = nn.Sigmoid()
def forward(self, tokens, attentions):
eos_mask = tokens.ne(self.eos_idx).to(attentions)
eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
attentions = attentions * eos_mask[:, None, None, :, :]
attentions = attentions[..., :-1, :-1]
attentions = attentions[..., 1:, 1:]
batch_size, layers, heads, seqlen, _ = attentions.size()
attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
attentions = attentions.to(self.regression.weight.device)
attentions = average_product_correct(symmetrize(attentions))
attentions = attentions.permute(0, 2, 3, 1)
return self.activation(self.regression(attentions).squeeze(3))
|
class EsmContactPredictionHead(nn.Module):
'''Performs symmetrization, apc, and computes a logistic regression on the output features'''
def __init__(self, in_features: int, bias=True, eos_idx: int=2):
pass
def forward(self, tokens, attentions):
pass
| 3
| 1
| 15
| 1
| 13
| 2
| 1
| 0.19
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 33
| 3
| 26
| 14
| 18
| 5
| 19
| 9
| 16
| 1
| 1
| 0
| 2
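The EsmContactPredictionHead record above calls `symmetrize` and `average_product_correct`, which are defined elsewhere in the file. The sketch below assumes their standard contact-prediction definitions (symmetrize over the last two dims, then average product correction); treat it as an approximation, not the exact helpers:

```python
import torch

def symmetrize(x):
    # Make each (seq, seq) attention map symmetric.
    return x + x.transpose(-1, -2)

def average_product_correct(x):
    # Average product correction (APC), assuming the usual contact-prediction form.
    a1 = x.sum(-1, keepdim=True)
    a2 = x.sum(-2, keepdim=True)
    a12 = x.sum((-1, -2), keepdim=True)
    return x - a1 * a2 / a12

attentions = torch.rand(1, 12, 20, 20)   # (batch, layers * heads, seq, seq)
corrected = average_product_correct(symmetrize(attentions))
print(corrected.shape)                   # torch.Size([1, 12, 20, 20])
```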
|
2,430
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmEmbeddings
|
from torch import nn
import torch
class EsmEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
if config.emb_layer_norm_before:
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
else:
self.layer_norm = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.padding_idx = config.pad_token_id
if self.position_embedding_type == 'absolute':
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
self.token_dropout = config.token_dropout
self.mask_token_id = config.mask_token_id
def forward(self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None):
if position_ids is None:
if input_ids is not None:
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
embeddings = inputs_embeds
if self.token_dropout and input_ids is not None:
embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0)
mask_ratio_train = 0.15 * 0.8
src_lengths = attention_mask.sum(-1) if attention_mask is not None else input_ids.shape[1]
mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths
embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to(embeddings.dtype)
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
if self.layer_norm is not None:
embeddings = self.layer_norm(embeddings)
if attention_mask is not None:
embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
|
class EsmEmbeddings(nn.Module):
'''
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
'''
def __init__(self, config):
pass
def forward(self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None):
pass
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
| 4
| 2
| 27
| 3
| 17
| 7
| 4
| 0.43
| 1
| 1
| 0
| 0
| 3
| 8
| 3
| 13
| 88
| 13
| 53
| 22
| 47
| 23
| 41
| 20
| 37
| 8
| 1
| 2
| 11
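The token-dropout branch in the EsmEmbeddings record above implements the "mask-dropout" rescaling mentioned in EsmConfig: masked positions are zeroed and the remaining embeddings are rescaled by `(1 - mask_ratio_train) / (1 - mask_ratio_observed)`. A small numeric sketch with made-up ids and embedding sizes:

```python
import torch

mask_token_id = 32                                 # illustrative mask token id
input_ids = torch.tensor([[5, 32, 7, 32, 9, 11]])  # two of six tokens are masked
embeddings = torch.randn(1, 6, 4)

mask_ratio_train = 0.15 * 0.8                      # expected masking ratio during pre-training
src_lengths = torch.tensor([6.0])
mask_ratio_observed = (input_ids == mask_token_id).sum(-1).float() / src_lengths  # 2 / 6

# Zero the masked positions, then rescale the rest to match the training-time statistics.
embeddings = embeddings.masked_fill((input_ids == mask_token_id).unsqueeze(-1), 0.0)
embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
print(embeddings.shape)   # torch.Size([1, 6, 4])
```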
|
2,431
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmEncoder
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
from torch import nn
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput
class EsmEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)])
self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
if self.emb_layer_norm_after:
hidden_states = self.emb_layer_norm_after(hidden_states)
return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states)
|
class EsmEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
pass
| 4
| 0
| 47
| 4
| 43
| 0
| 10
| 0
| 1
| 6
| 2
| 0
| 2
| 4
| 2
| 12
| 95
| 8
| 87
| 27
| 72
| 0
| 38
| 15
| 35
| 18
| 1
| 3
| 19
|
2,432
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmForMaskedLM
|
from ...processing_utils import Unpack
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring
class EsmForMaskedLM(EsmPreTrainedModel):
_tied_weights_keys = ['lm_head.decoder.weight']
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning('If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
self.esm = EsmModel(config, add_pooling_layer=False)
self.lm_head = EsmLMHead(config)
self.init_weights()
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(prediction_scores.device)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def predict_contacts(self, tokens, attention_mask):
return self.esm.predict_contacts(tokens, attention_mask=attention_mask)
|
@auto_docstring
class EsmForMaskedLM(EsmPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass
def predict_contacts(self, tokens, attention_mask):
pass
| 9
| 1
| 15
| 2
| 12
| 2
| 2
| 0.12
| 1
| 6
| 3
| 0
| 5
| 2
| 5
| 6
| 89
| 13
| 68
| 29
| 42
| 8
| 29
| 15
| 23
| 5
| 2
| 1
| 10
|
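A minimal usage sketch for the masked-LM record above. The checkpoint name, the protein sequence, and the masked position are illustrative assumptions, not taken from this record.
import torch
from transformers import AutoTokenizer, EsmForMaskedLM
# Assumed checkpoint; any ESM-2 masked-LM checkpoint would work the same way.
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
inputs = tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="pt")
labels = torch.full_like(inputs["input_ids"], -100)   # -100 positions are ignored by the loss
masked_pos = 5                                        # arbitrary residue chosen for illustration
labels[0, masked_pos] = inputs["input_ids"][0, masked_pos]
inputs["input_ids"][0, masked_pos] = tokenizer.mask_token_id
with torch.no_grad():
    out = model(**inputs, labels=labels)
print(out.loss, out.logits.shape)  # cross-entropy over the single masked token; logits are (1, seq_len, vocab_size)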
2,433
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmForSequenceClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring(custom_intro='\n ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class EsmForSequenceClassification(EsmPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.esm = EsmModel(config, add_pooling_layer=False)
self.classifier = EsmClassificationHead(config)
self.init_weights()
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, **kwargs)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class EsmForSequenceClassification(EsmPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6
| 1
| 39
| 4
| 32
| 3
| 7
| 0.09
| 1
| 7
| 3
| 0
| 2
| 4
| 2
| 3
| 85
| 9
| 70
| 25
| 50
| 6
| 34
| 13
| 31
| 12
| 2
| 3
| 13
|
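The branching on `config.problem_type` above decides between MSE, cross-entropy, and BCE losses. A small randomly initialized sketch (the tiny config sizes are assumptions) showing integer labels with `num_labels > 1` selecting single-label classification:
import torch
from transformers import EsmConfig, EsmForSequenceClassification
config = EsmConfig(vocab_size=33, hidden_size=32, num_hidden_layers=2,
                   num_attention_heads=4, intermediate_size=64,
                   num_labels=3, pad_token_id=1)
model = EsmForSequenceClassification(config)  # random weights, illustration only
input_ids = torch.randint(4, 33, (2, 16))
out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))  # integer labels
print(config.problem_type, out.loss)  # 'single_label_classification', CrossEntropyLoss
# Float multi-hot labels would instead set 'multi_label_classification' (BCEWithLogitsLoss),
# and num_labels == 1 would fall back to 'regression' (MSELoss).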
2,434
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmForTokenClassification
|
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...processing_utils import Unpack
@auto_docstring
class EsmForTokenClassification(EsmPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.esm = EsmModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, **kwargs)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class EsmForTokenClassification(EsmPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 6
| 1
| 30
| 5
| 24
| 2
| 3
| 0.07
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 3
| 68
| 10
| 54
| 25
| 34
| 4
| 23
| 13
| 20
| 5
| 2
| 1
| 6
|
2,435
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmIntermediate
|
import torch
from torch import nn
class EsmIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = gelu(hidden_states)
return hidden_states
|
class EsmIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 12
| 9
| 1
| 8
| 4
| 5
| 0
| 8
| 4
| 5
| 1
| 1
| 0
| 2
|
2,436
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmLMHead
|
from torch import nn
import torch
class EsmLMHead(nn.Module):
"""ESM Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
x = self.decoder(x) + self.bias
return x
|
class EsmLMHead(nn.Module):
'''ESM Head for masked language modeling.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
| 3
| 1
| 8
| 1
| 6
| 1
| 1
| 0.15
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 19
| 4
| 13
| 8
| 10
| 2
| 13
| 8
| 10
| 1
| 1
| 0
| 2
|
2,437
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmLayer
|
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
class EsmLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = EsmAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise RuntimeError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = EsmAttention(config, is_cross_attention=True)
self.intermediate = EsmIntermediate(config)
self.output = EsmOutput(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
attention_output = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, **kwargs)
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise AttributeError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
attention_output = self.crossattention(attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
layer_output = self.feed_forward_chunk(attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
attention_output_ln = self.LayerNorm(attention_output)
intermediate_output = self.intermediate(attention_output_ln)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class EsmLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 27
| 2
| 23
| 2
| 4
| 0.1
| 1
| 6
| 3
| 0
| 3
| 9
| 3
| 13
| 84
| 9
| 70
| 34
| 57
| 7
| 43
| 25
| 39
| 7
| 1
| 2
| 11
|
2,438
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmModel
|
from ...utils.generic import OutputRecorder, check_model_inputs
from typing import Callable, Optional, Union
import torch
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring
class EsmModel(EsmPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = EsmEmbeddings(config)
self.encoder = EsmEncoder(config)
self.pooler = EsmPooler(config) if add_pooling_layer else None
self.contact_head = EsmContactPredictionHead(in_features=config.num_hidden_layers * config.num_attention_heads, bias=True)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `((batch_size, sequence_length))`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
position_ids (`torch.LongTensor` of shape `((batch_size, sequence_length))`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `((batch_size, sequence_length), hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids=input_ids, position_ids=position_ids)
if self.config._attn_implementation != 'flash_attention_2':
batch_size, seq_length = inputs_embeds.shape[:-1]
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=inputs_embeds.device)
attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape=(batch_size, seq_length))
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(inputs_embeds, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, **kwargs)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output)
def predict_contacts(self, tokens, attention_mask):
attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
attns = torch.stack(attns, dim=1)
attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3)
attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4)
return self.contact_head(tokens, attns)
|
@auto_docstring
class EsmModel(EsmPreTrainedModel):
'''
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
'''
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `((batch_size, sequence_length))`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
position_ids (`torch.LongTensor` of shape `((batch_size, sequence_length))`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `((batch_size, sequence_length), hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
'''
pass
def predict_contacts(self, tokens, attention_mask):
pass
| 10
| 4
| 26
| 3
| 17
| 6
| 4
| 0.43
| 1
| 9
| 5
| 0
| 6
| 5
| 6
| 7
| 180
| 25
| 109
| 41
| 82
| 47
| 56
| 26
| 49
| 16
| 2
| 2
| 23
|
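A bare-model sketch with a tiny, randomly initialized config (the sizes are assumptions) to show the shapes coming out of the forward pass above:
import torch
from transformers import EsmConfig, EsmModel
config = EsmConfig(vocab_size=33, hidden_size=32, num_hidden_layers=2,
                   num_attention_heads=4, intermediate_size=64, pad_token_id=1)
model = EsmModel(config)  # pooling layer added by default
input_ids = torch.randint(4, 33, (2, 10))
attention_mask = torch.ones_like(input_ids)
out = model(input_ids=input_ids, attention_mask=attention_mask)
print(out.last_hidden_state.shape)  # (2, 10, 32)
print(out.pooler_output.shape)      # (2, 32), first-token pooling from EsmPooler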
2,439
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmOutput
|
from torch import nn
class EsmOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
|
class EsmOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, input_tensor):
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 11
| 1
| 10
| 5
| 7
| 0
| 10
| 5
| 7
| 1
| 1
| 0
| 2
|
2,440
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmPooler
|
from torch import nn
import torch
class EsmPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class EsmPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
2,441
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmPreTrainedModel
|
from .configuration_esm import EsmConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from ...utils.generic import OutputRecorder, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring
class EsmPreTrainedModel(PreTrainedModel):
config: EsmConfig
base_model_prefix = 'esm'
supports_gradient_checkpointing = True
_no_split_modules = ['EsmLayer', 'EsmFoldTriangularSelfAttentionBlock', 'EsmEmbeddings']
_keys_to_ignore_on_load_unexpected = ['position_embeddings.weight']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': EsmLayer, 'attentions': [OutputRecorder(EsmSelfAttention, index=1, layer_name='attention')], 'cross_attentions': [OutputRecorder(EsmSelfAttention, index=1, layer_name='crossattention')]}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, EsmLMHead):
module.bias.data.zero_()
def get_output_embeddings(self):
return None
|
@auto_docstring
class EsmPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
def get_output_embeddings(self):
pass
| 4
| 1
| 15
| 0
| 12
| 3
| 6
| 0.47
| 1
| 0
| 0
| 6
| 1
| 0
| 1
| 1
| 27
| 2
| 17
| 6
| 15
| 8
| 15
| 6
| 13
| 6
| 1
| 2
| 6
|
2,442
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmSelfAttention
|
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from torch import nn
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
class EsmSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None, is_cross_attention=False):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = config.attention_probs_dropout_prob
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
self.rotary_embeddings = None
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
elif self.position_embedding_type == 'rotary':
self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size)
self.scaling = 1.0
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
self.is_causal = self.is_decoder and (not is_cross_attention)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
batch_size, seq_length = hidden_states.shape[:-1]
hidden_shape = (batch_size, seq_length, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
is_cross_attention = encoder_hidden_states is not None
current_states = encoder_hidden_states if is_cross_attention else hidden_states
attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
key_layer = self.key(current_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(current_states).view(hidden_shape).transpose(1, 2)
query_layer = query_layer * self.attention_head_size ** (-0.5)
if self.position_embedding_type == 'rotary':
query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
if self.position_embedding_type in ['relative_key', 'relative_key_query']:
raise ValueError(f"ESM {self.config._attn_implementation} attention does not support {self.position_embedding_type} embeddings. Set attention explicitly to 'eager' with `model.set_attn_implementation('eager')`")
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, head_mask=head_mask, **kwargs)
attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
return (attn_output, attn_weights)
|
class EsmSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None, is_cross_attention=False):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 44
| 7
| 30
| 7
| 6
| 0.24
| 1
| 6
| 1
| 0
| 3
| 12
| 3
| 13
| 136
| 23
| 92
| 44
| 79
| 22
| 73
| 35
| 69
| 13
| 1
| 2
| 18
|
2,443
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.EsmSelfOutput
|
from torch import nn
class EsmSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
|
class EsmSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, input_tensor):
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 11
| 1
| 10
| 5
| 7
| 0
| 10
| 5
| 7
| 1
| 1
| 0
| 2
|
2,444
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esm.py
|
transformers.models.esm.modeling_esm.RotaryEmbedding
|
import torch
class RotaryEmbedding(torch.nn.Module):
"""
Rotary position embeddings based on those in
[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
matrices which depend on their relative positions.
"""
inv_freq: torch.Tensor
def __init__(self, dim: int):
super().__init__()
inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)
inv_freq = inv_freq
self.register_buffer('inv_freq', inv_freq)
self._seq_len_cached = None
self._cos_cached = None
self._sin_cached = None
def _update_cos_sin_tables(self, x, seq_dimension=2):
seq_len = x.shape[seq_dimension]
if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
self._seq_len_cached = seq_len
t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq)
freqs = torch.outer(t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self._cos_cached = emb.cos()[None, None, :, :]
self._sin_cached = emb.sin()[None, None, :, :]
return (self._cos_cached, self._sin_cached)
def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2)
return (apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached).to(dtype=q.dtype), apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached).to(dtype=k.dtype))
|
class RotaryEmbedding(torch.nn.Module):
'''
Rotary position embeddings based on those in
[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
matrices which depend on their relative positions.
'''
def __init__(self, dim: int):
pass
def _update_cos_sin_tables(self, x, seq_dimension=2):
pass
def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 1
| 11
| 2
| 8
| 1
| 1
| 0.32
| 1
| 3
| 0
| 0
| 3
| 3
| 3
| 13
| 41
| 8
| 25
| 12
| 21
| 8
| 22
| 12
| 18
| 2
| 1
| 1
| 4
|
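A toy sketch of the rotary module above; the import path follows the file shown in this record, and the (batch, heads, seq_len, head_dim) layout matches what EsmSelfAttention passes in. The sizes are assumptions.
import torch
from transformers.models.esm.modeling_esm import RotaryEmbedding
head_dim = 8
rope = RotaryEmbedding(dim=head_dim)
q = torch.randn(1, 2, 16, head_dim)  # (batch, heads, seq_len, head_dim)
k = torch.randn(1, 2, 16, head_dim)
q_rot, k_rot = rope(q, k)
print(q_rot.shape, k_rot.shape)  # shapes are unchanged
# The rotation mixes pairs of channels position-by-position, so per-position norms are preserved.
print(torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5))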
2,445
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmCategoricalMixture
|
import torch
import torch.nn as nn
class EsmCategoricalMixture:
def __init__(self, param, bins=50, start=0, end=1):
self.logits = param
bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype)
self.v_bins = (bins[:-1] + bins[1:]) / 2
def log_prob(self, true):
true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1)
nll = self.logits.log_softmax(-1)
return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1)
def mean(self):
return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1)
|
class EsmCategoricalMixture:
def __init__(self, param, bins=50, start=0, end=1):
pass
def log_prob(self, true):
pass
def mean(self):
pass
| 4
| 0
| 5
| 0
| 3
| 1
| 1
| 0.36
| 0
| 0
| 0
| 0
| 3
| 2
| 3
| 3
| 17
| 2
| 11
| 8
| 7
| 4
| 11
| 8
| 7
| 1
| 0
| 0
| 3
|
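A toy sketch of the categorical mixture above (bin count, logits, and target values are assumptions); it treats logits over bins as a distribution on [start, end]:
import torch
from transformers.models.esm.modeling_esmfold import EsmCategoricalMixture
logits = torch.randn(2, 50)  # unnormalized scores over 50 bins for 2 positions
dist = EsmCategoricalMixture(logits, bins=50, start=0, end=1)
true_values = torch.tensor([0.12, 0.87])
print(dist.log_prob(true_values))  # log-probability of the bin nearest each true value
print(dist.mean())                 # expectation over bin centers, shape (2,)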
2,446
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldAngleResnet
|
import torch.nn as nn
import torch
class EsmFoldAngleResnet(nn.Module):
"""
Implements Algorithm 20, lines 11-14
"""
def __init__(self, config):
super().__init__()
self.config = config
self.linear_in = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
self.linear_initial = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
self.layers = nn.ModuleList()
for _ in range(config.num_resnet_blocks):
layer = EsmFoldAngleResnetBlock(config)
self.layers.append(layer)
self.linear_out = EsmFoldLinear(config.resnet_dim, config.num_angles * 2)
self.relu = nn.ReLU()
def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Args:
s:
[*, C_hidden] single embedding
s_initial:
[*, C_hidden] single embedding as of the start of the StructureModule
Returns:
[*, no_angles, 2] predicted angles
"""
s_initial = self.relu(s_initial)
s_initial = self.linear_initial(s_initial)
s = self.relu(s)
s = self.linear_in(s)
s = s + s_initial
for l in self.layers:
s = l(s)
s = self.relu(s)
s = self.linear_out(s)
s = s.view(s.shape[:-1] + (-1, 2))
unnormalized_s = s
norm_denom = torch.sqrt(torch.clamp(torch.sum(s ** 2, dim=-1, keepdim=True), min=self.config.epsilon))
s = s / norm_denom
return (unnormalized_s, s)
|
class EsmFoldAngleResnet(nn.Module):
'''
Implements Algorithm 20, lines 11-14
'''
def __init__(self, config):
pass
def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
'''
Args:
s:
[*, C_hidden] single embedding
s_initial:
[*, C_hidden] single embedding as of the start of the StructureModule
Returns:
[*, no_angles, 2] predicted angles
'''
pass
| 3
| 2
| 29
| 6
| 16
| 8
| 2
| 0.56
| 1
| 5
| 2
| 0
| 2
| 6
| 2
| 12
| 63
| 13
| 32
| 14
| 29
| 18
| 27
| 14
| 24
| 2
| 1
| 1
| 4
|
2,447
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldAngleResnetBlock
|
import torch
import torch.nn as nn
class EsmFoldAngleResnetBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init='relu')
self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init='final')
self.relu = nn.ReLU()
def forward(self, a: torch.Tensor) -> torch.Tensor:
s_initial = a
a = self.relu(a)
a = self.linear_1(a)
a = self.relu(a)
a = self.linear_2(a)
return a + s_initial
|
class EsmFoldAngleResnetBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, a: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 2
| 6
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 18
| 5
| 13
| 7
| 10
| 0
| 13
| 7
| 10
| 1
| 1
| 0
| 2
|
2,448
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldAttention
|
from typing import Callable, Optional, Union
import torch.nn as nn
import math
import torch
class EsmFoldAttention(nn.Module):
"""
Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors.
"""
def __init__(self, c_q: int, c_k: int, c_v: int, c_hidden: int, no_heads: int, gating: bool=True):
"""
Args:
c_q:
Input dimension of query data
c_k:
Input dimension of key data
c_v:
Input dimension of value data
c_hidden:
Per-head hidden dimension
no_heads:
Number of attention heads
gating:
Whether the output should be gated using query data
"""
super().__init__()
self.c_q = c_q
self.c_k = c_k
self.c_v = c_v
self.c_hidden = c_hidden
self.no_heads = no_heads
self.gating = gating
self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init='glorot')
self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init='glorot')
self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init='glorot')
self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init='final')
self.linear_g = None
if self.gating:
self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init='gating')
self.sigmoid = nn.Sigmoid()
def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
q = self.linear_q(q_x)
k = self.linear_k(kv_x)
v = self.linear_v(kv_x)
q = q.view(q.shape[:-1] + (self.no_heads, -1))
k = k.view(k.shape[:-1] + (self.no_heads, -1))
v = v.view(v.shape[:-1] + (self.no_heads, -1))
q = q.transpose(-2, -3)
k = k.transpose(-2, -3)
v = v.transpose(-2, -3)
q /= math.sqrt(self.c_hidden)
return (q, k, v)
def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor:
if self.linear_g is not None:
g = self.sigmoid(self.linear_g(q_x))
g = g.view(g.shape[:-1] + (self.no_heads, -1))
o = o * g
o = flatten_final_dims(o, 2)
o = self.linear_o(o)
return o
def forward(self, q_x: torch.Tensor, kv_x: torch.Tensor, biases: Optional[list[torch.Tensor]]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, lma_q_chunk_size: int=1024, lma_kv_chunk_size: int=4096, use_flash: bool=False, flash_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Args:
q_x:
[*, Q, C_q] query data
kv_x:
[*, K, C_k] key data
biases:
List of biases that broadcast to [*, H, Q, K]
use_memory_efficient_kernel:
Whether to use a custom memory-efficient attention kernel. This should be the default choice for most.
If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead
use_lma:
Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a
stock PyTorch implementation is used instead
lma_q_chunk_size:
Query chunk size (for LMA)
lma_kv_chunk_size:
Key/Value chunk size (for LMA)
Returns:
[*, Q, C_q] attention update
"""
if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None):
raise ValueError('If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided')
if use_flash and biases is not None:
raise ValueError('use_flash is incompatible with the bias option. For masking, use flash_mask instead')
attn_options = [use_memory_efficient_kernel, use_lma, use_flash]
if sum(attn_options) > 1:
raise ValueError('Choose at most one alternative attention algorithm')
if biases is None:
biases = []
query, key, value = self._prep_qkv(q_x, kv_x)
key = permute_final_dims(key, (1, 0))
output = torch.matmul(query, key)
for b in biases:
output += b
output = softmax_no_cast(output, -1)
output = torch.matmul(output, value)
output = output.transpose(-2, -3)
output = self._wrap_up(output, q_x)
return output
|
class EsmFoldAttention(nn.Module):
'''
Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors.
'''
def __init__(self, c_q: int, c_k: int, c_v: int, c_hidden: int, no_heads: int, gating: bool=True):
'''
Args:
c_q:
Input dimension of query data
c_k:
Input dimension of key data
c_v:
Input dimension of value data
c_hidden:
Per-head hidden dimension
no_heads:
Number of attention heads
gating:
Whether the output should be gated using query data
'''
pass
def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pass
def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, q_x: torch.Tensor, kv_x: torch.Tensor, biases: Optional[list[torch.Tensor]]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, lma_q_chunk_size: int=1024, lma_kv_chunk_size: int=4096, use_flash: bool=False, flash_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Args:
q_x:
[*, Q, C_q] query data
kv_x:
[*, K, C_k] key data
biases:
List of biases that broadcast to [*, H, Q, K]
use_memory_efficient_kernel:
Whether to use a custom memory-efficient attention kernel. This should be the default choice for most.
If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead
use_lma:
Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a
stock PyTorch implementation is used instead
lma_q_chunk_size:
Query chunk size (for LMA)
lma_kv_chunk_size:
Key/Value chunk size (for LMA)
Returns:
[*, Q, C_q] attention update
'''
pass
| 5
| 3
| 36
| 5
| 19
| 12
| 3
| 0.66
| 1
| 6
| 1
| 0
| 4
| 12
| 4
| 14
| 150
| 24
| 76
| 44
| 52
| 50
| 57
| 25
| 52
| 6
| 1
| 1
| 11
|
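A small sketch of the attention module above with an additive bias; the dimensions are assumptions, and the bias only needs to broadcast to [*, H, Q, K] as the docstring states.
import torch
from transformers.models.esm.modeling_esmfold import EsmFoldAttention
attn = EsmFoldAttention(c_q=32, c_k=32, c_v=32, c_hidden=8, no_heads=4, gating=True)
q_x = torch.randn(2, 10, 32)      # [*, Q, C_q]
kv_x = torch.randn(2, 12, 32)     # [*, K, C_k]
bias = torch.zeros(2, 1, 10, 12)  # broadcasts against the (2, 4, 10, 12) attention logits
out = attn(q_x, kv_x, biases=[bias])
print(out.shape)  # (2, 10, 32): one gated update per query position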
2,449
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldBackboneUpdate
|
import torch
import torch.nn as nn
class EsmFoldBackboneUpdate(nn.Module):
"""
Implements part of Algorithm 23.
"""
def __init__(self, config):
super().__init__()
self.linear = EsmFoldLinear(config.sequence_dim, 6, init='final')
def forward(self, s: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Args:
[*, N_res, C_s] single representation
Returns:
[*, N_res, 6] update vector
"""
update = self.linear(s)
return update
|
class EsmFoldBackboneUpdate(nn.Module):
'''
Implements part of Algorithm 23.
'''
def __init__(self, config):
pass
def forward(self, s: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
'''
Args:
[*, N_res, C_s] single representation
Returns:
[*, N_res, 6] update vector
'''
pass
| 3
| 2
| 8
| 1
| 3
| 4
| 1
| 1.43
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 21
| 4
| 7
| 5
| 4
| 10
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
2,450
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldDropout
|
from typing import Callable, Optional, Union
import torch.nn as nn
import torch
class EsmFoldDropout(nn.Module):
"""
Implementation of dropout with the ability to share the dropout mask along a particular dimension.
"""
def __init__(self, r: float, batch_dim: Union[int, list[int]]):
super().__init__()
self.r = r
if isinstance(batch_dim, int):
batch_dim = [batch_dim]
self.batch_dim = batch_dim
self.dropout = nn.Dropout(self.r)
def forward(self, x: torch.Tensor) -> torch.Tensor:
shape = list(x.shape)
if self.batch_dim is not None:
for bd in self.batch_dim:
shape[bd] = 1
return x * self.dropout(x.new_ones(shape))
|
class EsmFoldDropout(nn.Module):
'''
Implementation of dropout with the ability to share the dropout mask along a particular dimension.
'''
def __init__(self, r: float, batch_dim: Union[int, list[int]]):
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 7
| 1
| 7
| 0
| 3
| 0.21
| 1
| 5
| 0
| 0
| 2
| 3
| 2
| 12
| 20
| 3
| 14
| 8
| 11
| 3
| 14
| 8
| 11
| 3
| 1
| 2
| 5
|
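A toy sketch of the shared-mask dropout above (the rate and shapes are assumptions):
import torch
from transformers.models.esm.modeling_esmfold import EsmFoldDropout
drop = EsmFoldDropout(r=0.5, batch_dim=1)  # one mask shared along dimension 1
drop.train()
x = torch.ones(1, 4, 3)
y = drop(x)
# The mask has shape (1, 1, 3), so each last-dim position is kept (scaled by 1/(1-r)) or dropped
# consistently across all 4 entries of dimension 1.
print(y)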
2,451
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldInvariantPointAttention
|
import math
from typing import Callable, Optional, Union
import torch.nn as nn
import torch
from collections.abc import Sequence
from .openfold_utils import OFProtein, Rigid, Rotation, atom14_to_atom37, chunk_layer, compute_predicted_aligned_error, compute_tm, frames_and_literature_positions_to_atom14_pos, make_atom14_masks, residue_constants, to_pdb, torsion_angles_to_frames
import sys
class EsmFoldInvariantPointAttention(nn.Module):
"""
Implements Algorithm 22.
"""
def __init__(self, config):
super().__init__()
self.config = config
c_s = config.sequence_dim
c_z = config.pairwise_dim
self.hidden_dim = config.ipa_dim
self.num_heads = config.num_heads_ipa
self.num_qk_points = config.num_qk_points
self.num_v_points = config.num_v_points
hc = config.ipa_dim * config.num_heads_ipa
self.linear_q = EsmFoldLinear(c_s, hc)
self.linear_kv = EsmFoldLinear(c_s, 2 * hc)
hpq = config.num_heads_ipa * config.num_qk_points * 3
self.linear_q_points = EsmFoldLinear(c_s, hpq)
hpkv = config.num_heads_ipa * (config.num_qk_points + config.num_v_points) * 3
self.linear_kv_points = EsmFoldLinear(c_s, hpkv)
self.linear_b = EsmFoldLinear(c_z, config.num_heads_ipa)
self.head_weights = nn.Parameter(torch.zeros(config.num_heads_ipa))
concat_out_dim = config.num_heads_ipa * (c_z + config.ipa_dim + config.num_v_points * 4)
self.linear_out = EsmFoldLinear(concat_out_dim, c_s, init='final')
self.softmax = nn.Softmax(dim=-1)
self.softplus = nn.Softplus()
def forward(self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool=False, _z_reference_list: Optional[Sequence[torch.Tensor]]=None) -> torch.Tensor:
"""
Args:
s:
[*, N_res, C_s] single representation
z:
[*, N_res, N_res, C_z] pair representation
r:
[*, N_res] transformation object
mask:
[*, N_res] mask
Returns:
[*, N_res, C_s] single representation update
"""
z = [z]
q = self.linear_q(s)
kv = self.linear_kv(s)
q = q.view(q.shape[:-1] + (self.num_heads, -1))
kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))
k, v = torch.split(kv, self.hidden_dim, dim=-1)
q_pts = self.linear_q_points(s)
q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)
q_pts = torch.stack(q_pts, dim=-1)
q_pts = r[..., None].apply(q_pts)
q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))
kv_pts = self.linear_kv_points(s)
kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)
kv_pts = torch.stack(kv_pts, dim=-1)
kv_pts = r[..., None].apply(kv_pts)
kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))
k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)
b = self.linear_b(z[0])
if _offload_inference:
assert sys.getrefcount(z[0]) == 2
z[0] = z[0].cpu()
device_type = q.device.type if q.device.type != 'mps' else 'cpu'
if is_fp16_enabled(device_type):
with torch.autocast(device_type=device_type, enabled=False):
a = torch.matmul(permute_final_dims(q.float(), (1, 0, 2)), permute_final_dims(k.float(), (1, 2, 0)))
else:
a = torch.matmul(permute_final_dims(q, (1, 0, 2)), permute_final_dims(k, (1, 2, 0)))
a *= math.sqrt(1.0 / (3 * self.hidden_dim))
a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))
pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)
pt_att = pt_att ** 2
pt_att = sum(torch.unbind(pt_att, dim=-1))
head_weights = self.softplus(self.head_weights).view(*(1,) * len(pt_att.shape[:-2]) + (-1, 1))
head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))
pt_att = pt_att * head_weights
pt_att = torch.sum(pt_att, dim=-1) * -0.5
square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
square_mask = self.config.inf * (square_mask - 1)
pt_att = permute_final_dims(pt_att, (2, 0, 1))
a = a + pt_att
a = a + square_mask.unsqueeze(-3)
a = self.softmax(a)
o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)
o = flatten_final_dims(o, 2)
o_pt = torch.sum(a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :], dim=-2)
o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))
o_pt = r[..., None, None].invert_apply(o_pt)
o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.config.epsilon), 2)
o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)
if _offload_inference:
z[0] = z[0].to(o_pt.device)
o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))
o_pair = flatten_final_dims(o_pair, 2)
s = self.linear_out(torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype))
return s
|
class EsmFoldInvariantPointAttention(nn.Module):
'''
Implements Algorithm 22.
'''
def __init__(self, config):
pass
def forward(self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool=False, _z_reference_list: Optional[Sequence[torch.Tensor]]=None) -> torch.Tensor:
'''
Args:
s:
[*, N_res, C_s] single representation
z:
[*, N_res, N_res, C_z] pair representation
r:
[*, N_res] transformation object
mask:
[*, N_res] mask
Returns:
[*, N_res, C_s] single representation update
'''
pass
| 3
| 2
| 95
| 20
| 48
| 29
| 3
| 0.63
| 1
| 6
| 2
| 0
| 2
| 14
| 2
| 12
| 195
| 41
| 97
| 46
| 86
| 61
| 77
| 38
| 74
| 4
| 1
| 2
| 5
|
2,452
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldLayerNorm
|
import torch.nn as nn
import torch
class EsmFoldLayerNorm(nn.Module):
def __init__(self, c_in, eps=1e-05):
super().__init__()
self.c_in = (c_in,)
self.eps = eps
self.weight = nn.Parameter(torch.ones(c_in))
self.bias = nn.Parameter(torch.zeros(c_in))
def forward(self, x):
d = x.dtype
if d is torch.bfloat16 and (not is_deepspeed_initialized()):
with torch.cuda.amp.autocast(enabled=False):
out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps)
else:
out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps)
return out
|
class EsmFoldLayerNorm(nn.Module):
def __init__(self, c_in, eps=1e-05):
pass
def forward(self, x):
pass
| 3
| 0
| 9
| 2
| 7
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 19
| 4
| 15
| 9
| 12
| 0
| 14
| 9
| 11
| 2
| 1
| 2
| 3
|
2,453
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldLinear
|
import torch
from typing import Callable, Optional, Union
import torch.nn as nn
class EsmFoldLinear(nn.Linear):
"""
A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found in the code.
"""
def __init__(self, in_dim: int, out_dim: int, bias: bool=True, init: str='default', init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]]=None):
"""
Args:
in_dim:
The final dimension of inputs to the layer
out_dim:
The final dimension of layer outputs
bias:
Whether to learn an additive bias. True by default
init:
The initializer to use. Choose from:
"default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal
distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal":
Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0
Overridden by init_fn if the latter is not None.
init_fn:
A custom initializer taking weight and bias as inputs. Overrides init if not None.
"""
super().__init__(in_dim, out_dim, bias=bias)
if bias:
with torch.no_grad():
self.bias.fill_(0)
self.init = init
self.init_fn = init_fn
if init not in ['default', 'relu', 'glorot', 'gating', 'normal', 'final']:
raise ValueError('Invalid init string.')
|
class EsmFoldLinear(nn.Linear):
'''
A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found in the code.
'''
def __init__(self, in_dim: int, out_dim: int, bias: bool=True, init: str='default', init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]]=None):
'''
Args:
in_dim:
The final dimension of inputs to the layer
out_dim:
The final dimension of layer outputs
bias:
Whether to learn an additive bias. True by default
init:
The initializer to use. Choose from:
"default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal
distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal":
Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0
Overridden by init_fn if the latter is not None.
init_fn:
A custom initializer taking weight and bias as inputs. Overrides init if not None.
'''
pass
| 2
| 2
| 37
| 4
| 16
| 17
| 3
| 1.24
| 1
| 6
| 0
| 0
| 1
| 2
| 1
| 1
| 44
| 6
| 17
| 11
| 8
| 21
| 10
| 4
| 8
| 3
| 1
| 2
| 3
|
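A short sketch of the init modes above (the layer sizes are assumptions). Note that `init` is only validated and stored here; the actual weight filling happens in `EsmFoldPreTrainedModel._init_weights`, shown in a later record.
from transformers.models.esm.modeling_esmfold import EsmFoldLinear
proj = EsmFoldLinear(128, 64, init="relu")    # He-style init requested
gate = EsmFoldLinear(128, 64, init="gating")  # Weights=0, Bias=1 once initialized
final = EsmFoldLinear(64, 32, init="final")   # Weights=0, Bias=0 once initialized
print(proj.init, gate.init, final.init)
try:
    EsmFoldLinear(8, 8, init="xavier")        # not an accepted mode
except ValueError as err:
    print(err)                                # 'Invalid init string.'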
2,454
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldPairToSequence
|
import torch.nn as nn
class EsmFoldPairToSequence(nn.Module):
def __init__(self, pairwise_state_dim, num_heads):
super().__init__()
self.layernorm = nn.LayerNorm(pairwise_state_dim)
self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False)
def forward(self, pairwise_state):
"""
Inputs:
pairwise_state: B x L x L x pairwise_state_dim
Output:
pairwise_bias: B x L x L x num_heads
"""
assert len(pairwise_state.shape) == 4
z = self.layernorm(pairwise_state)
pairwise_bias = self.linear(z)
return pairwise_bias
|
class EsmFoldPairToSequence(nn.Module):
def __init__(self, pairwise_state_dim, num_heads):
pass
def forward(self, pairwise_state):
'''
Inputs:
pairwise_state: B x L x L x pairwise_state_dim
Output:
pairwise_bias: B x L x L x num_heads
'''
pass
| 3
| 1
| 9
| 1
| 5
| 3
| 1
| 0.6
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 19
| 3
| 10
| 7
| 7
| 6
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
2,455
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldPreTrainedModel
|
import torch
import torch.nn as nn
from .modeling_esm import EsmModel, EsmPreTrainedModel
class EsmFoldPreTrainedModel(EsmPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, EsmFoldLinear):
with torch.no_grad():
if module.init_fn is not None:
module.init_fn(module.weight, module.bias)
elif module.init == 'default':
trunc_normal_init_(module.weight, scale=1.0)
elif module.init == 'relu':
trunc_normal_init_(module.weight, scale=2.0)
elif module.init == 'glorot':
nn.init.xavier_uniform_(module.weight, gain=1)
elif module.init == 'gating':
module.weight.fill_(0.0)
if module.bias:
module.bias.fill_(1.0)
elif module.init == 'normal':
torch.nn.init.kaiming_normal_(module.weight, nonlinearity='linear')
elif module.init == 'final':
module.weight.fill_(0.0)
elif isinstance(module, EsmFoldInvariantPointAttention):
ipa_point_weights_init_(module.head_weights)
elif isinstance(module, EsmFoldTriangularSelfAttentionBlock):
torch.nn.init.zeros_(module.tri_mul_in.linear_z.weight)
torch.nn.init.zeros_(module.tri_mul_in.linear_z.bias)
torch.nn.init.zeros_(module.tri_mul_out.linear_z.weight)
torch.nn.init.zeros_(module.tri_mul_out.linear_z.bias)
torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.weight)
torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.bias)
torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.weight)
torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.bias)
torch.nn.init.zeros_(module.sequence_to_pair.o_proj.weight)
torch.nn.init.zeros_(module.sequence_to_pair.o_proj.bias)
torch.nn.init.zeros_(module.pair_to_sequence.linear.weight)
torch.nn.init.zeros_(module.seq_attention.o_proj.weight)
torch.nn.init.zeros_(module.seq_attention.o_proj.bias)
torch.nn.init.zeros_(module.mlp_seq.mlp[-2].weight)
torch.nn.init.zeros_(module.mlp_seq.mlp[-2].bias)
torch.nn.init.zeros_(module.mlp_pair.mlp[-2].weight)
torch.nn.init.zeros_(module.mlp_pair.mlp[-2].bias)
else:
super()._init_weights(module)
|
class EsmFoldPreTrainedModel(EsmPreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 2
| 2
| 43
| 1
| 41
| 1
| 12
| 0.14
| 1
| 4
| 3
| 0
| 1
| 0
| 1
| 2
| 50
| 2
| 42
| 2
| 40
| 6
| 33
| 2
| 31
| 12
| 2
| 4
| 12
|
2,456
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldRelativePosition
|
import torch
import torch.nn as nn
class EsmFoldRelativePosition(nn.Module):
def __init__(self, config):
super().__init__()
self.bins = config.position_bins
self.embedding = torch.nn.Embedding(2 * self.bins + 2, config.pairwise_state_dim)
def forward(self, residue_index, mask=None):
"""
Input:
residue_index: B x L tensor of indices (dtype=torch.long) mask: B x L tensor of booleans
Output:
pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings
"""
if residue_index.dtype != torch.long:
raise ValueError(f'`residue_index` has dtype {residue_index.dtype}, it should be `torch.long`.')
if mask is not None and residue_index.shape != mask.shape:
raise ValueError(f'`residue_index` and `mask` have inconsistent shapes: {residue_index.shape} != {mask.shape}.')
diff = residue_index[:, None, :] - residue_index[:, :, None]
diff = diff.clamp(-self.bins, self.bins)
diff = diff + self.bins + 1
if mask is not None:
mask = mask[:, None, :] * mask[:, :, None]
diff[mask == False] = 0
output = self.embedding(diff)
return output
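# --- Minimal sketch with a stand-in config (SimpleNamespace is an assumption for
# illustration; the real module receives the ESMFold trunk config). Clamped relative
# positions index an embedding table of size 2 * position_bins + 2.
import torch
from types import SimpleNamespace
cfg = SimpleNamespace(position_bins=32, pairwise_state_dim=64)
rel_pos = EsmFoldRelativePosition(cfg)
residue_index = torch.arange(10, dtype=torch.long)[None, :]  # B x L, must be torch.long
mask = torch.ones(1, 10, dtype=torch.bool)
pairwise_state = rel_pos(residue_index, mask=mask)           # -> shape (1, 10, 10, 64)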
|
class EsmFoldRelativePosition(nn.Module):
def __init__(self, config):
pass
def forward(self, residue_index, mask=None):
'''
Input:
residue_index: B x L tensor of indices (dtype=torch.long) mask: B x L tensor of booleans
Output:
pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings
'''
pass
| 3
| 1
| 16
| 3
| 10
| 5
| 3
| 0.5
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 34
| 6
| 20
| 7
| 17
| 10
| 18
| 7
| 15
| 4
| 1
| 1
| 5
|
2,457
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldResidueMLP
|
import torch.nn as nn
class EsmFoldResidueMLP(nn.Module):
def __init__(self, embed_dim, inner_dim, dropout=0):
super().__init__()
self.mlp = nn.Sequential(nn.LayerNorm(embed_dim), nn.Linear(embed_dim, inner_dim), nn.ReLU(), nn.Linear(inner_dim, embed_dim), nn.Dropout(dropout))
def forward(self, x):
return x + self.mlp(x)
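# --- Tiny sketch (sizes assumed): a residual LayerNorm -> Linear -> ReLU -> Linear block
# applied over the last dimension.
import torch
mlp = EsmFoldResidueMLP(embed_dim=64, inner_dim=256, dropout=0.1)
out = mlp(torch.randn(2, 10, 64))   # -> shape (2, 10, 64)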
|
class EsmFoldResidueMLP(nn.Module):
def __init__(self, embed_dim, inner_dim, dropout=0):
pass
def forward(self, x):
pass
| 3
| 0
| 6
| 1
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 14
| 2
| 12
| 4
| 9
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
2,458
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldSelfAttention
|
import torch.nn as nn
import numpy as np
import torch
class EsmFoldSelfAttention(nn.Module):
def __init__(self, embed_dim, num_heads, head_width, gated=False):
super().__init__()
assert embed_dim == num_heads * head_width
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_width = head_width
self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False)
self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True)
self.gated = gated
if gated:
self.g_proj = nn.Linear(embed_dim, embed_dim)
torch.nn.init.zeros_(self.g_proj.weight)
torch.nn.init.ones_(self.g_proj.bias)
self.rescale_factor = self.head_width ** (-0.5)
torch.nn.init.zeros_(self.o_proj.bias)
def forward(self, x, mask=None, bias=None, indices=None):
"""
Basic self attention with optional mask and external pairwise bias. To handle sequences of different lengths,
use mask.
Inputs:
x: batch of input sequences (.. x L x C) mask: batch of boolean masks where 1=valid, 0=padding position (..
x L_k) bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads)
Outputs:
sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads)
"""
t = self.proj(x).view(*x.shape[:2], self.num_heads, -1)
t = t.permute(0, 2, 1, 3)
q, k, v = t.chunk(3, dim=-1)
q = self.rescale_factor * q
a = torch.einsum('...qc,...kc->...qk', q, k)
if bias is not None:
a = a + bias.permute(0, 3, 1, 2)
if mask is not None:
mask = mask[:, None, None]
a = a.masked_fill(mask == False, -np.inf)
a = nn.functional.softmax(a, dim=-1)
y = torch.einsum('...hqk,...hkc->...qhc', a, v)
y = y.reshape(*y.shape[:2], -1)
if self.gated:
y = self.g_proj(x).sigmoid() * y
y = self.o_proj(y)
return (y, a.permute(0, 3, 1, 2))
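# --- Hedged sketch (sizes assumed): gated self-attention with an external pairwise bias.
# embed_dim must equal num_heads * head_width.
import torch
attn = EsmFoldSelfAttention(embed_dim=64, num_heads=4, head_width=16, gated=True)
x = torch.randn(2, 10, 64)                    # B x L x embed_dim
mask = torch.ones(2, 10, dtype=torch.bool)    # 1 = valid position, 0 = padding
bias = torch.zeros(2, 10, 10, 4)              # B x Lq x Lk x num_heads
y, attn_maps = attn(x, mask=mask, bias=bias)  # y: (2, 10, 64); attn_maps holds the attention weights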
|
class EsmFoldSelfAttention(nn.Module):
def __init__(self, embed_dim, num_heads, head_width, gated=False):
pass
def forward(self, x, mask=None, bias=None, indices=None):
'''
Basic self attention with optional mask and external pairwise bias. To handle sequences of different lengths,
use mask.
Inputs:
x: batch of input sequences (.. x L x C) mask: batch of boolean masks where 1=valid, 0=padding position (..
x L_k) bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads)
Outputs:
sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads)
'''
pass
| 3
| 1
| 29
| 7
| 17
| 6
| 3
| 0.35
| 1
| 1
| 0
| 0
| 2
| 8
| 2
| 12
| 60
| 15
| 34
| 15
| 31
| 12
| 34
| 15
| 31
| 4
| 1
| 1
| 6
|
2,459
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldSequenceToPair
|
import torch
import torch.nn as nn
class EsmFoldSequenceToPair(nn.Module):
def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
super().__init__()
self.layernorm = nn.LayerNorm(sequence_state_dim)
self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True)
self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True)
torch.nn.init.zeros_(self.proj.bias)
torch.nn.init.zeros_(self.o_proj.bias)
def forward(self, sequence_state):
"""
Inputs:
sequence_state: B x L x sequence_state_dim
Output:
pairwise_state: B x L x L x pairwise_state_dim
Intermediate state:
B x L x L x 2*inner_dim
"""
assert len(sequence_state.shape) == 3
s = self.layernorm(sequence_state)
s = self.proj(s)
q, k = s.chunk(2, dim=-1)
prod = q[:, None, :, :] * k[:, :, None, :]
diff = q[:, None, :, :] - k[:, :, None, :]
x = torch.cat([prod, diff], dim=-1)
x = self.o_proj(x)
return x
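# --- Illustrative sketch (sizes assumed): outer product and difference features of the
# per-residue state are concatenated and projected to the pairwise state.
import torch
seq_to_pair = EsmFoldSequenceToPair(sequence_state_dim=64, inner_dim=32, pairwise_state_dim=128)
sequence_state = torch.randn(2, 10, 64)        # B x L x sequence_state_dim
pairwise_state = seq_to_pair(sequence_state)   # -> shape (2, 10, 10, 128)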
|
class EsmFoldSequenceToPair(nn.Module):
def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
pass
def forward(self, sequence_state):
'''
Inputs:
sequence_state: B x L x sequence_state_dim
Output:
pairwise_state: B x L x L x pairwise_state_dim
Intermediate state:
B x L x L x 2*inner_dim
'''
pass
| 3
| 1
| 17
| 5
| 9
| 4
| 1
| 0.44
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 36
| 10
| 18
| 11
| 15
| 8
| 18
| 11
| 15
| 1
| 1
| 0
| 2
|
2,460
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldStructureModule
|
from torch.nn import LayerNorm
import torch.nn as nn
import torch
import sys
from .openfold_utils import OFProtein, Rigid, Rotation, atom14_to_atom37, chunk_layer, compute_predicted_aligned_error, compute_tm, frames_and_literature_positions_to_atom14_pos, make_atom14_masks, residue_constants, to_pdb, torsion_angles_to_frames
class EsmFoldStructureModule(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer_norm_s = LayerNorm(config.sequence_dim)
self.layer_norm_z = LayerNorm(config.pairwise_dim)
self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim)
self.ipa = EsmFoldInvariantPointAttention(config)
self.ipa_dropout = nn.Dropout(config.dropout_rate)
self.layer_norm_ipa = LayerNorm(config.sequence_dim)
self.transition = EsmFoldStructureModuleTransition(config)
self.bb_update = EsmFoldBackboneUpdate(config)
self.angle_resnet = EsmFoldAngleResnet(config)
def forward(self, evoformer_output_dict, aatype, mask=None, _offload_inference=False):
"""
Args:
evoformer_output_dict:
Dictionary containing:
"single":
[*, N_res, C_s] single representation
"pair":
[*, N_res, N_res, C_z] pair representation
aatype:
[*, N_res] amino acid indices
mask:
Optional [*, N_res] sequence mask
Returns:
A dictionary of outputs
"""
s = evoformer_output_dict['single']
if mask is None:
mask = s.new_ones(s.shape[:-1])
s = self.layer_norm_s(s)
z = self.layer_norm_z(evoformer_output_dict['pair'])
z_reference_list = None
if _offload_inference:
assert sys.getrefcount(evoformer_output_dict['pair']) == 2
evoformer_output_dict['pair'] = evoformer_output_dict['pair'].cpu()
z_reference_list = [z]
z = None
s_initial = s
s = self.linear_in(s)
rigids = Rigid.identity(s.shape[:-1], s.dtype, s.device, self.training, fmt='quat')
outputs = []
for i in range(self.config.num_blocks):
s = s + self.ipa(s, z, rigids, mask, _offload_inference=_offload_inference, _z_reference_list=z_reference_list)
s = self.ipa_dropout(s)
s = self.layer_norm_ipa(s)
s = self.transition(s)
rigids = rigids.compose_q_update_vec(self.bb_update(s))
backb_to_global = Rigid(Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None), rigids.get_trans())
backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor)
unnormalized_angles, angles = self.angle_resnet(s, s_initial)
all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype)
pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)
scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor)
preds = {'frames': scaled_rigids.to_tensor_7(), 'sidechain_frames': all_frames_to_global.to_tensor_4x4(), 'unnormalized_angles': unnormalized_angles, 'angles': angles, 'positions': pred_xyz, 'states': s}
outputs.append(preds)
rigids = rigids.stop_rot_gradient()
del z, z_reference_list
if _offload_inference:
evoformer_output_dict['pair'] = evoformer_output_dict['pair'].to(s.device)
outputs = dict_multimap(torch.stack, outputs)
outputs['single'] = s
return outputs
def _init_residue_constants(self, float_dtype, device):
if not hasattr(self, 'default_frames'):
self.register_buffer('default_frames', torch.tensor(residue_constants.restype_rigid_group_default_frame, dtype=float_dtype, device=device, requires_grad=False), persistent=False)
if not hasattr(self, 'group_idx'):
self.register_buffer('group_idx', torch.tensor(residue_constants.restype_atom14_to_rigid_group, device=device, requires_grad=False), persistent=False)
if not hasattr(self, 'atom_mask'):
self.register_buffer('atom_mask', torch.tensor(residue_constants.restype_atom14_mask, dtype=float_dtype, device=device, requires_grad=False), persistent=False)
if not hasattr(self, 'lit_positions'):
self.register_buffer('lit_positions', torch.tensor(residue_constants.restype_atom14_rigid_group_positions, dtype=float_dtype, device=device, requires_grad=False), persistent=False)
def torsion_angles_to_frames(self, r, alpha, f):
self._init_residue_constants(alpha.dtype, alpha.device)
return torsion_angles_to_frames(r, alpha, f, self.default_frames)
def frames_and_literature_positions_to_atom14_pos(self, r, f):
self._init_residue_constants(r.get_rots().dtype, r.get_rots().device)
return frames_and_literature_positions_to_atom14_pos(r, f, self.default_frames, self.group_idx, self.atom_mask, self.lit_positions)
|
class EsmFoldStructureModule(nn.Module):
def __init__(self, config):
pass
def forward(self, evoformer_output_dict, aatype, mask=None, _offload_inference=False):
'''
Args:
evoformer_output_dict:
Dictionary containing:
"single":
[*, N_res, C_s] single representation
"pair":
[*, N_res, N_res, C_z] pair representation
aatype:
[*, N_res] amino acid indices
mask:
Optional [*, N_res] sequence mask
Returns:
A dictionary of outputs
'''
pass
def _init_residue_constants(self, float_dtype, device):
pass
def torsion_angles_to_frames(self, r, alpha, f):
pass
def frames_and_literature_positions_to_atom14_pos(self, r, f):
pass
| 6
| 1
| 39
| 5
| 27
| 7
| 3
| 0.26
| 1
| 9
| 7
| 0
| 5
| 11
| 5
| 15
| 200
| 30
| 136
| 36
| 124
| 35
| 65
| 29
| 59
| 5
| 1
| 1
| 13
|
2,461
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldStructureModuleTransition
|
import torch.nn as nn
from torch.nn import LayerNorm
class EsmFoldStructureModuleTransition(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
for _ in range(config.num_transition_layers):
l = EsmFoldStructureModuleTransitionLayer(config)
self.layers.append(l)
self.dropout = nn.Dropout(config.dropout_rate)
self.layer_norm = LayerNorm(config.sequence_dim)
def forward(self, s):
for l in self.layers:
s = l(s)
s = self.dropout(s)
s = self.layer_norm(s)
return s
|
class EsmFoldStructureModuleTransition(nn.Module):
def __init__(self, config):
pass
def forward(self, s):
pass
| 3
| 0
| 10
| 2
| 8
| 0
| 2
| 0
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 21
| 5
| 16
| 10
| 13
| 0
| 16
| 10
| 13
| 2
| 1
| 1
| 4
|
2,462
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldStructureModuleTransitionLayer
|
import torch.nn as nn
class EsmFoldStructureModuleTransitionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.linear_1 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init='relu')
self.linear_2 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init='relu')
self.linear_3 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init='final')
self.relu = nn.ReLU()
def forward(self, s):
s_initial = s
s = self.linear_1(s)
s = self.relu(s)
s = self.linear_2(s)
s = self.relu(s)
s = self.linear_3(s)
s = s + s_initial
return s
|
class EsmFoldStructureModuleTransitionLayer(nn.Module):
def __init__(self, config):
pass
def forward(self, s):
pass
| 3
| 0
| 10
| 2
| 8
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 4
| 2
| 12
| 21
| 5
| 16
| 8
| 13
| 0
| 16
| 8
| 13
| 1
| 1
| 0
| 2
|
2,463
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldTriangleAttention
|
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from torch.nn import LayerNorm
from functools import partial
from .openfold_utils import OFProtein, Rigid, Rotation, atom14_to_atom37, chunk_layer, compute_predicted_aligned_error, compute_tm, frames_and_literature_positions_to_atom14_pos, make_atom14_masks, residue_constants, to_pdb, torsion_angles_to_frames
class EsmFoldTriangleAttention(nn.Module):
def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1000000000.0):
"""
Args:
c_in:
Input channel dimension
c_hidden:
Overall hidden channel dimension (not per-head)
no_heads:
Number of attention heads
"""
super().__init__()
self.c_in = c_in
self.c_hidden = c_hidden
self.no_heads = no_heads
self.starting = starting
self.inf = inf
self.layer_norm = LayerNorm(self.c_in)
self.linear = EsmFoldLinear(c_in, self.no_heads, bias=False, init='normal')
self.mha = EsmFoldAttention(self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads)
@torch.jit.ignore
def _chunk(self, x: torch.Tensor, biases: list[torch.Tensor], chunk_size: int, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor:
"""triangle! triangle!"""
mha_inputs = {'q_x': x, 'kv_x': x, 'biases': biases}
return chunk_layer(partial(self.mha, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma), mha_inputs, chunk_size=chunk_size, no_batch_dims=len(x.shape[:-2]), _out=x if inplace_safe else None)
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor:
"""
Args:
x:
[*, I, J, C_in] input tensor (e.g. the pair representation)
Returns:
[*, I, J, C_in] output tensor
"""
if mask is None:
mask = x.new_ones(x.shape[:-1])
if not self.starting:
x = x.transpose(-2, -3)
mask = mask.transpose(-1, -2)
x = self.layer_norm(x)
mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]
triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))
triangle_bias = triangle_bias.unsqueeze(-4)
biases = [mask_bias, triangle_bias]
if chunk_size is not None:
x = self._chunk(x, biases, chunk_size, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma, inplace_safe=inplace_safe)
else:
x = self.mha(q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma)
if not self.starting:
x = x.transpose(-2, -3)
return x
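# --- Hedged sketch (dimensions assumed): attention over the pair representation, biased by
# a learned per-pair logit. starting=False would instead attend along the ending edge, and a
# non-None chunk_size switches to the chunked code path above.
import torch
tri_att = EsmFoldTriangleAttention(c_in=64, c_hidden=16, no_heads=4, starting=True)
x = torch.randn(1, 10, 10, 64)                # [*, I, J, C_in] pair representation
mask = torch.ones(1, 10, 10)
out = tri_att(x, mask=mask, chunk_size=None)  # -> same shape as x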
|
class EsmFoldTriangleAttention(nn.Module):
def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1000000000.0):
'''
Args:
c_in:
Input channel dimension
c_hidden:
Overall hidden channel dimension (not per-head)
no_heads:
Number of attention heads
'''
pass
@torch.jit.ignore
def _chunk(self, x: torch.Tensor, biases: list[torch.Tensor], chunk_size: int, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor:
'''triangle! triangle!'''
pass
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor:
'''
Args:
x:
[*, I, J, C_in] input tensor (e.g. the pair representation)
Returns:
[*, I, J, C_in] output tensor
'''
pass
| 5
| 3
| 35
| 5
| 23
| 7
| 3
| 0.31
| 1
| 7
| 2
| 0
| 3
| 8
| 3
| 13
| 108
| 16
| 70
| 33
| 49
| 22
| 31
| 16
| 27
| 5
| 1
| 1
| 8
|
2,464
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldTriangleMultiplicativeUpdate
|
import torch
from typing import Callable, Optional, Union
import torch.nn as nn
from torch.nn import LayerNorm
class EsmFoldTriangleMultiplicativeUpdate(nn.Module):
"""
Implements Algorithms 11 and 12.
"""
def __init__(self, config, _outgoing=True):
super().__init__()
c_hidden = config.pairwise_state_dim
self._outgoing = _outgoing
self.linear_a_p = EsmFoldLinear(c_hidden, c_hidden)
self.linear_a_g = EsmFoldLinear(c_hidden, c_hidden, init='gating')
self.linear_b_p = EsmFoldLinear(c_hidden, c_hidden)
self.linear_b_g = EsmFoldLinear(c_hidden, c_hidden, init='gating')
self.linear_g = EsmFoldLinear(c_hidden, c_hidden, init='gating')
self.linear_z = EsmFoldLinear(c_hidden, c_hidden, init='final')
self.layer_norm_in = LayerNorm(c_hidden)
self.layer_norm_out = LayerNorm(c_hidden)
self.sigmoid = nn.Sigmoid()
def _combine_projections(self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int]=None) -> torch.Tensor:
if self._outgoing:
a = permute_final_dims(a, (2, 0, 1))
b = permute_final_dims(b, (2, 1, 0))
else:
a = permute_final_dims(a, (2, 1, 0))
b = permute_final_dims(b, (2, 0, 1))
if _inplace_chunk_size is not None:
for i in range(0, a.shape[-3], _inplace_chunk_size):
a_chunk = a[..., i:i + _inplace_chunk_size, :, :]
b_chunk = b[..., i:i + _inplace_chunk_size, :, :]
a[..., i:i + _inplace_chunk_size, :, :] = torch.matmul(a_chunk, b_chunk)
p = a
else:
p = torch.matmul(a, b)
return permute_final_dims(p, (1, 2, 0))
def _inference_forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_chunk_size: Optional[int]=None, with_add: bool=True):
"""
Args:
z:
A [*, N, N, C_z] pair representation
mask:
A [*, N, N] pair mask
inplace_chunk_size:
Size of chunks used in the main computation. Increase to trade memory for speed.
with_add:
If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update).
Returns:
A reference to the overwritten z
More memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the
addition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten
values to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size.
Useful for inference on extremely long sequences.
It works as follows. We will make reference to variables used in the default forward implementation below.
Naively, triangle multiplication attention requires the manifestation of 5 tensors the size of z: 1) z, the
"square" input tensor, 2) a, the first projection of z, 3) b, the second projection of b, 4) g, a z-sized mask,
and 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for
N=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate
tensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the
tensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over
pairs of chunks of z: hereafter "columns" and "rows" of z, even though each "column" and "row" in fact contains
inplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring
total memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks
directly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith "column" of z at
the end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column
ahead of previously overwritten columns and can be recovered directly from z. After the first iteration,
however, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache,
a tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For
0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith
iteration. Once i exceeds n/2, the cache is "reoriented" to encompass the 3rd and 4th quadrants of z instead.
Though the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the
z-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache.
After the final iteration, z has been completely overwritten and contains the triangular multiplicative update.
If with_add is True, it instead contains the sum of z and the triangular multiplicative update. In either case,
peak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small
variables.
"""
if mask is None:
mask = z.new_ones(z.shape[:-1])
mask = mask.unsqueeze(-1)
def compute_projection_helper(pair, mask, a=True):
if a:
linear_g = self.linear_a_g
linear_p = self.linear_a_p
else:
linear_g = self.linear_b_g
linear_p = self.linear_b_p
pair = self.layer_norm_in(pair)
p = linear_g(pair)
p.sigmoid_()
p *= linear_p(pair)
p *= mask
p = permute_final_dims(p, (2, 0, 1))
return p
def compute_projection(pair, mask, a=True, chunked=True):
need_transpose = self._outgoing ^ a
if not chunked:
p = compute_projection_helper(pair, mask, a)
if need_transpose:
p = p.transpose(-1, -2)
else:
linear_g = self.linear_a_g if a else self.linear_b_g
c = linear_g.bias.shape[-1]
out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1]
p = pair.new_zeros(out_shape)
for i in range(0, pair.shape[-3], inplace_chunk_size):
pair_chunk = pair[..., i:i + inplace_chunk_size, :, :]
pair_chunk = compute_projection_helper(pair[..., i:i + inplace_chunk_size, :, :], mask[..., i:i + inplace_chunk_size, :, :], a)
if need_transpose:
pair_chunk = pair_chunk.transpose(-1, -2)
p[..., i:i + inplace_chunk_size] = pair_chunk
else:
p[..., i:i + inplace_chunk_size, :] = pair_chunk
del pair_chunk
return p
a = compute_projection(z, mask, True, chunked=True)
if inplace_chunk_size is not None:
n = a.shape[-1]
half_n = n // 2 + n % 2
row_dim = -3
col_dim = -2
b_chunk_dim = row_dim if self._outgoing else col_dim
def empty_slicer(t):
return [slice(None) for _ in t.shape]
def slice_tensor(t, start, end, dim):
s = empty_slicer(t)
s[dim] = slice(start, end)
return t[s]
def flip_z_cache_(z_cache, z):
quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim)
z_cache = z_cache.transpose(row_dim, col_dim)
z_cache = z_cache[..., :n // 2, :, :]
first_half_slicer = empty_slicer(z_cache)
first_half_slicer[col_dim] = slice(0, half_n)
z_cache[first_half_slicer] = quadrant_3
quadrant_4 = slice_tensor(z, half_n, None, row_dim)
quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim)
quadrant_3_slicer = empty_slicer(z_cache)
quadrant_3_slicer[col_dim] = slice(half_n, None)
z_cache[quadrant_3_slicer] = quadrant_4
return z_cache
z_cache_shape = list(z.shape)
z_cache_shape[col_dim] = half_n
z_cache = z.new_zeros(z_cache_shape)
z_cache_slicer = empty_slicer(z_cache)
z_cache_slicer[col_dim] = slice(0, half_n)
z_cache.copy_(z[z_cache_slicer])
z_cache_rotated = False
i_range = list(range(0, half_n, inplace_chunk_size))
initial_offsets = [i_2 - i_1 for i_1, i_2 in zip(i_range, i_range[1:] + [half_n])]
after_half = list(range(half_n, n, inplace_chunk_size))
after_half_offsets = [inplace_chunk_size for _ in after_half]
combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets)
for i, offset in combined_range_with_offsets:
if not z_cache_rotated and i >= half_n:
z_cache = flip_z_cache_(z_cache, z)
z_cache_rotated = True
z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim)
mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim)
z_chunk_b = z_chunk_b.clone()
if b_chunk_dim == col_dim:
z_chunk_b = slice_tensor(z, i, i + offset, col_dim)
elif not z_cache_rotated:
z_chunk_slicer = empty_slicer(z_chunk_b)
z_chunk_slicer[col_dim] = slice(0, half_n)
z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim)
else:
z_cache_offset = i - half_n
z_chunk_b = slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim)
b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False)
del z_chunk_b
x_chunk = torch.matmul(a, b_chunk)
x_chunk = permute_final_dims(x_chunk, (1, 2, 0))
x_chunk = self.layer_norm_out(x_chunk)
x_chunk = self.linear_z(x_chunk)
z_chunk_g = slice_tensor(z, i, i + offset, col_dim)
g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g))
g_chunk.sigmoid_()
del z_chunk_g
x_chunk *= g_chunk
z_slicer = empty_slicer(z)
z_slicer[col_dim] = slice(i, i + offset)
if with_add:
z[z_slicer] += x_chunk
else:
z[z_slicer] = x_chunk
else:
b = compute_projection(z, mask, False, False)
x = torch.matmul(a, b)
x = self.layer_norm_out(x)
x = self.linear_z(x)
g = self.linear_g(z)
g.sigmoid_()
x *= g
if with_add:
z += x
else:
z = x
return z
def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_safe: bool=False, _add_with_inplace: bool=False, _inplace_chunk_size: Optional[int]=256) -> torch.Tensor:
"""
Args:
z:
[*, N_res, N_res, C_z] input tensor
mask:
[*, N_res, N_res] input mask
Returns:
[*, N_res, N_res, C_z] output tensor
"""
if inplace_safe:
x = self._inference_forward(z, mask, inplace_chunk_size=_inplace_chunk_size, with_add=_add_with_inplace)
return x
if mask is None:
mask = z.new_ones(z.shape[:-1])
mask = mask.unsqueeze(-1)
z = self.layer_norm_in(z)
a = mask
a = a * self.sigmoid(self.linear_a_g(z))
a = a * self.linear_a_p(z)
b = mask
b = b * self.sigmoid(self.linear_b_g(z))
b = b * self.linear_b_p(z)
device_type = a.device.type if a.device.type != 'mps' else 'cpu'
if is_fp16_enabled(device_type):
with torch.autocast(device_type=device_type, enabled=False):
x = self._combine_projections(a.float(), b.float())
else:
x = self._combine_projections(a, b)
del a, b
x = self.layer_norm_out(x)
x = self.linear_z(x)
g = self.sigmoid(self.linear_g(z))
x = x * g
return x
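# --- Minimal sketch with a stand-in config (an assumption for illustration). The default
# call materializes full-sized intermediates; inplace_safe=True takes the chunked, in-place
# inference path described in the docstring above and overwrites `z`.
import torch
from types import SimpleNamespace
cfg = SimpleNamespace(pairwise_state_dim=64)
tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(cfg, _outgoing=True)
z = torch.randn(1, 16, 16, 64)         # [*, N, N, C_z] pair representation
mask = torch.ones(1, 16, 16)
z_update = tri_mul_out(z, mask=mask)   # default path, shape (1, 16, 16, 64)
z = tri_mul_out(z, mask=mask, inplace_safe=True, _add_with_inplace=True, _inplace_chunk_size=4)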
|
class EsmFoldTriangleMultiplicativeUpdate(nn.Module):
'''
Implements Algorithms 11 and 12.
'''
def __init__(self, config, _outgoing=True):
pass
def _combine_projections(self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int]=None) -> torch.Tensor:
pass
def _inference_forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_chunk_size: Optional[int]=None, with_add: bool=True):
'''
Args:
z:
A [*, N, N, C_z] pair representation
mask:
A [*, N, N] pair mask
inplace_chunk_size:
Size of chunks used in the main computation. Increase to trade memory for speed.
with_add:
If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update).
Returns:
A reference to the overwritten z
More memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the
addition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten
values to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size.
Useful for inference on extremely long sequences.
It works as follows. We will make reference to variables used in the default forward implementation below.
Naively, triangle multiplication attention requires the manifestation of 5 tensors the size of z: 1) z, the
"square" input tensor, 2) a, the first projection of z, 3) b, the second projection of b, 4) g, a z-sized mask,
and 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for
N=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate
tensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the
tensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over
pairs of chunks of z: hereafter "columns" and "rows" of z, even though each "column" and "row" in fact contains
inplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring
total memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks
directly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith "column" of z at
the end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column
ahead of previously overwritten columns and can be recovered directly from z. After the first iteration,
however, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache,
a tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For
0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith
iteration. Once i exceeds n/2, the cache is "reoriented" to encompass the 3rd and 4th quadrants of z instead.
Though the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the
z-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache.
After the final iteration, z has been completely overwritten and contains the triangular multiplicative update.
If with_add is True, it instead contains the sum of z and the triangular multiplicative update. In either case,
peak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small
variables.
'''
pass
def compute_projection_helper(pair, mask, a=True):
pass
def compute_projection_helper(pair, mask, a=True):
pass
def empty_slicer(t):
pass
def slice_tensor(t, start, end, dim):
pass
def flip_z_cache_(z_cache, z):
pass
def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_safe: bool=False, _add_with_inplace: bool=False, _inplace_chunk_size: Optional[int]=256) -> torch.Tensor:
'''
Args:
z:
[*, N_res, N_res, C_z] input tensor
mask:
[*, N_res, N_res] input mask
Returns:
[*, N_res, N_res, C_z] output tensor
'''
pass
| 10
| 3
| 44
| 6
| 29
| 9
| 3
| 0.37
| 1
| 10
| 1
| 0
| 4
| 10
| 4
| 14
| 327
| 45
| 206
| 88
| 181
| 77
| 168
| 72
| 158
| 10
| 1
| 4
| 30
|
2,465
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldTriangularSelfAttentionBlock
|
import torch.nn as nn
class EsmFoldTriangularSelfAttentionBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
sequence_state_dim = config.sequence_state_dim
pairwise_state_dim = config.pairwise_state_dim
sequence_num_heads = sequence_state_dim // config.sequence_head_width
pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width
self.layernorm_1 = nn.LayerNorm(sequence_state_dim)
self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim)
self.pair_to_sequence = EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads)
self.seq_attention = EsmFoldSelfAttention(sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True)
self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True)
self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False)
self.tri_att_start = EsmFoldTriangleAttention(pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1000000000.0, starting=True)
self.tri_att_end = EsmFoldTriangleAttention(pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1000000000.0, starting=False)
self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout)
self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout)
self.drop = nn.Dropout(config.dropout)
self.row_drop = EsmFoldDropout(config.dropout * 2, 2)
self.col_drop = EsmFoldDropout(config.dropout * 2, 1)
def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs):
"""
Inputs:
sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim mask: B x L boolean
tensor of valid positions
Output:
sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim
"""
if len(sequence_state.shape) != 3:
raise ValueError(f'`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.')
if len(pairwise_state.shape) != 4:
raise ValueError(f'`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.')
if mask is not None and len(mask.shape) != 2:
raise ValueError(f'`mask` should be a 2d-tensor, got {len(mask.shape)} dims.')
batch_dim, seq_dim, sequence_state_dim = sequence_state.shape
pairwise_state_dim = pairwise_state.shape[3]
if sequence_state_dim != self.config.sequence_state_dim:
raise ValueError(f'`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got {sequence_state_dim} != {self.config.sequence_state_dim}.')
if pairwise_state_dim != self.config.pairwise_state_dim:
raise ValueError(f'`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. Got {pairwise_state_dim} != {self.config.pairwise_state_dim}.')
if batch_dim != pairwise_state.shape[0]:
raise ValueError(f'`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != {pairwise_state.shape[0]}.')
if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]:
raise ValueError(f'`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != {pairwise_state.shape[1]} or {pairwise_state.shape[2]}.')
bias = self.pair_to_sequence(pairwise_state)
y = self.layernorm_1(sequence_state)
y, _ = self.seq_attention(y, mask=mask, bias=bias)
sequence_state = sequence_state + self.drop(y)
sequence_state = self.mlp_seq(sequence_state)
pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state)
tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None
pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask))
pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask))
pairwise_state = pairwise_state + self.row_drop(self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size))
pairwise_state = pairwise_state + self.col_drop(self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size))
pairwise_state = self.mlp_pair(pairwise_state)
return (sequence_state, pairwise_state)
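# --- Hedged sketch with a stand-in config (SimpleNamespace is an assumption; the real block
# is built from the ESMFold trunk config). One block updates both tracks and returns them
# with unchanged shapes.
import torch
from types import SimpleNamespace
cfg = SimpleNamespace(sequence_state_dim=64, pairwise_state_dim=32, sequence_head_width=16, pairwise_head_width=8, dropout=0.0)
block = EsmFoldTriangularSelfAttentionBlock(cfg)
seq = torch.randn(1, 10, 64)       # B x L x sequence_state_dim
pair = torch.randn(1, 10, 10, 32)  # B x L x L x pairwise_state_dim
mask = torch.ones(1, 10)
seq, pair = block(seq, pair, mask=mask)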
|
class EsmFoldTriangularSelfAttentionBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs):
'''
Inputs:
sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim mask: B x L boolean
tensor of valid positions
Output:
sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim
'''
pass
| 3
| 1
| 50
| 8
| 36
| 6
| 5
| 0.16
| 1
| 9
| 7
| 0
| 2
| 14
| 2
| 12
| 102
| 17
| 73
| 27
| 70
| 12
| 51
| 27
| 48
| 9
| 1
| 1
| 10
|
2,466
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmFoldingTrunk
|
from ...utils import ContextManagers, auto_docstring, is_scipy_available, logging
import torch
import torch.nn as nn
class EsmFoldingTrunk(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
c_s = config.sequence_state_dim
c_z = config.pairwise_state_dim
self.pairwise_positional_embedding = EsmFoldRelativePosition(config)
self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)])
self.recycle_bins = 15
self.recycle_s_norm = nn.LayerNorm(c_s)
self.recycle_z_norm = nn.LayerNorm(c_z)
self.recycle_disto = nn.Embedding(self.recycle_bins, c_z)
self.recycle_disto.weight[0].detach().zero_()
self.structure_module = EsmFoldStructureModule(config.structure_module)
self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim)
self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim)
self.chunk_size = config.chunk_size
def set_chunk_size(self, chunk_size):
self.chunk_size = chunk_size
def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles):
"""
Inputs:
seq_feats: B x L x C tensor of sequence features pair_feats: B x L x L x C tensor of pair features residx: B
x L long tensor giving the position in the sequence mask: B x L boolean tensor indicating valid residues
Output:
predicted_structure: B x L x (num_atoms_per_residue * 3) tensor wrapped in a Coordinates object
"""
device = seq_feats.device
s_s_0 = seq_feats
s_z_0 = pair_feats
if no_recycles is None:
no_recycles = self.config.max_recycles
else:
if no_recycles < 0:
raise ValueError('Number of recycles must not be negative.')
no_recycles += 1
def trunk_iter(s, z, residx, mask):
z = z + self.pairwise_positional_embedding(residx, mask=mask)
for block in self.blocks:
s, z = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size)
return (s, z)
s_s = s_s_0
s_z = s_z_0
recycle_s = torch.zeros_like(s_s)
recycle_z = torch.zeros_like(s_z)
recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)
for recycle_idx in range(no_recycles):
with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]):
recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device)
recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device)
recycle_z += self.recycle_disto(recycle_bins.detach()).to(device)
s_s, s_z = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask)
structure = self.structure_module({'single': self.trunk2sm_s(s_s), 'pair': self.trunk2sm_z(s_z)}, true_aa, mask.float())
recycle_s = s_s
recycle_z = s_z
recycle_bins = EsmFoldingTrunk.distogram(structure['positions'][-1][:, :, :3], 3.375, 21.375, self.recycle_bins)
structure['s_s'] = s_s
structure['s_z'] = s_z
return structure
@staticmethod
def distogram(coords, min_bin, max_bin, num_bins):
boundaries = torch.linspace(min_bin, max_bin, num_bins - 1, device=coords.device)
boundaries = boundaries ** 2
N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
b = CA - N
c = C - CA
a = b.cross(c, dim=-1)
CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True)
bins = torch.sum(dists > boundaries, dim=-1)
return bins
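# --- Worked sketch of the static distogram helper (random coordinates, for illustration):
# N/CA/C backbone coordinates yield an inferred C-beta position, and pairwise squared
# distances are bucketed into num_bins recycling bins. The bin bounds below are the ones
# used in EsmFoldingTrunk.forward.
import torch
coords = torch.randn(1, 10, 3, 3)   # B x L x (N, CA, C) x xyz
bins = EsmFoldingTrunk.distogram(coords, 3.375, 21.375, 15)   # -> shape (1, 10, 10), dtype long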
|
class EsmFoldingTrunk(nn.Module):
def __init__(self, config):
pass
def set_chunk_size(self, chunk_size):
pass
def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles):
'''
Inputs:
seq_feats: B x L x C tensor of sequence features pair_feats: B x L x L x C tensor of pair features residx: B
x L long tensor giving the position in the sequence mask: B x L boolean tensor indicating valid residues
Output:
predicted_structure: B x L x (num_atoms_per_residue * 3) tensor wrapped in a Coordinates object
'''
pass
def trunk_iter(s, z, residx, mask):
pass
@staticmethod
def distogram(coords, min_bin, max_bin, num_bins):
pass
| 7
| 1
| 23
| 4
| 16
| 4
| 2
| 0.23
| 1
| 6
| 3
| 0
| 3
| 11
| 4
| 14
| 115
| 21
| 78
| 39
| 71
| 18
| 62
| 38
| 56
| 5
| 1
| 2
| 10
|
2,467
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmForProteinFolding
|
from typing import Callable, Optional, Union
from .modeling_esm import EsmModel, EsmPreTrainedModel
from torch.nn import LayerNorm
import torch.nn as nn
import torch
from ...utils import ContextManagers, auto_docstring, is_scipy_available, logging
from .openfold_utils import OFProtein, Rigid, Rotation, atom14_to_atom37, chunk_layer, compute_predicted_aligned_error, compute_tm, frames_and_literature_positions_to_atom14_pos, make_atom14_masks, residue_constants, to_pdb, torsion_angles_to_frames
@auto_docstring(custom_intro='\n ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed\n by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to\n the rest of the model combined! It outputs a dictionary containing predicted structural information about the input\n protein(s).\n ')
class EsmForProteinFolding(EsmPreTrainedModel):
_no_split_modules = ['EsmFoldStructureModule', 'EsmFoldTriangularSelfAttentionBlock']
_supports_flash_attn = False
_supports_sdpa = False
_supports_attention_backend = False
_can_record_outputs = None
def __init__(self, config):
super().__init__(config)
self.config = config
self.distogram_bins = 64
self.esm = EsmModel(config, add_pooling_layer=False)
self.esm.requires_grad_(False)
if self.config.esmfold_config.fp16_esm:
self.esm.half()
self.esm_feats = self.config.hidden_size
self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads
self.esm_layers = self.config.num_hidden_layers
self.register_buffer('af2_to_esm', self._af2_to_esm_from_vocab_list(config.vocab_list))
self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1))
trunk_config = self.config.esmfold_config.trunk
c_s = trunk_config.sequence_state_dim
c_z = trunk_config.pairwise_state_dim
self.esm_s_mlp = nn.Sequential(LayerNorm(self.esm_feats), nn.Linear(self.esm_feats, c_s), nn.ReLU(), nn.Linear(c_s, c_s))
self.n_tokens_embed = residue_constants.restype_num + 3
self.pad_idx = 0
self.unk_idx = self.n_tokens_embed - 2
self.mask_idx = self.n_tokens_embed - 1
self.esm_dict_cls_idx = self.config.vocab_list.index('<cls>')
self.esm_dict_mask_idx = self.config.vocab_list.index('<mask>')
self.esm_dict_eos_idx = self.config.vocab_list.index('<eos>')
self.esm_dict_padding_idx = self.config.vocab_list.index('<pad>')
if self.config.esmfold_config.embed_aa:
self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0)
self.trunk = EsmFoldingTrunk(trunk_config)
self.distogram_head = nn.Linear(c_z, self.distogram_bins)
self.ptm_head = nn.Linear(c_z, self.distogram_bins)
self.lm_head = nn.Linear(c_s, self.n_tokens_embed)
self.lddt_bins = 50
structure_module_config = trunk_config.structure_module
self.lddt_head = nn.Sequential(nn.LayerNorm(structure_module_config.sequence_dim), nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim), nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim), nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins))
@staticmethod
def _af2_to_esm_from_vocab_list(vocab_list: list[str]) -> torch.Tensor:
esm_reorder = [vocab_list.index('<pad>')] + [vocab_list.index(v) for v in residue_constants.restypes_with_x]
return torch.tensor(esm_reorder)
@auto_docstring
def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, masking_pattern: Optional[torch.Tensor]=None, num_recycles: Optional[int]=None, output_hidden_states: Optional[bool]=False) -> EsmForProteinFoldingOutput:
"""
masking_pattern (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.
num_recycles (`int`, *optional*, defaults to `None`):
Number of times to recycle the input sequence. If `None`, defaults to `config.num_recycles`. "Recycling"
consists of passing the output of the folding trunk back in as input to the trunk. During training, the
number of recycles should vary with each batch, to ensure that the model learns to output valid predictions
after each recycle. During inference, num_recycles should be set to the highest value that the model was
trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is
used.
Example:
```python
>>> from transformers import AutoTokenizer, EsmForProteinFolding
>>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
>>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False) # A tiny random peptide
>>> outputs = model(**inputs)
>>> folded_positions = outputs.positions
```
"""
cfg = self.config.esmfold_config
aa = input_ids
B = aa.shape[0]
L = aa.shape[1]
device = input_ids.device
if attention_mask is None:
attention_mask = torch.ones_like(aa, device=device)
if position_ids is None:
position_ids = torch.arange(L, device=device).expand_as(input_ids)
esmaa = self.af2_idx_to_esm_idx(aa, attention_mask)
if masking_pattern is not None:
masked_aa, esmaa, mlm_targets = self.bert_mask(aa, esmaa, attention_mask, masking_pattern)
else:
masked_aa = aa
mlm_targets = None
esm_s = self.compute_language_model_representations(esmaa)
esm_s = esm_s.to(self.esm_s_combine.dtype)
if cfg.esm_ablate_sequence:
esm_s = esm_s * 0
esm_s = esm_s.detach()
esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
s_s_0 = self.esm_s_mlp(esm_s)
s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim)
if self.config.esmfold_config.embed_aa:
s_s_0 += self.embedding(masked_aa)
structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
structure = {k: v for k, v in structure.items() if k in ['s_z', 's_s', 'frames', 'sidechain_frames', 'unnormalized_angles', 'angles', 'positions', 'states']}
if mlm_targets:
structure['mlm_targets'] = mlm_targets
disto_logits = self.distogram_head(structure['s_z'])
disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2
structure['distogram_logits'] = disto_logits
lm_logits = self.lm_head(structure['s_s'])
structure['lm_logits'] = lm_logits
structure['aatype'] = aa
make_atom14_masks(structure)
for k in ['atom14_atom_exists', 'atom37_atom_exists']:
structure[k] *= attention_mask.unsqueeze(-1)
structure['residue_index'] = position_ids
lddt_head = self.lddt_head(structure['states']).reshape(structure['states'].shape[0], B, L, -1, self.lddt_bins)
structure['lddt_head'] = lddt_head
plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins)
structure['plddt'] = plddt
ptm_logits = self.ptm_head(structure['s_z'])
structure['ptm_logits'] = ptm_logits
structure['ptm'] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins)
structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins))
return EsmForProteinFoldingOutput(**structure)
def af2_idx_to_esm_idx(self, aa, mask):
if self.af2_to_esm.device != aa.device:
self.af2_to_esm = self.af2_to_esm.to(aa.device)
aa = (aa + 1).masked_fill(mask != 1, 0)
return self.af2_to_esm[aa]
def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor:
device = next(self.parameters()).device
B, L = esmaa.shape
if self.config.esmfold_config.bypass_lm:
esm_s = torch.zeros(B, L, self.esm_s_combine.size[0], -1, self.esm_feats, device=device)
return esm_s
bosi, eosi = (self.esm_dict_cls_idx, self.esm_dict_eos_idx)
bos = esmaa.new_full((B, 1), bosi)
eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx)
esmaa = torch.cat([bos, esmaa, eos], dim=1)
esmaa[range(B), (esmaa != 1).sum(1)] = eosi
esm_hidden_states = self.esm(esmaa, attention_mask=esmaa != 1, output_hidden_states=True)['hidden_states']
esm_s = torch.stack(esm_hidden_states, dim=2)
esm_s = esm_s[:, 1:-1]
return esm_s
def bert_mask(self, aa, esmaa, mask, pattern):
new_aa = aa.clone()
target = aa.clone()
new_esmaa = esmaa.clone()
new_aa[pattern == 1] = self.mask_idx
target[pattern != 1] = 0
new_esmaa[pattern == 1] = self.esm_dict_mask_idx
return (new_aa, new_esmaa, target)
@torch.no_grad()
def infer(self, seqs: Union[str, list[str]], position_ids=None):
if isinstance(seqs, str):
lst = [seqs]
else:
lst = seqs
device = next(self.parameters()).device
aatype = collate_dense_tensors([torch.from_numpy(residue_constants.sequence_to_onehot(sequence=seq, mapping=residue_constants.restype_order_with_x, map_unknown_to_x=True)).to(device).argmax(dim=1) for seq in lst])
mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst])
position_ids = torch.arange(aatype.shape[1], device=device).expand(len(lst), -1) if position_ids is None else position_ids.to(device)
if position_ids.ndim == 1:
position_ids = position_ids.unsqueeze(0)
return self.forward(aatype, mask, position_ids=position_ids)
@staticmethod
def output_to_pdb(output: dict) -> list[str]:
"""Returns the pbd (file) string from the model given the model output."""
output = {k: v.to('cpu').numpy() for k, v in output.items()}
pdbs = []
final_atom_positions = atom14_to_atom37(output['positions'][-1], output)
final_atom_mask = output['atom37_atom_exists']
for i in range(output['aatype'].shape[0]):
aa = output['aatype'][i]
pred_pos = final_atom_positions[i]
mask = final_atom_mask[i]
resid = output['residue_index'][i] + 1
pred = OFProtein(aatype=aa, atom_positions=pred_pos, atom_mask=mask, residue_index=resid, b_factors=output['plddt'][i])
pdbs.append(to_pdb(pred))
return pdbs
def infer_pdb(self, seqs, *args, **kwargs) -> str:
"""Returns the pdb (file) string from the model given an input sequence."""
assert isinstance(seqs, str)
output = self.infer(seqs, *args, **kwargs)
return self.output_to_pdb(output)[0]
def infer_pdbs(self, seqs: list[str], *args, **kwargs) -> list[str]:
"""Returns the pdb (file) string from the model given an input sequence."""
output = self.infer(seqs, *args, **kwargs)
return self.output_to_pdb(output)
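# --- Hedged end-to-end sketch (downloads the public facebook/esmfold_v1 checkpoint; the
# sequence is the tiny peptide from the docstring example above). infer_pdb runs inference
# on a raw sequence string and returns a PDB-format string.
from transformers import EsmForProteinFolding
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
model.eval()
pdb_string = model.infer_pdb("MLKNVQVQLV")   # infer() already runs under torch.no_grad()
with open("prediction.pdb", "w") as f:
    f.write(pdb_string)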
|
@auto_docstring(custom_intro='\n ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed\n by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to\n the rest of the model combined! It outputs a dictionary containing predicted structural information about the input\n protein(s).\n ')
class EsmForProteinFolding(EsmPreTrainedModel):
def __init__(self, config):
pass
@staticmethod
def _af2_to_esm_from_vocab_list(vocab_list: list[str]) -> torch.Tensor:
pass
@auto_docstring
def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, masking_pattern: Optional[torch.Tensor]=None, num_recycles: Optional[int]=None, output_hidden_states: Optional[bool]=False) -> EsmForProteinFoldingOutput:
'''
masking_pattern (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.
num_recycles (`int`, *optional*, defaults to `None`):
Number of times to recycle the input sequence. If `None`, defaults to `config.num_recycles`. "Recycling"
consists of passing the output of the folding trunk back in as input to the trunk. During training, the
number of recycles should vary with each batch, to ensure that the model learns to output valid predictions
after each recycle. During inference, num_recycles should be set to the highest value that the model was
trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is
used.
Example:
```python
>>> from transformers import AutoTokenizer, EsmForProteinFolding
>>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
>>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False) # A tiny random peptide
>>> outputs = model(**inputs)
>>> folded_positions = outputs.positions
```
'''
pass
def af2_idx_to_esm_idx(self, aa, mask):
pass
def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor:
pass
def bert_mask(self, aa, esmaa, mask, pattern):
pass
@torch.no_grad()
def infer(self, seqs: Union[str, list[str]], position_ids=None):
pass
@staticmethod
def output_to_pdb(output: dict) -> list[str]:
'''Returns the pdb (file) string from the model given the model output.'''
pass
def infer_pdb(self, seqs, *args, **kwargs) -> str:
'''Returns the pdb (file) string from the model given an input sequence.'''
pass
def infer_pdbs(self, seqs: list[str], *args, **kwargs) -> list[str]:
'''Returns the pdb (file) string from the model given an input sequence.'''
pass
| 16
| 4
| 29
| 4
| 21
| 4
| 3
| 0.2
| 1
| 9
| 3
| 0
| 8
| 24
| 10
| 11
| 303
| 46
| 218
| 97
| 191
| 43
| 146
| 82
| 135
| 8
| 2
| 1
| 25
|
2,468
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/modeling_esmfold.py
|
transformers.models.esm.modeling_esmfold.EsmForProteinFoldingOutput
|
from typing import Callable, Optional, Union
import torch.nn as nn
from dataclasses import dataclass
import torch
from ...utils import ContextManagers, auto_docstring, is_scipy_available, logging
from ...modeling_outputs import ModelOutput
@dataclass
@auto_docstring(custom_intro='\n Output type of [`EsmForProteinFolding`].\n ')
class EsmForProteinFoldingOutput(ModelOutput):
"""
frames (`torch.FloatTensor`):
Output frames.
sidechain_frames (`torch.FloatTensor`):
Output sidechain frames.
unnormalized_angles (`torch.FloatTensor`):
Predicted unnormalized backbone and side chain torsion angles.
angles (`torch.FloatTensor`):
Predicted backbone and side chain torsion angles.
positions (`torch.FloatTensor`):
Predicted positions of the backbone and side chain atoms.
states (`torch.FloatTensor`):
Hidden states from the protein folding trunk.
s_s (`torch.FloatTensor`):
Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem.
s_z (`torch.FloatTensor`):
Pairwise residue embeddings.
distogram_logits (`torch.FloatTensor`):
Input logits to the distogram used to compute residue distances.
lm_logits (`torch.FloatTensor`):
Logits output by the ESM-2 protein language model stem.
aatype (`torch.FloatTensor`):
Input amino acids (AlphaFold2 indices).
atom14_atom_exists (`torch.FloatTensor`):
Whether each atom exists in the atom14 representation.
residx_atom14_to_atom37 (`torch.FloatTensor`):
Mapping between atoms in the atom14 and atom37 representations.
residx_atom37_to_atom14 (`torch.FloatTensor`):
Mapping between atoms in the atom37 and atom14 representations.
atom37_atom_exists (`torch.FloatTensor`):
Whether each atom exists in the atom37 representation.
residue_index (`torch.FloatTensor`):
The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be
a sequence of integers from 0 to `sequence_length`.
lddt_head (`torch.FloatTensor`):
Raw outputs from the lddt head used to compute plddt.
plddt (`torch.FloatTensor`):
Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is
uncertain, or where the protein structure is disordered.
ptm_logits (`torch.FloatTensor`):
Raw logits used for computing ptm.
ptm (`torch.FloatTensor`):
TM-score output representing the model's high-level confidence in the overall structure.
aligned_confidence_probs (`torch.FloatTensor`):
Per-residue confidence scores for the aligned structure.
predicted_aligned_error (`torch.FloatTensor`):
Predicted error between the model's prediction and the ground truth.
max_predicted_aligned_error (`torch.FloatTensor`):
Per-sample maximum predicted error.
"""
frames: Optional[torch.FloatTensor] = None
sidechain_frames: Optional[torch.FloatTensor] = None
unnormalized_angles: Optional[torch.FloatTensor] = None
angles: Optional[torch.FloatTensor] = None
positions: Optional[torch.FloatTensor] = None
states: Optional[torch.FloatTensor] = None
s_s: Optional[torch.FloatTensor] = None
s_z: Optional[torch.FloatTensor] = None
distogram_logits: Optional[torch.FloatTensor] = None
lm_logits: Optional[torch.FloatTensor] = None
aatype: Optional[torch.FloatTensor] = None
atom14_atom_exists: Optional[torch.FloatTensor] = None
residx_atom14_to_atom37: Optional[torch.FloatTensor] = None
residx_atom37_to_atom14: Optional[torch.FloatTensor] = None
atom37_atom_exists: Optional[torch.FloatTensor] = None
residue_index: Optional[torch.FloatTensor] = None
lddt_head: Optional[torch.FloatTensor] = None
plddt: Optional[torch.FloatTensor] = None
ptm_logits: Optional[torch.FloatTensor] = None
ptm: Optional[torch.FloatTensor] = None
aligned_confidence_probs: Optional[torch.FloatTensor] = None
predicted_aligned_error: Optional[torch.FloatTensor] = None
max_predicted_aligned_error: Optional[torch.FloatTensor] = None
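The dataclass above bundles the structural outputs with per-residue and global confidence estimates. A minimal sketch of reading a few of those fields from a forward pass, assuming the `facebook/esmfold_v1` checkpoint used elsewhere in this file:

```python
import torch
from transformers import AutoTokenizer, EsmForProteinFolding

tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")

inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False)
with torch.no_grad():
    outputs = model(**inputs)  # an EsmForProteinFoldingOutput

print(outputs.positions.shape)  # predicted backbone and side chain atom positions
print(outputs.plddt.shape)      # per-residue confidence scores
print(outputs.ptm)              # global TM-score confidence
```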
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`EsmForProteinFoldingOutput`].\n ')
class EsmForProteinFoldingOutput(ModelOutput):
'''
frames (`torch.FloatTensor`):
Output frames.
sidechain_frames (`torch.FloatTensor`):
Output sidechain frames.
unnormalized_angles (`torch.FloatTensor`):
Predicted unnormalized backbone and side chain torsion angles.
angles (`torch.FloatTensor`):
Predicted backbone and side chain torsion angles.
positions (`torch.FloatTensor`):
Predicted positions of the backbone and side chain atoms.
states (`torch.FloatTensor`):
Hidden states from the protein folding trunk.
s_s (`torch.FloatTensor`):
Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem.
s_z (`torch.FloatTensor`):
Pairwise residue embeddings.
distogram_logits (`torch.FloatTensor`):
Input logits to the distogram used to compute residue distances.
lm_logits (`torch.FloatTensor`):
Logits output by the ESM-2 protein language model stem.
aatype (`torch.FloatTensor`):
Input amino acids (AlphaFold2 indices).
atom14_atom_exists (`torch.FloatTensor`):
Whether each atom exists in the atom14 representation.
residx_atom14_to_atom37 (`torch.FloatTensor`):
Mapping between atoms in the atom14 and atom37 representations.
residx_atom37_to_atom14 (`torch.FloatTensor`):
Mapping between atoms in the atom37 and atom14 representations.
atom37_atom_exists (`torch.FloatTensor`):
Whether each atom exists in the atom37 representation.
residue_index (`torch.FloatTensor`):
The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be
a sequence of integers from 0 to `sequence_length`.
lddt_head (`torch.FloatTensor`):
Raw outputs from the lddt head used to compute plddt.
plddt (`torch.FloatTensor`):
Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is
uncertain, or where the protein structure is disordered.
ptm_logits (`torch.FloatTensor`):
Raw logits used for computing ptm.
ptm (`torch.FloatTensor`):
TM-score output representing the model's high-level confidence in the overall structure.
aligned_confidence_probs (`torch.FloatTensor`):
Per-residue confidence scores for the aligned structure.
predicted_aligned_error (`torch.FloatTensor`):
Predicted error between the model's prediction and the ground truth.
max_predicted_aligned_error (`torch.FloatTensor`):
Per-sample maximum predicted error.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 2.17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 2 | 24 | 24 | 23 | 52 | 24 | 24 | 23 | 0 | 1 | 0 | 0 |
2,469 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/openfold_utils/chunk_utils.py | transformers.models.esm.openfold_utils.chunk_utils.ChunkSizeTuner |
from .tensor_utils import tensor_tree_map, tree_map
from typing import Any, Callable, Optional, Union
import math
import torch
from collections.abc import Iterable, Sequence
import logging
class ChunkSizeTuner:
def __init__(self, max_chunk_size: int=512):
self.max_chunk_size = max_chunk_size
self.cached_chunk_size: Optional[int] = None
self.cached_arg_data: Optional[tuple] = None
def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
candidates: list[int] = [2 ** l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
candidates = [c for c in candidates if c > min_chunk_size]
candidates = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(chunk_size: int) -> bool:
try:
with torch.no_grad():
fn(*args, chunk_size=chunk_size)
return True
except RuntimeError:
return False
min_viable_chunk_size_index = 0
i = len(candidates) - 1
while i > min_viable_chunk_size_index:
viable = test_chunk_size(candidates[i])
if not viable:
i = (min_viable_chunk_size_index + i) // 2
else:
min_viable_chunk_size_index = i
i = (i + len(candidates) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
consistent = True
for a1, a2 in zip(ac1, ac2):
assert type(ac1) is type(ac2)
if isinstance(ac1, (list, tuple)):
consistent &= self._compare_arg_caches(a1, a2)
elif isinstance(ac1, dict):
a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
consistent &= self._compare_arg_caches(a1_items, a2_items)
else:
consistent &= a1 == a2
return consistent
def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
consistent = True
arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
if self.cached_arg_data is not None:
assert len(self.cached_arg_data) == len(arg_data)
consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
else:
consistent = False
if not consistent:
self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
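`tune_chunk_size` runs a binary search over power-of-two chunk sizes, treating a `RuntimeError` (typically out-of-memory) as "too large", and caches the result until the argument shapes change. A minimal usage sketch; `layer_forward` is a hypothetical stand-in for a real chunked operation, and the import path simply mirrors the file path shown above:

```python
import torch
from transformers.models.esm.openfold_utils.chunk_utils import ChunkSizeTuner

def layer_forward(x: torch.Tensor, chunk_size: int = None) -> torch.Tensor:
    # Hypothetical chunked op: pretend very large chunks exhaust memory.
    if chunk_size is not None and chunk_size > 128:
        raise RuntimeError("out of memory")
    return x * 2

tuner = ChunkSizeTuner(max_chunk_size=512)
x = torch.randn(64, 32)

# Binary search over [min_chunk_size, 8, 16, ..., max_chunk_size]; the result is
# cached and reused as long as the argument shapes stay the same.
chunk_size = tuner.tune_chunk_size(layer_forward, (x,), min_chunk_size=4)
print(chunk_size)  # 128 with the failure threshold above
```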
|
class ChunkSizeTuner:
def __init__(self, max_chunk_size: int=512):
pass
def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
pass
def test_chunk_size(chunk_size: int) -> bool:
pass
def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
pass
def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
pass
| 6 | 0 | 17 | 2 | 15 | 1 | 3 | 0.06 | 0 | 11 | 0 | 0 | 4 | 3 | 4 | 4 | 84 | 12 | 68 | 27 | 54 | 4 | 52 | 19 | 46 | 4 | 0 | 2 | 14 |
2,470 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/openfold_utils/protein.py | transformers.models.esm.openfold_utils.protein.Protein |
import numpy as np
import dataclasses
from collections.abc import Iterator, Mapping, Sequence
from typing import Any, Optional
@dataclasses.dataclass(frozen=True)
class Protein:
"""Protein structure representation."""
atom_positions: np.ndarray
aatype: np.ndarray
atom_mask: np.ndarray
residue_index: np.ndarray
b_factors: np.ndarray
chain_index: Optional[np.ndarray] = None
remark: Optional[str] = None
parents: Optional[Sequence[str]] = None
parents_chain_index: Optional[Sequence[int]] = None
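The frozen dataclass above is a plain container of per-residue numpy arrays. A minimal construction sketch; the atom37 layout (37 atom slots per residue) and the zero-filled placeholder arrays are assumptions for illustration, and the import path mirrors the file path shown above:

```python
import numpy as np
from transformers.models.esm.openfold_utils.protein import Protein

num_res, num_atoms = 1, 37  # atom37 layout assumed here

protein = Protein(
    atom_positions=np.zeros((num_res, num_atoms, 3)),   # xyz per atom
    aatype=np.zeros((num_res,), dtype=np.int32),         # amino acid indices
    atom_mask=np.ones((num_res, num_atoms)),             # which atoms exist
    residue_index=np.arange(num_res),
    b_factors=np.zeros((num_res, num_atoms)),
)
# frozen=True: fields are read-only after construction
print(protein.atom_positions.shape)
```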
|
@dataclasses.dataclass(frozen=True)
class Protein:
'''Protein structure representation.'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 2.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 9 | 10 | 5 | 9 | 21 | 10 | 5 | 9 | 0 | 0 | 0 | 0 |
2,471 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/openfold_utils/rigid_utils.py | transformers.models.esm.openfold_utils.rigid_utils.Rigid |
from typing import Any, Callable
import torch
from collections.abc import Sequence
class Rigid:
"""
A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a
[*, 3] translation Designed to behave approximately like a single torch tensor with the shape of the shared batch
dimensions of its component parts.
"""
def __init__(self, rots: Rotation | None, trans: torch.Tensor | None):
"""
Args:
rots: A [*, 3, 3] rotation tensor
trans: A corresponding [*, 3] translation tensor
"""
batch_dims, dtype, device, requires_grad = (None, None, None, None)
if trans is not None:
batch_dims = trans.shape[:-1]
dtype = trans.dtype
device = trans.device
requires_grad = trans.requires_grad
elif rots is not None:
batch_dims = rots.shape
dtype = rots.dtype
device = rots.device
requires_grad = rots.requires_grad
else:
raise ValueError('At least one input argument must be specified')
if rots is None:
rots = Rotation.identity(batch_dims, dtype, device, requires_grad)
elif trans is None:
trans = identity_trans(batch_dims, dtype, device, requires_grad)
assert rots is not None
assert trans is not None
if rots.shape != trans.shape[:-1] or rots.device != trans.device:
raise ValueError('Rots and trans incompatible')
trans = trans.to(dtype=torch.float32)
self._rots = rots
self._trans = trans
@staticmethod
def identity(shape: tuple[int, ...], dtype: torch.dtype | None=None, device: torch.device | None=None, requires_grad: bool=True, fmt: str='quat') -> Rigid:
"""
Constructs an identity transformation.
Args:
shape:
The desired shape
dtype:
The dtype of both internal tensors
device:
The device of both internal tensors
requires_grad:
Whether grad should be enabled for the internal tensors
Returns:
The identity transformation
"""
return Rigid(Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt), identity_trans(shape, dtype, device, requires_grad))
def __getitem__(self, index: Any) -> Rigid:
"""
Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of
both the rotation and the translation.
E.g.::
r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
t = Rigid(r, torch.rand(10, 10, 3))
indexed = t[3, 4:6]
assert(indexed.shape == (2,))
assert(indexed.get_rots().shape == (2,))
assert(indexed.get_trans().shape == (2, 3))
Args:
index: A standard torch tensor index. E.g. 8, (10, None, 3),
or (3, slice(0, 1, None))
Returns:
The indexed tensor
"""
if type(index) is not tuple:
index = (index,)
return Rigid(self._rots[index], self._trans[index + (slice(None),)])
def __mul__(self, right: torch.Tensor) -> Rigid:
"""
Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.
Args:
right:
The tensor multiplicand
Returns:
The product
"""
if not isinstance(right, torch.Tensor):
raise TypeError('The other multiplicand must be a Tensor')
new_rots = self._rots * right
new_trans = self._trans * right[..., None]
return Rigid(new_rots, new_trans)
def __rmul__(self, left: torch.Tensor) -> Rigid:
"""
Reverse pointwise multiplication of the transformation with a tensor.
Args:
left:
The left multiplicand
Returns:
The product
"""
return self.__mul__(left)
@property
def shape(self) -> torch.Size:
"""
Returns the shape of the shared dimensions of the rotation and the translation.
Returns:
The shape of the transformation
"""
return self._trans.shape[:-1]
@property
def device(self) -> torch.device:
"""
Returns the device on which the Rigid's tensors are located.
Returns:
The device on which the Rigid's tensors are located
"""
return self._trans.device
def get_rots(self) -> Rotation:
"""
Getter for the rotation.
Returns:
The rotation object
"""
return self._rots
def get_trans(self) -> torch.Tensor:
"""
Getter for the translation.
Returns:
The stored translation
"""
return self._trans
def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:
"""
Composes the transformation with a quaternion update vector of shape [*, 6], where the final 6 columns
represent the x, y, and z values of a quaternion of form (1, x, y, z) followed by a 3D translation.
Args:
q_vec: The quaternion update vector.
Returns:
The composed transformation.
"""
q_vec, t_vec = (q_update_vec[..., :3], q_update_vec[..., 3:])
new_rots = self._rots.compose_q_update_vec(q_vec)
trans_update = self._rots.apply(t_vec)
new_translation = self._trans + trans_update
return Rigid(new_rots, new_translation)
def compose(self, r: Rigid) -> Rigid:
"""
Composes the current rigid object with another.
Args:
r:
Another Rigid object
Returns:
The composition of the two transformations
"""
new_rot = self._rots.compose_r(r._rots)
new_trans = self._rots.apply(r._trans) + self._trans
return Rigid(new_rot, new_trans)
def apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
Applies the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor.
Returns:
The transformed points.
"""
rotated = self._rots.apply(pts)
return rotated + self._trans
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
Applies the inverse of the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor
Returns:
The transformed points.
"""
pts = pts - self._trans
return self._rots.invert_apply(pts)
def invert(self) -> Rigid:
"""
Inverts the transformation.
Returns:
The inverse transformation.
"""
rot_inv = self._rots.invert()
trn_inv = rot_inv.apply(self._trans)
return Rigid(rot_inv, -1 * trn_inv)
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
"""
Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
translation/rotation dimensions respectively.
Args:
fn:
A Tensor -> Tensor function to be mapped over the Rigid
Returns:
The transformed Rigid object
"""
new_rots = self._rots.map_tensor_fn(fn)
new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1)
return Rigid(new_rots, new_trans)
def to_tensor_4x4(self) -> torch.Tensor:
"""
Converts a transformation to a homogeneous transformation tensor.
Returns:
A [*, 4, 4] homogeneous transformation tensor
"""
tensor = self._trans.new_zeros((*self.shape, 4, 4))
tensor[..., :3, :3] = self._rots.get_rot_mats()
tensor[..., :3, 3] = self._trans
tensor[..., 3, 3] = 1
return tensor
@staticmethod
def from_tensor_4x4(t: torch.Tensor) -> Rigid:
"""
Constructs a transformation from a homogeneous transformation tensor.
Args:
t: [*, 4, 4] homogeneous transformation tensor
Returns:
T object with shape [*]
"""
if t.shape[-2:] != (4, 4):
raise ValueError('Incorrectly shaped input tensor')
rots = Rotation(rot_mats=t[..., :3, :3], quats=None)
trans = t[..., :3, 3]
return Rigid(rots, trans)
def to_tensor_7(self) -> torch.Tensor:
"""
Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the
translation.
Returns:
A [*, 7] tensor representation of the transformation
"""
tensor = self._trans.new_zeros((*self.shape, 7))
tensor[..., :4] = self._rots.get_quats()
tensor[..., 4:] = self._trans
return tensor
@staticmethod
def from_tensor_7(t: torch.Tensor, normalize_quats: bool=False) -> Rigid:
if t.shape[-1] != 7:
raise ValueError('Incorrectly shaped input tensor')
quats, trans = (t[..., :4], t[..., 4:])
rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats)
return Rigid(rots, trans)
@staticmethod
def from_3_points(p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float=1e-08) -> Rigid:
"""
Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.
Args:
p_neg_x_axis: [*, 3] coordinates
origin: [*, 3] coordinates used as frame origins
p_xy_plane: [*, 3] coordinates
eps: Small epsilon value
Returns:
A transformation object of shape [*]
"""
p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1)
origin_unbound = torch.unbind(origin, dim=-1)
p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1)
e0 = [c1 - c2 for c1, c2 in zip(origin_unbound, p_neg_x_axis_unbound)]
e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane_unbound, origin_unbound)]
denom = torch.sqrt(sum((c * c for c in e0)) + eps * torch.ones_like(e0[0]))
e0 = [c / denom for c in e0]
dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
denom = torch.sqrt(sum((c * c for c in e1)) + eps * torch.ones_like(e1[0]))
e1 = [c / denom for c in e1]
e2 = [e0[1] * e1[2] - e0[2] * e1[1], e0[2] * e1[0] - e0[0] * e1[2], e0[0] * e1[1] - e0[1] * e1[0]]
rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
rots = rots.reshape(rots.shape[:-1] + (3, 3))
rot_obj = Rotation(rot_mats=rots, quats=None)
return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1))
def unsqueeze(self, dim: int) -> Rigid:
"""
Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation.
Args:
dim: A positive or negative dimension index.
Returns:
The unsqueezed transformation.
"""
if dim >= len(self.shape):
raise ValueError('Invalid dimension')
rots = self._rots.unsqueeze(dim)
trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)
return Rigid(rots, trans)
@staticmethod
def cat(ts: Sequence[Rigid], dim: int) -> Rigid:
"""
Concatenates transformations along a new dimension.
Args:
ts:
A list of T objects
dim:
The dimension along which the transformations should be concatenated
Returns:
A concatenated transformation object
"""
rots = Rotation.cat([t._rots for t in ts], dim)
trans = torch.cat([t._trans for t in ts], dim=dim if dim >= 0 else dim - 1)
return Rigid(rots, trans)
def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
"""
Applies a Rotation -> Rotation function to the stored rotation object.
Args:
fn: A function of type Rotation -> Rotation
Returns:
A transformation object with a transformed rotation.
"""
return Rigid(fn(self._rots), self._trans)
def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
"""
Applies a Tensor -> Tensor function to the stored translation.
Args:
fn:
A function of type Tensor -> Tensor to be applied to the translation
Returns:
A transformation object with a transformed translation.
"""
return Rigid(self._rots, fn(self._trans))
def scale_translation(self, trans_scale_factor: float) -> Rigid:
"""
Scales the translation by a constant factor.
Args:
trans_scale_factor:
The constant factor
Returns:
A transformation object with a scaled translation.
"""
return self.apply_trans_fn(lambda t: t * trans_scale_factor)
def stop_rot_gradient(self) -> Rigid:
"""
Detaches the underlying rotation object
Returns:
A transformation object with detached rotations
"""
return self.apply_rot_fn(lambda r: r.detach())
@staticmethod
def make_transform_from_reference(n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float=1e-20) -> Rigid:
"""
Returns a transformation object from reference coordinates.
Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard
way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You
need to take care of such cases in your code.
Args:
n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
c_xyz: A [*, 3] tensor of carbon xyz coordinates.
Returns:
A transformation object. After applying the translation and rotation to the reference backbone, the
coordinates will approximately equal to the input coordinates.
"""
translation = -1 * ca_xyz
n_xyz = n_xyz + translation
c_xyz = c_xyz + translation
c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)
sin_c1 = -c_y / norm
cos_c1 = c_x / norm
c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
c1_rots[..., 0, 0] = cos_c1
c1_rots[..., 0, 1] = -1 * sin_c1
c1_rots[..., 1, 0] = sin_c1
c1_rots[..., 1, 1] = cos_c1
c1_rots[..., 2, 2] = 1
norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)
sin_c2 = c_z / norm
cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm
c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
c2_rots[..., 0, 0] = cos_c2
c2_rots[..., 0, 2] = sin_c2
c2_rots[..., 1, 1] = 1
c2_rots[..., 2, 0] = -1 * sin_c2
c2_rots[..., 2, 2] = cos_c2
c_rots = rot_matmul(c2_rots, c1_rots)
n_xyz = rot_vec_mul(c_rots, n_xyz)
_, n_y, n_z = [n_xyz[..., i] for i in range(3)]
norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)
sin_n = -n_z / norm
cos_n = n_y / norm
n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
n_rots[..., 0, 0] = 1
n_rots[..., 1, 1] = cos_n
n_rots[..., 1, 2] = -1 * sin_n
n_rots[..., 2, 1] = sin_n
n_rots[..., 2, 2] = cos_n
rots = rot_matmul(n_rots, c_rots)
rots = rots.transpose(-1, -2)
translation = -1 * translation
rot_obj = Rotation(rot_mats=rots, quats=None)
return Rigid(rot_obj, translation)
def cuda(self) -> Rigid:
"""
Moves the transformation object to GPU memory
Returns:
A version of the transformation on GPU
"""
return Rigid(self._rots.cuda(), self._trans.cuda())
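A minimal sketch exercising a few of the methods defined above (identity construction, applying a transform to points, inverting it, and composing with another transform). The import path mirrors the file path shown above, and the identity rotation matrices keep the numbers easy to check:

```python
import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rigid, Rotation

# Identity transform over a batch of 2, then an explicit rigid motion.
t_id = Rigid.identity((2,), dtype=torch.float32, device=torch.device("cpu"), requires_grad=False)

rot = Rotation(rot_mats=torch.eye(3).expand(2, 3, 3), quats=None)
trans = torch.randn(2, 3)
t = Rigid(rot, trans)

pts = torch.randn(2, 3)
moved = t.apply(pts)               # rotate, then translate
recovered = t.invert_apply(moved)  # should match pts up to float error
composed = t.compose(t_id)         # composing with the identity is a no-op

print(torch.allclose(recovered, pts, atol=1e-5), composed.shape)  # True torch.Size([2])
```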
|
class Rigid:
'''
A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a
[*, 3] translation Designed to behave approximately like a single torch tensor with the shape of the shared batch
dimensions of its component parts.
'''
def __init__(self, rots: Rotation | None, trans: torch.Tensor | None):
'''
Args:
rots: A [*, 3, 3] rotation tensor
trans: A corresponding [*, 3] translation tensor
'''
pass
@staticmethod
def identity(shape: tuple[int, ...], dtype: torch.dtype | None=None, device: torch.device | None=None, requires_grad: bool=True, fmt: str='quat') -> Rigid:
'''
Constructs an identity transformation.
Args:
shape:
The desired shape
dtype:
The dtype of both internal tensors
device:
The device of both internal tensors
requires_grad:
Whether grad should be enabled for the internal tensors
Returns:
The identity transformation
'''
pass
def __getitem__(self, index: Any) -> Rigid:
'''
Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of
both the rotation and the translation.
E.g.::
r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
t = Rigid(r, torch.rand(10, 10, 3))
indexed = t[3, 4:6]
assert(indexed.shape == (2,))
assert(indexed.get_rots().shape == (2,))
assert(indexed.get_trans().shape == (2, 3))
Args:
index: A standard torch tensor index. E.g. 8, (10, None, 3),
or (3, slice(0, 1, None))
Returns:
The indexed tensor
'''
pass
def __mul__(self, right: torch.Tensor) -> Rigid:
'''
Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.
Args:
right:
The tensor multiplicand
Returns:
The product
'''
pass
def __rmul__(self, left: torch.Tensor) -> Rigid:
'''
Reverse pointwise multiplication of the transformation with a tensor.
Args:
left:
The left multiplicand
Returns:
The product
'''
pass
@property
def shape(self) -> torch.Size:
'''
Returns the shape of the shared dimensions of the rotation and the translation.
Returns:
The shape of the transformation
'''
pass
@property
def device(self) -> torch.device:
'''
Returns the device on which the Rigid's tensors are located.
Returns:
The device on which the Rigid's tensors are located
'''
pass
def get_rots(self) -> Rotation:
'''
Getter for the rotation.
Returns:
The rotation object
'''
pass
def get_trans(self) -> torch.Tensor:
'''
Getter for the translation.
Returns:
The stored translation
'''
pass
def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:
'''
Composes the transformation with a quaternion update vector of shape [*, 6], where the final 6 columns
represent the x, y, and z values of a quaternion of form (1, x, y, z) followed by a 3D translation.
Args:
q_vec: The quaternion update vector.
Returns:
The composed transformation.
'''
pass
def compose(self, r: Rigid) -> Rigid:
'''
Composes the current rigid object with another.
Args:
r:
Another Rigid object
Returns:
The composition of the two transformations
'''
pass
def apply(self, pts: torch.Tensor) -> torch.Tensor:
'''
Applies the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor.
Returns:
The transformed points.
'''
pass
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
'''
Applies the inverse of the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor
Returns:
The transformed points.
'''
pass
def invert(self) -> Rigid:
'''
Inverts the transformation.
Returns:
The inverse transformation.
'''
pass
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
'''
Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
translation/rotation dimensions respectively.
Args:
fn:
A Tensor -> Tensor function to be mapped over the Rigid
Returns:
The transformed Rigid object
'''
pass
def to_tensor_4x4(self) -> torch.Tensor:
'''
Converts a transformation to a homogeneous transformation tensor.
Returns:
A [*, 4, 4] homogeneous transformation tensor
'''
pass
@staticmethod
def from_tensor_4x4(t: torch.Tensor) -> Rigid:
'''
Constructs a transformation from a homogeneous transformation tensor.
Args:
t: [*, 4, 4] homogeneous transformation tensor
Returns:
T object with shape [*]
'''
pass
def to_tensor_7(self) -> torch.Tensor:
'''
Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the
translation.
Returns:
A [*, 7] tensor representation of the transformation
'''
pass
@staticmethod
def from_tensor_7(t: torch.Tensor, normalize_quats: bool=False) -> Rigid:
pass
@staticmethod
def from_3_points(p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float=1e-08) -> Rigid:
'''
Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.
Args:
p_neg_x_axis: [*, 3] coordinates
origin: [*, 3] coordinates used as frame origins
p_xy_plane: [*, 3] coordinates
eps: Small epsilon value
Returns:
A transformation object of shape [*]
'''
pass
def unsqueeze(self, dim: int) -> Rigid:
'''
Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation.
Args:
dim: A positive or negative dimension index.
Returns:
The unsqueezed transformation.
'''
pass
@staticmethod
def cat(ts: Sequence[Rigid], dim: int) -> Rigid:
'''
Concatenates transformations along a new dimension.
Args:
ts:
A list of T objects
dim:
The dimension along which the transformations should be concatenated
Returns:
A concatenated transformation object
'''
pass
def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
'''
Applies a Rotation -> Rotation function to the stored rotation object.
Args:
fn: A function of type Rotation -> Rotation
Returns:
A transformation object with a transformed rotation.
'''
pass
def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
'''
Applies a Tensor -> Tensor function to the stored translation.
Args:
fn:
A function of type Tensor -> Tensor to be applied to the translation
Returns:
A transformation object with a transformed translation.
'''
pass
def scale_translation(self, trans_scale_factor: float) -> Rigid:
'''
Scales the translation by a constant factor.
Args:
trans_scale_factor:
The constant factor
Returns:
A transformation object with a scaled translation.
'''
pass
def stop_rot_gradient(self) -> Rigid:
'''
Detaches the underlying rotation object
Returns:
A transformation object with detached rotations
'''
pass
@staticmethod
def make_transform_from_reference(n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float=1e-20) -> Rigid:
'''
Returns a transformation object from reference coordinates.
Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard
way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You
need to take care of such cases in your code.
Args:
n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
c_xyz: A [*, 3] tensor of carbon xyz coordinates.
Returns:
A transformation object. After applying the translation and rotation to the reference backbone, the
coordinates will approximately equal to the input coordinates.
'''
pass
def cuda(self) -> Rigid:
'''
Moves the transformation object to GPU memory
Returns:
A version of the transformation on GPU
'''
pass
| 37 | 28 | 17 | 2 | 7 | 7 | 1 | 1 | 0 | 16 | 1 | 0 | 22 | 2 | 28 | 28 | 514 | 94 | 210 | 99 | 163 | 210 | 169 | 81 | 140 | 6 | 0 | 1 | 40 |
2,472 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/openfold_utils/rigid_utils.py | transformers.models.esm.openfold_utils.rigid_utils.Rotation |
import torch
from collections.abc import Sequence
from typing import Any, Callable
class Rotation:
"""
A 3D rotation. Depending on how the object is initialized, the rotation is represented by either a rotation matrix
or a quaternion, though both formats are made available by helper functions. To simplify gradient computation, the
underlying format of the rotation cannot be changed in-place. Like Rigid, the class is designed to mimic the
behavior of a torch Tensor, almost as if each Rotation object were a tensor of rotations, in one format or another.
"""
def __init__(self, rot_mats: torch.Tensor | None=None, quats: torch.Tensor | None=None, normalize_quats: bool=True):
"""
Args:
rot_mats:
A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats
quats:
A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit
quaternion
normalize_quats:
If quats is specified, whether to normalize quats
"""
if rot_mats is None and quats is None or (rot_mats is not None and quats is not None):
raise ValueError('Exactly one input argument must be specified')
if rot_mats is not None and rot_mats.shape[-2:] != (3, 3) or (quats is not None and quats.shape[-1] != 4):
raise ValueError('Incorrectly shaped rotation matrix or quaternion')
if quats is not None:
quats = quats.to(dtype=torch.float32)
if rot_mats is not None:
rot_mats = rot_mats.to(dtype=torch.float32)
if quats is not None and normalize_quats:
quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
self._rot_mats = rot_mats
self._quats = quats
@staticmethod
def identity(shape, dtype: torch.dtype | None=None, device: torch.device | None=None, requires_grad: bool=True, fmt: str='quat') -> Rotation:
"""
Returns an identity Rotation.
Args:
shape:
The "shape" of the resulting Rotation object. See documentation for the shape property
dtype:
The torch dtype for the rotation
device:
The torch device for the new rotation
requires_grad:
Whether the underlying tensors in the new rotation object should require gradient computation
fmt:
One of "quat" or "rot_mat". Determines the underlying format of the new object's rotation
Returns:
A new identity rotation
"""
if fmt == 'rot_mat':
rot_mats = identity_rot_mats(shape, dtype, device, requires_grad)
return Rotation(rot_mats=rot_mats, quats=None)
elif fmt == 'quat':
quats = identity_quats(shape, dtype, device, requires_grad)
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError(f'Invalid format: {fmt}')
def __getitem__(self, index: Any) -> Rotation:
"""
Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape
property.
Args:
index:
A torch index. E.g. (1, 3, 2), or (slice(None,))
Returns:
The indexed rotation
"""
if type(index) is not tuple:
index = (index,)
if self._rot_mats is not None:
rot_mats = self._rot_mats[index + (slice(None), slice(None))]
return Rotation(rot_mats=rot_mats)
elif self._quats is not None:
quats = self._quats[index + (slice(None),)]
return Rotation(quats=quats, normalize_quats=False)
else:
raise ValueError('Both rotations are None')
def __mul__(self, right: torch.Tensor) -> Rotation:
"""
Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. mask the Rotation.
Args:
right:
The tensor multiplicand
Returns:
The product
"""
if not isinstance(right, torch.Tensor):
raise TypeError('The other multiplicand must be a Tensor')
if self._rot_mats is not None:
rot_mats = self._rot_mats * right[..., None, None]
return Rotation(rot_mats=rot_mats, quats=None)
elif self._quats is not None:
quats = self._quats * right[..., None]
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError('Both rotations are None')
def __rmul__(self, left: torch.Tensor) -> Rotation:
"""
Reverse pointwise multiplication of the rotation with a tensor.
Args:
left:
The left multiplicand
Returns:
The product
"""
return self.__mul__(left)
@property
def shape(self) -> torch.Size:
"""
Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the
underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix
tensor, for example, the resulting shape would be [10].
Returns:
The virtual shape of the rotation object
"""
if self._rot_mats is not None:
return self._rot_mats.shape[:-2]
elif self._quats is not None:
return self._quats.shape[:-1]
else:
raise ValueError('Both rotations are None')
@property
def dtype(self) -> torch.dtype:
"""
Returns the dtype of the underlying rotation.
Returns:
The dtype of the underlying rotation
"""
if self._rot_mats is not None:
return self._rot_mats.dtype
elif self._quats is not None:
return self._quats.dtype
else:
raise ValueError('Both rotations are None')
@property
def device(self) -> torch.device:
"""
The device of the underlying rotation
Returns:
The device of the underlying rotation
"""
if self._rot_mats is not None:
return self._rot_mats.device
elif self._quats is not None:
return self._quats.device
else:
raise ValueError('Both rotations are None')
@property
def requires_grad(self) -> bool:
"""
Returns the requires_grad property of the underlying rotation
Returns:
The requires_grad property of the underlying tensor
"""
if self._rot_mats is not None:
return self._rot_mats.requires_grad
elif self._quats is not None:
return self._quats.requires_grad
else:
raise ValueError('Both rotations are None')
def get_rot_mats(self) -> torch.Tensor:
"""
Returns the underlying rotation as a rotation matrix tensor.
Returns:
The rotation as a rotation matrix tensor
"""
if self._rot_mats is not None:
return self._rot_mats
elif self._quats is not None:
return quat_to_rot(self._quats)
else:
raise ValueError('Both rotations are None')
def get_quats(self) -> torch.Tensor:
"""
Returns the underlying rotation as a quaternion tensor.
Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.
Returns:
The rotation as a quaternion tensor.
"""
if self._rot_mats is not None:
return rot_to_quat(self._rot_mats)
elif self._quats is not None:
return self._quats
else:
raise ValueError('Both rotations are None')
def get_cur_rot(self) -> torch.Tensor:
"""
Return the underlying rotation in its current form
Returns:
The stored rotation
"""
if self._rot_mats is not None:
return self._rot_mats
elif self._quats is not None:
return self._quats
else:
raise ValueError('Both rotations are None')
def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool=True) -> Rotation:
"""
Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion
update, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the
desired (not necessarily unit) quaternion update.
Args:
q_update_vec:
A [*, 3] quaternion update tensor
normalize_quats:
Whether to normalize the output quaternion
Returns:
An updated Rotation
"""
quats = self.get_quats()
new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)
def compose_r(self, r: Rotation) -> Rotation:
"""
Compose the rotation matrices of the current Rotation object with those of another.
Args:
r:
An update rotation object
Returns:
An updated rotation object
"""
r1 = self.get_rot_mats()
r2 = r.get_rot_mats()
new_rot_mats = rot_matmul(r1, r2)
return Rotation(rot_mats=new_rot_mats, quats=None)
def compose_q(self, r: Rotation, normalize_quats: bool=True) -> Rotation:
"""
Compose the quaternions of the current Rotation object with those of another.
Depending on whether either Rotation was initialized with quaternions, this function may call
torch.linalg.eigh.
Args:
r:
An update rotation object
Returns:
An updated rotation object
"""
q1 = self.get_quats()
q2 = r.get_quats()
new_quats = quat_multiply(q1, q2)
return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)
def apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
Apply the current Rotation as a rotation matrix to a set of 3D coordinates.
Args:
pts:
A [*, 3] set of points
Returns:
[*, 3] rotated points
"""
rot_mats = self.get_rot_mats()
return rot_vec_mul(rot_mats, pts)
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
The inverse of the apply() method.
Args:
pts:
A [*, 3] set of points
Returns:
[*, 3] inverse-rotated points
"""
rot_mats = self.get_rot_mats()
inv_rot_mats = invert_rot_mat(rot_mats)
return rot_vec_mul(inv_rot_mats, pts)
def invert(self) -> Rotation:
"""
Returns the inverse of the current Rotation.
Returns:
The inverse of the current Rotation
"""
if self._rot_mats is not None:
return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)
elif self._quats is not None:
return Rotation(rot_mats=None, quats=invert_quat(self._quats), normalize_quats=False)
else:
raise ValueError('Both rotations are None')
def unsqueeze(self, dim: int) -> Rotation:
"""
Analogous to torch.unsqueeze. The dimension is relative to the shape of the Rotation object.
Args:
dim: A positive or negative dimension index.
Returns:
The unsqueezed Rotation.
"""
if dim >= len(self.shape):
raise ValueError('Invalid dimension')
if self._rot_mats is not None:
rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)
return Rotation(rot_mats=rot_mats, quats=None)
elif self._quats is not None:
quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError('Both rotations are None')
@staticmethod
def cat(rs: Sequence[Rotation], dim: int) -> Rotation:
"""
Concatenates rotations along one of the batch dimensions. Analogous to torch.cat().
Note that the output of this operation is always a rotation matrix, regardless of the format of input
rotations.
Args:
rs:
A list of rotation objects
dim:
The dimension along which the rotations should be concatenated
Returns:
A concatenated Rotation object in rotation matrix format
"""
rot_mats = torch.cat([r.get_rot_mats() for r in rs], dim=dim if dim >= 0 else dim - 2)
return Rotation(rot_mats=rot_mats, quats=None)
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation:
"""
Apply a Tensor -> Tensor function to underlying rotation tensors, mapping over the rotation dimension(s). Can
be used e.g. to sum out a one-hot batch dimension.
Args:
fn:
A Tensor -> Tensor function to be mapped over the Rotation
Returns:
The transformed Rotation object
"""
if self._rot_mats is not None:
rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))
rot_mats = torch.stack(list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1)
rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))
return Rotation(rot_mats=rot_mats, quats=None)
elif self._quats is not None:
quats = torch.stack(list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1)
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError('Both rotations are None')
def cuda(self) -> Rotation:
"""
Analogous to the cuda() method of torch Tensors
Returns:
A copy of the Rotation in CUDA memory
"""
if self._rot_mats is not None:
return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)
elif self._quats is not None:
return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False)
else:
raise ValueError('Both rotations are None')
def to(self, device: torch.device | None, dtype: torch.dtype | None) -> Rotation:
"""
Analogous to the to() method of torch Tensors
Args:
device:
A torch device
dtype:
A torch dtype
Returns:
A copy of the Rotation using the new device and dtype
"""
if self._rot_mats is not None:
return Rotation(rot_mats=self._rot_mats.to(device=device, dtype=dtype), quats=None)
elif self._quats is not None:
return Rotation(rot_mats=None, quats=self._quats.to(device=device, dtype=dtype), normalize_quats=False)
else:
raise ValueError('Both rotations are None')
def detach(self) -> Rotation:
"""
Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.
Returns:
A copy of the Rotation whose underlying Tensor has been detached from its torch graph
"""
if self._rot_mats is not None:
return Rotation(rot_mats=self._rot_mats.detach(), quats=None)
elif self._quats is not None:
return Rotation(rot_mats=None, quats=self._quats.detach(), normalize_quats=False)
else:
raise ValueError('Both rotations are None')
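A minimal sketch of the two storage formats the class above supports (quaternion-backed vs. matrix-backed) and of composing them; the import path mirrors the file path shown above, and identity rotations are used so the round trip is exact:

```python
import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rotation

# Quaternion-backed identity rotation over a batch of 4
r_quat = Rotation.identity((4,), dtype=torch.float32, device=torch.device("cpu"), requires_grad=False, fmt="quat")

# Matrix-backed rotation built from explicit 3x3 matrices
r_mat = Rotation(rot_mats=torch.eye(3).expand(4, 3, 3), quats=None)

pts = torch.randn(4, 3)
rotated = r_mat.apply(pts)            # identity matrices leave points unchanged
back = r_mat.invert_apply(rotated)

composed = r_quat.compose_r(r_mat)    # result is stored in rotation matrix format
print(composed.shape, torch.allclose(back, pts))  # torch.Size([4]) True
```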
|
class Rotation:
'''
A 3D rotation. Depending on how the object is initialized, the rotation is represented by either a rotation matrix
or a quaternion, though both formats are made available by helper functions. To simplify gradient computation, the
underlying format of the rotation cannot be changed in-place. Like Rigid, the class is designed to mimic the
behavior of a torch Tensor, almost as if each Rotation object were a tensor of rotations, in one format or another.
'''
def __init__(self, rot_mats: torch.Tensor | None=None, quats: torch.Tensor | None=None, normalize_quats: bool=True):
'''
Args:
rot_mats:
A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats
quats:
A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit
quaternion
normalize_quats:
If quats is specified, whether to normalize quats
'''
pass
@staticmethod
def identity(shape, dtype: torch.dtype | None=None, device: torch.device | None=None, requires_grad: bool=True, fmt: str='quat') -> Rotation:
'''
Returns an identity Rotation.
Args:
shape:
The "shape" of the resulting Rotation object. See documentation for the shape property
dtype:
The torch dtype for the rotation
device:
The torch device for the new rotation
requires_grad:
Whether the underlying tensors in the new rotation object should require gradient computation
fmt:
One of "quat" or "rot_mat". Determines the underlying format of the new object's rotation
Returns:
A new identity rotation
'''
pass
def __getitem__(self, index: Any) -> Rotation:
'''
Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape
property.
Args:
index:
A torch index. E.g. (1, 3, 2), or (slice(None,))
Returns:
The indexed rotation
'''
pass
def __mul__(self, right: torch.Tensor) -> Rotation:
'''
Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. mask the Rotation.
Args:
right:
The tensor multiplicand
Returns:
The product
'''
pass
def __rmul__(self, left: torch.Tensor) -> Rotation:
'''
Reverse pointwise multiplication of the rotation with a tensor.
Args:
left:
The left multiplicand
Returns:
The product
'''
pass
@property
def shape(self) -> torch.Size:
'''
Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the
underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix
tensor, for example, the resulting shape would be [10].
Returns:
The virtual shape of the rotation object
'''
pass
@property
def dtype(self) -> torch.dtype:
'''
Returns the dtype of the underlying rotation.
Returns:
The dtype of the underlying rotation
'''
pass
@property
def device(self) -> torch.device:
'''
The device of the underlying rotation
Returns:
The device of the underlying rotation
'''
pass
@property
def requires_grad(self) -> bool:
'''
Returns the requires_grad property of the underlying rotation
Returns:
The requires_grad property of the underlying tensor
'''
pass
def get_rot_mats(self) -> torch.Tensor:
'''
Returns the underlying rotation as a rotation matrix tensor.
Returns:
The rotation as a rotation matrix tensor
'''
pass
def get_quats(self) -> torch.Tensor:
'''
Returns the underlying rotation as a quaternion tensor.
Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.
Returns:
The rotation as a quaternion tensor.
'''
pass
def get_cur_rot(self) -> torch.Tensor:
'''
Return the underlying rotation in its current form
Returns:
The stored rotation
'''
pass
def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool=True) -> Rotation:
'''
Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion
update, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the
desired (not necessarily unit) quaternion update.
Args:
q_update_vec:
A [*, 3] quaternion update tensor
normalize_quats:
Whether to normalize the output quaternion
Returns:
An updated Rotation
'''
pass
def compose_r(self, r: Rotation) -> Rotation:
'''
Compose the rotation matrices of the current Rotation object with those of another.
Args:
r:
An update rotation object
Returns:
An updated rotation object
'''
pass
def compose_q(self, r: Rotation, normalize_quats: bool=True) -> Rotation:
'''
Compose the quaternions of the current Rotation object with those of another.
Depending on whether either Rotation was initialized with quaternions, this function may call
torch.linalg.eigh.
Args:
r:
An update rotation object
Returns:
An updated rotation object
'''
pass
def apply(self, pts: torch.Tensor) -> torch.Tensor:
'''
Apply the current Rotation as a rotation matrix to a set of 3D coordinates.
Args:
pts:
A [*, 3] set of points
Returns:
[*, 3] rotated points
'''
pass
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
'''
The inverse of the apply() method.
Args:
pts:
A [*, 3] set of points
Returns:
[*, 3] inverse-rotated points
'''
pass
def invert(self) -> Rotation:
'''
Returns the inverse of the current Rotation.
Returns:
The inverse of the current Rotation
'''
pass
def unsqueeze(self, dim: int) -> Rotation:
'''
Analogous to torch.unsqueeze. The dimension is relative to the shape of the Rotation object.
Args:
dim: A positive or negative dimension index.
Returns:
The unsqueezed Rotation.
'''
pass
@staticmethod
def cat(rs: Sequence[Rotation], dim: int) -> Rotation:
'''
Concatenates rotations along one of the batch dimensions. Analogous to torch.cat().
Note that the output of this operation is always a rotation matrix, regardless of the format of input
rotations.
Args:
rs:
A list of rotation objects
dim:
The dimension along which the rotations should be concatenated
Returns:
A concatenated Rotation object in rotation matrix format
'''
pass
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation:
'''
Apply a Tensor -> Tensor function to underlying rotation tensors, mapping over the rotation dimension(s). Can
be used e.g. to sum out a one-hot batch dimension.
Args:
fn:
A Tensor -> Tensor function to be mapped over the Rotation
Returns:
The transformed Rotation object
'''
pass
def cuda(self) -> Rotation:
'''
Analogous to the cuda() method of torch Tensors
Returns:
A copy of the Rotation in CUDA memory
'''
pass
def to(self, device: torch.device | None, dtype: torch.dtype | None) -> Rotation:
'''
Analogous to the to() method of torch Tensors
Args:
device:
A torch device
dtype:
A torch dtype
Returns:
A copy of the Rotation using the new device and dtype
'''
pass
def detach(self) -> Rotation:
'''
Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.
Returns:
A copy of the Rotation whose underlying Tensor has been detached from its torch graph
'''
pass
| 31 | 25 | 18 | 1 | 9 | 8 | 3 | 0.93 | 0 | 12 | 0 | 0 | 22 | 2 | 24 | 24 | 475 | 62 | 214 | 66 | 172 | 199 | 138 | 49 | 113 | 6 | 0 | 1 | 67 |
2,473 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/esm/tokenization_esm.py | transformers.models.esm.tokenization_esm.EsmTokenizer |
from typing import Optional
from ...tokenization_utils import PreTrainedTokenizer
import os
class EsmTokenizer(PreTrainedTokenizer):
"""
Constructs an ESM tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, unk_token='<unk>', cls_token='<cls>', pad_token='<pad>', mask_token='<mask>', eos_token='<eos>', **kwargs):
self.all_tokens = load_vocab_file(vocab_file)
self._id_to_token = dict(enumerate(self.all_tokens))
self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
super().__init__(unk_token=unk_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, eos_token=eos_token, **kwargs)
self.unique_no_split_tokens = self.all_tokens
self._update_trie(self.unique_no_split_tokens)
def _convert_id_to_token(self, index: int) -> str:
return self._id_to_token.get(index, self.unk_token)
def _convert_token_to_id(self, token: str) -> int:
return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
def _tokenize(self, text, **kwargs):
return text.split()
def get_vocab(self):
base_vocab = self._token_to_id.copy()
base_vocab.update(self.added_tokens_encoder)
return base_vocab
def token_to_id(self, token: str) -> int:
return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
def id_to_token(self, index: int) -> str:
return self._id_to_token.get(index, self.unk_token)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
cls = [self.cls_token_id]
sep = [self.eos_token_id]
if token_ids_1 is None:
if self.eos_token_id is None:
return cls + token_ids_0
else:
return cls + token_ids_0 + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
mask = [1] + [0] * len(token_ids_0) + [1]
if token_ids_1 is not None:
mask += [0] * len(token_ids_1) + [1]
return mask
def save_vocabulary(self, save_directory, filename_prefix):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(vocab_file, 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def vocab_size(self) -> int:
return len(self.all_tokens)
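A minimal usage sketch of the tokenizer above. The checkpoint name is an assumption (any ESM-2 checkpoint that ships the vocab file this class loads should behave the same); the special-token behavior follows `build_inputs_with_special_tokens` and `get_special_tokens_mask` as defined above:

```python
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")  # assumed checkpoint

enc = tokenizer("MLKNVQVQLV")
print(enc["input_ids"])                                  # <cls> + residue ids + <eos>
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))

# 1 marks special tokens (<cls>/<eos>), 0 marks sequence tokens
mask = tokenizer.get_special_tokens_mask(enc["input_ids"], already_has_special_tokens=True)
print(mask)
```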
|
class EsmTokenizer(PreTrainedTokenizer):
'''
Constructs an ESM tokenizer.
'''
def __init__(self, vocab_file, unk_token='<unk>', cls_token='<cls>', pad_token='<pad>', mask_token='<mask>', eos_token='<eos>', **kwargs):
pass
def _convert_id_to_token(self, index: int) -> str:
pass
def _convert_token_to_id(self, token: str) -> int:
pass
def _tokenize(self, text, **kwargs):
pass
def get_vocab(self):
pass
def token_to_id(self, token: str) -> int:
pass
def id_to_token(self, index: int) -> str:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
pass
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory, filename_prefix):
pass
@property
def vocab_size(self) -> int:
pass
| 13 | 2 | 8 | 0 | 6 | 2 | 2 | 0.27 | 1 | 7 | 0 | 0 | 11 | 4 | 11 | 100 | 110 | 17 | 75 | 38 | 49 | 20 | 49 | 23 | 37 | 5 | 3 | 2 | 19 |
2,474 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/configuration_falcon.py | transformers.models.falcon.configuration_falcon.FalconConfig |
from ...configuration_utils import PretrainedConfig
class FalconConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the
[tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 65024):
Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FalconModel`]
hidden_size (`int`, *optional*, defaults to 4544):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 71):
Number of attention heads for each attention layer in the Transformer encoder.
num_ln_in_parallel_attn (`int`, *optional*):
Set to 2 if separate layer norms are to be used for the MLP and the attention output when using parallel
attention, otherwise, 1.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values attentions (not used by all models). Only relevant if
`config.is_decoder=True`.
hidden_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for MLP layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for attention layers.
num_kv_heads (`int`, *optional*):
Number of key-value heads to use per attention layer. If unset, defaults to the same value as
`num_attention_heads`.
alibi (`bool`, *optional*, defaults to `False`):
Whether to use ALiBi positional biases during self-attention.
new_decoder_architecture (`bool`, *optional*, defaults to `False`):
Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
arguments are ignored, as the new decoder always uses parallel attention.
multi_query (`bool`, *optional*, defaults to `True`):
Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
parallel_attn (`bool`, *optional*, defaults to `True`):
Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive
instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
bias (`bool`, *optional*, defaults to `False`):
Whether to use bias on Linear layers.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained
Falcon models with RoPE support up to 2048 tokens.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work with a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied to the attention
computation. If unspecified, it defaults to the value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
bos_token_id (`int`, *optional*, defaults to 11):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 11):
The id of the "end-of-sequence" token.
ffn_hidden_size (`int`, *optional*):
The hidden size of the feedforward layer in the Transformer decoder.
Defaults to 4x the hidden size when not provided.
activation (`str`, *optional*, defaults to `"gelu"`):
The activation function used in the feedforward layer.
Example:
```python
>>> from transformers import FalconModel, FalconConfig
>>> # Initializing a small (2-layer) Falcon configuration
>>> configuration = FalconConfig(num_hidden_layers=2)
>>> # Initializing a model from the small configuration
>>> model = FalconModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'falcon'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, num_ln_in_parallel_attn=None, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, max_position_embeddings=2048, rope_theta=10000.0, rope_scaling=None, bos_token_id=11, eos_token_id=11, ffn_hidden_size=None, activation='gelu', **kwargs):
self.vocab_size = vocab_size
n_embed = kwargs.pop('n_embed', None)
self.hidden_size = hidden_size if n_embed is None else n_embed
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
self.alibi = alibi
self.new_decoder_architecture = new_decoder_architecture
self.multi_query = multi_query
self.parallel_attn = parallel_attn
self.bias = bias
self.num_ln_in_parallel_attn = num_ln_in_parallel_attn
self.max_position_embeddings = max_position_embeddings
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.activation = activation
if ffn_hidden_size is None:
self.ffn_hidden_size = hidden_size * 4
else:
self.ffn_hidden_size = ffn_hidden_size
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
def head_dim(self):
return self.hidden_size // self.num_attention_heads
@property
def rotary(self):
return not self.alibi
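The docstring above spells out the `rope_scaling` schema; the sketch below simply exercises it with tiny, illustrative sizes (it assumes a transformers version recent enough to accept the `rope_type` key listed there, and is not a recommended training setup):

```python
from transformers import FalconConfig

# Tiny illustrative config that enables linear RoPE scaling using the
# `rope_scaling` schema documented above.
config = FalconConfig(
    num_hidden_layers=2,
    hidden_size=256,
    num_attention_heads=4,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)

print(config.head_dim)  # hidden_size // num_attention_heads -> 64
print(config.rotary)    # True, since `alibi` defaults to False
```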
|
class FalconConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the
[tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 65024):
Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FalconModel`]
hidden_size (`int`, *optional*, defaults to 4544):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 71):
Number of attention heads for each attention layer in the Transformer encoder.
num_ln_in_parallel_attn (`int`, *optional*):
Set to 2 if separate layer norms are to be used for the MLP and the attention output when using parallel
attention, otherwise, 1.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values attentions (not used by all models). Only relevant if
`config.is_decoder=True`.
hidden_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for MLP layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for attention layers.
num_kv_heads (`int`, *optional*):
Number of key-value heads to use per attention layer. If unset, defaults to the same value as
`num_attention_heads`.
alibi (`bool`, *optional*, defaults to `False`):
Whether to use ALiBi positional biases during self-attention.
new_decoder_architecture (`bool`, *optional*, defaults to `False`):
Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
arguments are ignored, as the new decoder always uses parallel attention.
multi_query (`bool`, *optional*, defaults to `True`):
Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
parallel_attn (`bool`, *optional*, defaults to `True`):
Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive
instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
bias (`bool`, *optional*, defaults to `False`):
Whether to use bias on Linear layers.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained
Falcon models with RoPE support up to 2048 tokens.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work with a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied to the attention
computation. If unspecified, it defaults to the value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
bos_token_id (`int`, *optional*, defaults to 11):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 11):
The id of the "end-of-sequence" token.
ffn_hidden_size (`int`, *optional*):
The hidden size of the feedforward layer in the Transformer decoder.
Defaults to 4x the hidden size when not provided.
activation (`str`, *optional*, defaults to `"gelu"`):
The activation function used in the feedforward layer.
Example:
```python
>>> from transformers import FalconModel, FalconConfig
>>> # Initializing a small (2-layer) Falcon configuration
>>> configuration = FalconConfig(num_hidden_layers=2)
>>> # Initializing a model from the small configuration
>>> model = FalconModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, num_ln_in_parallel_attn=None, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, max_position_embeddings=2048, rope_theta=10000.0, rope_scaling=None, bos_token_id=11, eos_token_id=11, ffn_hidden_size=None, activation='gelu', **kwargs):
pass
@property
def head_dim(self):
pass
@property
def rotary(self):
pass
| 6 | 1 | 20 | 0 | 20 | 1 | 2 | 1.7 | 1 | 1 | 0 | 0 | 3 | 23 | 3 | 3 | 185 | 13 | 64 | 58 | 32 | 109 | 35 | 30 | 31 | 4 | 1 | 1 | 6 |
| 2,475 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconAttention |
from typing import Optional, Union
from torch import nn
from torch.nn import functional as F
from .configuration_falcon import FalconConfig
import torch
import math
from ...cache_utils import Cache, DynamicCache, StaticCache
class FalconAttention(nn.Module):
def __init__(self, config: FalconConfig, layer_idx=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.split_size = self.hidden_size
self.hidden_dropout = config.hidden_dropout
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
if self.head_dim * self.num_heads != self.hidden_size:
raise ValueError(f'`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).')
self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
self.beta = self.inv_norm_factor
if config.new_decoder_architecture:
qkv_out_dim = (config.num_kv_heads * 2 + config.num_attention_heads) * self.head_dim
elif config.multi_query:
qkv_out_dim = self.hidden_size + 2 * self.head_dim
else:
qkv_out_dim = 3 * self.hidden_size
self.query_key_value = FalconLinear(self.hidden_size, qkv_out_dim, bias=config.bias)
self.new_decoder_architecture = config.new_decoder_architecture
self.multi_query = config.multi_query
self.dense = FalconLinear(self.hidden_size, self.hidden_size, bias=config.bias)
self.attention_dropout = nn.Dropout(config.attention_dropout)
self.num_kv_heads = config.num_kv_heads if self.new_decoder_architecture or not self.multi_query else 1
def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Split the last dimension into (num_heads, head_dim), results share same memory storage as `fused_qkv`
Args:
fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]
Returns:
query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
value: [batch_size, seq_length, num_heads, head_dim]
"""
if self.new_decoder_architecture:
batch, seq_len, _ = fused_qkv.shape
qkv = fused_qkv.view(batch, seq_len, -1, self.num_heads // self.num_kv_heads + 2, self.head_dim)
query = qkv[:, :, :, :-2]
key = qkv[:, :, :, [-2]]
value = qkv[:, :, :, [-1]]
key = torch.broadcast_to(key, query.shape)
value = torch.broadcast_to(value, query.shape)
query, key, value = [x.flatten(2, 3) for x in (query, key, value)]
return (query, key, value)
elif not self.multi_query:
batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
return (fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :])
else:
batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads + 2, self.head_dim)
return (fused_qkv[..., :-2, :], fused_qkv[..., [-2], :], fused_qkv[..., [-1], :])
def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
"""
Merge heads together over the last dimension
Args:
x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim]
Returns:
torch.tensor: [batch_size, seq_length, num_heads * head_dim]
"""
batch_size_and_num_heads, seq_length, _ = x.shape
batch_size = batch_size_and_num_heads // self.num_heads
x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
x = x.permute(0, 2, 1, 3)
return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)
def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None):
fused_qkv = self.query_key_value(hidden_states)
num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads
query_layer, key_layer, value_layer = self._split_heads(fused_qkv)
batch_size, query_length, _, _ = query_layer.shape
query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim)
key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
if alibi is None:
cos, sin = position_embeddings
query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin)
if layer_past is not None:
cache_kwargs = {'cache_position': cache_position}
if alibi is None:
cache_kwargs.update({'sin': sin, 'cos': cos})
key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs)
kv_length = key_layer.shape[-2]
if self.config._attn_implementation == 'sdpa' and query_layer.device.type == 'cuda' and (attention_mask is not None):
query_layer = query_layer.contiguous()
key_layer = key_layer.contiguous()
value_layer = value_layer.contiguous()
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, :key_layer.shape[-2]]
if alibi is None:
if self.config._attn_implementation == 'sdpa' and (not output_attentions):
is_causal = self.is_causal and attention_mask is None and (query_length > 1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=0.0, is_causal=is_causal)
attention_scores = None
else:
attention_scores = query_layer @ key_layer.transpose(-1, -2)
attention_scores /= math.sqrt(self.head_dim)
attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype)
attn_output = attention_scores @ value_layer
attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim)
attn_output = attn_output.permute(0, 2, 1, 3)
attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
attn_output = self.dense(attn_output)
return (attn_output, attention_scores)
else:
if self.config._attn_implementation == 'sdpa' and (not output_attentions) and (head_mask is None):
is_causal = self.is_causal and attention_mask is None and (query_length > 1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal)
attention_probs = None
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
attn_output = self.dense(attn_output)
else:
matmul_result = query_layer @ key_layer.transpose(-1, -2)
attention_scores = matmul_result.view(batch_size, self.num_heads, query_length, kv_length)
input_dtype = attention_scores.dtype
if input_dtype == torch.float16 or input_dtype == torch.bfloat16:
attention_scores = attention_scores.to(torch.float32)
attention_logits = attention_scores + alibi.view(batch_size, self.num_heads, 1, -1)
attention_logits *= self.inv_norm_factor
attention_probs = F.softmax(attention_logits + attention_mask, dim=-1, dtype=hidden_states.dtype)
attention_probs = self.attention_dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
attention_probs_reshaped = attention_probs.view(batch_size, self.num_heads, query_length, kv_length)
attn_output = (attention_probs_reshaped @ value_layer).flatten(0, 1)
attn_output = self._merge_heads(attn_output)
attn_output = self.dense(attn_output)
return (attn_output, attention_probs)
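The `_split_heads` branches above can be hard to visualize; the following standalone sketch reproduces only the `multi_query=True` layout with made-up sizes (plain tensors, not the attention module itself):

```python
import torch

# With multi_query=True the fused projection packs num_heads query heads
# plus a single shared key head and a single shared value head.
batch, seq, num_heads, head_dim = 2, 5, 4, 8
fused_qkv = torch.randn(batch, seq, (num_heads + 2) * head_dim)

fused_qkv = fused_qkv.view(batch, seq, num_heads + 2, head_dim)
query = fused_qkv[..., :-2, :]   # [2, 5, 4, 8]
key = fused_qkv[..., [-2], :]    # [2, 5, 1, 8], shared across all query heads
value = fused_qkv[..., [-1], :]  # [2, 5, 1, 8]
print(query.shape, key.shape, value.shape)
```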
|
class FalconAttention(nn.Module):
def __init__(self, config: FalconConfig, layer_idx=None):
pass
def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Split the last dimension into (num_heads, head_dim), results share same memory storage as `fused_qkv`
Args:
fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]
Returns:
query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
value: [batch_size, seq_length, num_heads, head_dim]
'''
pass
def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
'''
Merge heads together over the last dimension
Args:
x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim]
Returns:
torch.tensor: [batch_size, seq_length, num_heads * head_dim]
'''
pass
def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None):
pass
| 5 | 2 | 58 | 9 | 40 | 11 | 7 | 0.27 | 1 | 8 | 4 | 1 | 4 | 20 | 4 | 14 | 238 | 37 | 160 | 61 | 143 | 43 | 118 | 49 | 113 | 17 | 1 | 3 | 28 |
| 2,476 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconDecoderLayer |
import torch
from .configuration_falcon import FalconConfig
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache, StaticCache
from typing import Optional, Union
class FalconDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: FalconConfig, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.self_attention = FALCON_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
self.mlp = FalconMLP(config)
self.hidden_dropout = config.hidden_dropout
self.config = config
if config.num_ln_in_parallel_attn is None and config.new_decoder_architecture:
config.num_ln_in_parallel_attn = 2
if not config.parallel_attn:
self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
elif config.num_ln_in_parallel_attn == 2:
self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
else:
self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Union[Cache, tuple[torch.Tensor, torch.Tensor]]]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs):
residual = hidden_states
if self.config.new_decoder_architecture and self.config.num_ln_in_parallel_attn == 2:
attention_layernorm_out = self.ln_attn(hidden_states)
mlp_layernorm_out = self.ln_mlp(hidden_states)
else:
attention_layernorm_out = self.input_layernorm(hidden_states)
attention_output, attn_weights = self.self_attention(attention_layernorm_out, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings)
if not self.config.new_decoder_architecture:
if self.config.parallel_attn:
mlp_layernorm_out = attention_layernorm_out
else:
residual = dropout_add(attention_output, residual, self.config.attention_dropout, training=self.training)
mlp_layernorm_out = self.post_attention_layernorm(residual)
if self.config.new_decoder_architecture and self.config.parallel_attn and (self.config.num_ln_in_parallel_attn == 1):
mlp_layernorm_out = attention_layernorm_out
mlp_output = self.mlp(mlp_layernorm_out)
if self.config.new_decoder_architecture or self.config.parallel_attn:
mlp_output += attention_output
output = dropout_add(mlp_output, residual, self.config.hidden_dropout, training=self.training)
return (output, attn_weights)
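The branching in the decoder layer is easier to see as a data-flow summary. A schematic sketch of the `parallel_attn=True` path, with plain linear layers standing in for attention and the MLP and dropout omitted:

```python
import torch
from torch import nn

def parallel_block(x, ln, attn, mlp):
    normed = ln(x)
    # Attention and MLP read the same normalized input and are both summed
    # into the residual, instead of running one after the other.
    return x + attn(normed) + mlp(normed)

x = torch.randn(1, 4, 8)
block_out = parallel_block(x, nn.LayerNorm(8), nn.Linear(8, 8), nn.Linear(8, 8))
print(block_out.shape)  # torch.Size([1, 4, 8])
```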
|
class FalconDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: FalconConfig, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Union[Cache, tuple[torch.Tensor, torch.Tensor]]]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs):
pass
| 3 | 0 | 47 | 7 | 38 | 3 | 6 | 0.08 | 1 | 6 | 3 | 0 | 2 | 9 | 2 | 12 | 96 | 15 | 77 | 34 | 61 | 6 | 42 | 21 | 39 | 7 | 1 | 2 | 11 |
| 2,477 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconFlashAttention2 |
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
from typing import Optional, Union
class FalconFlashAttention2(FalconAttention):
"""
Falcon flash attention module. This module inherits from `FalconAttention`, as the weights of the module stay
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None):
fused_qkv = self.query_key_value(hidden_states)
num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads
query_layer, key_layer, value_layer = self._split_heads(fused_qkv)
batch_size, query_length, _, _ = query_layer.shape
query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim)
key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
if alibi is None:
cos, sin = position_embeddings
query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin)
if layer_past is not None:
cache_kwargs = {'cache_position': cache_position}
if alibi is None:
cache_kwargs.update({'sin': sin, 'cos': cos})
key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs)
query_layer = query_layer.transpose(1, 2)
key_layer = key_layer.transpose(1, 2)
value_layer = value_layer.transpose(1, 2)
if alibi is not None:
raise ValueError('`alibi` is not supported when `use_flash_attn` is True')
attn_dropout = self.config.attention_dropout if self.training else 0.0
input_dtype = query_layer.dtype
device_type = query_layer.device.type if query_layer.device.type != 'mps' else 'cpu'
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype()
elif hasattr(self.config, '_pre_quantization_dtype'):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.query_key_value.weight.dtype
logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')
query_layer = query_layer.to(target_dtype)
key_layer = key_layer.to(target_dtype)
value_layer = value_layer.to(target_dtype)
attn_output = _flash_attention_forward(query_layer, key_layer, value_layer, attention_mask, query_length, position_ids=position_ids, dropout=attn_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask)
attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
attn_output = self.dense(attn_weights)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
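One detail worth highlighting in the flash-attention forward is the float32 fallback: if layer norms or embeddings silently upcast the hidden states, they are cast back to a half-precision dtype before the kernel call. A simplified sketch of that guard (the resolution order mirrors the code above, but this helper is illustrative, not the library's):

```python
import torch

def resolve_target_dtype(weight: torch.Tensor, pre_quantization_dtype=None) -> torch.dtype:
    # Prefer the autocast dtype, then a recorded pre-quantization dtype,
    # otherwise fall back to the projection weight's dtype.
    if torch.is_autocast_enabled():
        return torch.get_autocast_gpu_dtype()
    if pre_quantization_dtype is not None:
        return pre_quantization_dtype
    return weight.dtype

weight = torch.randn(8, 8, dtype=torch.float16)
hidden = torch.randn(2, 4, 8)                     # accidentally float32
hidden = hidden.to(resolve_target_dtype(weight))  # cast back to float16
print(hidden.dtype)
```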
|
class FalconFlashAttention2(FalconAttention):
'''
Falcon flash attention module. This module inherits from `FalconAttention`, as the weights of the module stay
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
'''
def __init__(self, *args, **kwargs):
pass
def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None):
pass
| 3 | 1 | 47 | 8 | 35 | 6 | 6 | 0.24 | 1 | 5 | 1 | 0 | 2 | 2 | 2 | 16 | 102 | 17 | 70 | 28 | 55 | 17 | 42 | 15 | 39 | 11 | 2 | 2 | 12 |
| 2,478 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconForCausalLM |
import torch
from ...utils import auto_docstring, logging
from typing import Optional, Union
from .configuration_falcon import FalconConfig
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from ...generation import GenerationMixin
from ...cache_utils import Cache, DynamicCache, StaticCache
@auto_docstring(custom_intro='\n The Falcon Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ')
class FalconForCausalLM(FalconPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: FalconConfig):
super().__init__(config)
self.transformer = FalconModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def set_output_embeddings(self, new_embeddings: torch.Tensor):
self.lm_head = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
hidden_states = transformer_outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
lm_logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
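A hedged usage sketch for the class above. The checkpoint name comes from the configuration docstring ([tiiuae/falcon-7b]); it downloads a multi-gigabyte model, and any other Falcon checkpoint could be substituted:

```python
from transformers import AutoTokenizer, FalconForCausalLM

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
model = FalconForCausalLM.from_pretrained("tiiuae/falcon-7b")

inputs = tokenizer("The Falcon model is", return_tensors="pt")

# labels=input_ids is valid: the shift happens inside the loss function.
outputs = model(**inputs, labels=inputs["input_ids"])
print(outputs.loss, outputs.logits.shape)

generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```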
|
@auto_docstring(custom_intro='\n The Falcon Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ')
class FalconForCausalLM(FalconPreTrainedModel, GenerationMixin):
def __init__(self, config: FalconConfig):
pass
def set_output_embeddings(self, new_embeddings: torch.Tensor):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
'''
pass
| 6 | 1 | 21 | 2 | 15 | 4 | 2 | 0.24 | 2 | 10 | 4 | 0 | 5 | 2 | 5 | 8 | 119 | 15 | 84 | 37 | 53 | 20 | 28 | 17 | 22 | 6 | 2 | 1 | 10 |
| 2,479 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconForQuestionAnswering |
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from typing import Optional, Union
import torch
from ...utils import auto_docstring, logging
@auto_docstring
class FalconForQuestionAnswering(FalconPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = FalconModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
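To turn the start/end logits returned above into an answer span, a pipeline typically takes argmaxes (or scores all valid pairs). A naive sketch with random logits standing in for model output:

```python
import torch

seq_len = 12
start_logits = torch.randn(1, seq_len)  # stand-ins for the model's outputs
end_logits = torch.randn(1, seq_len)

start = int(start_logits.argmax(dim=-1))
end = int(end_logits.argmax(dim=-1))
if end < start:  # naive guard; real pipelines score every valid (start, end) pair
    start, end = end, start
print(f"predicted answer token span: [{start}, {end}]")
```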
|
@auto_docstring
class FalconForQuestionAnswering(FalconPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
'''
pass
| 5 | 1 | 38 | 4 | 28 | 7 | 4 | 0.23 | 1 | 4 | 2 | 0 | 2 | 2 | 2 | 5 | 79 | 9 | 57 | 27 | 42 | 13 | 31 | 15 | 28 | 7 | 2 | 2 | 8 |
| 2,480 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconForSequenceClassification |
from typing import Optional, Union
from .configuration_falcon import FalconConfig
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch import nn
from ...utils import auto_docstring, logging
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
@auto_docstring(custom_intro='\n The Falcon Model transformer with a sequence classification head on top (linear layer).\n\n [`FalconForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ')
class FalconForSequenceClassification(FalconPreTrainedModel):
def __init__(self, config: FalconConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = FalconModel(config)
self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
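The pooling described in the intro (pick the last non-padding token per row) reduces to a small indexing trick, reproduced here standalone with an illustrative `pad_token_id` of 0:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])

non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)  # tensor([2, 1]) -> index of the last real token per row

# The pooled logits would then be gathered as:
# pooled = logits[torch.arange(input_ids.shape[0]), last_non_pad_token]
```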
| null | 5 | 1 | 51 | 5 | 42 | 4 | 9 | 0.09 | 1 | 8 | 3 | 0 | 2 | 3 | 2 | 5 | 109 | 11 | 90 | 30 | 69 | 8 | 45 | 17 | 42 | 16 | 2 | 3 | 17 |
| 2,481 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconForTokenClassification |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch import nn
from ...utils import auto_docstring, logging
from .configuration_falcon import FalconConfig
from typing import Optional, Union
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
@auto_docstring
class FalconForTokenClassification(FalconPreTrainedModel):
def __init__(self, config: FalconConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = FalconModel(config)
if getattr(config, 'classifier_dropout', None) is not None:
classifier_dropout = config.classifier_dropout
elif getattr(config, 'hidden_dropout', None) is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
batch_size, seq_length = labels.shape
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length))
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
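The loss computation above flattens token-level logits and labels before `CrossEntropyLoss`; a toy-sized sketch of that reshaping:

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 4, 3
logits = torch.randn(batch, seq_len, num_labels)         # stand-in classifier output
labels = torch.randint(0, num_labels, (batch, seq_len))

loss = CrossEntropyLoss()(
    logits.view(batch * seq_len, num_labels),
    labels.view(batch * seq_len),
)
print(loss)
```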
|
@auto_docstring
class FalconForTokenClassification(FalconPreTrainedModel):
def __init__(self, config: FalconConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 36 | 4 | 29 | 4 | 4 | 0.11 | 1 | 6 | 3 | 0 | 2 | 4 | 2 | 5 | 80 | 9 | 64 | 28 | 43 | 7 | 27 | 15 | 24 | 5 | 2 | 1 | 8 |
| 2,482 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconLinear |
from torch import nn
import torch
class FalconLinear(nn.Linear):
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_states = input @ self.weight.T
if self.bias is None:
return hidden_states
return hidden_states + self.bias
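`FalconLinear` only re-expresses the affine map with an explicit matmul; a quick sanity sketch checking that computation against `nn.Linear` on random data:

```python
import torch
from torch import nn

lin = nn.Linear(4, 3)
x = torch.randn(2, 4)
manual = x @ lin.weight.T + lin.bias  # same computation as FalconLinear.forward
print(torch.allclose(manual, lin(x), atol=1e-6))
```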
|
class FalconLinear(nn.Linear):
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 6 | 0 | 6 | 3 | 4 | 0 | 6 | 3 | 4 | 2 | 1 | 1 | 2 |
| 2,483 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconMLP |
from ...activations import get_activation
from torch import nn
import torch
from .configuration_falcon import FalconConfig
class FalconMLP(nn.Module):
def __init__(self, config: FalconConfig):
super().__init__()
hidden_size = config.hidden_size
self.dense_h_to_4h = FalconLinear(hidden_size, config.ffn_hidden_size, bias=config.bias)
self.act = get_activation(config.activation)
self.dense_4h_to_h = FalconLinear(config.ffn_hidden_size, hidden_size, bias=config.bias)
self.hidden_dropout = config.hidden_dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.act(self.dense_h_to_4h(x))
x = self.dense_4h_to_h(x)
return x
|
class FalconMLP(nn.Module):
def __init__(self, config: FalconConfig):
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 1 | 6 | 0 | 1 | 0 | 1 | 4 | 2 | 0 | 2 | 4 | 2 | 12 | 14 | 2 | 12 | 8 | 9 | 0 | 12 | 8 | 9 | 1 | 1 | 0 | 2 |
| 2,484 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py | transformers.models.falcon.modeling_falcon.FalconModel |
import math
from .configuration_falcon import FalconConfig
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...utils import auto_docstring, logging
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from ...cache_utils import Cache, DynamicCache, StaticCache
from typing import Optional, Union
@auto_docstring
class FalconModel(FalconPreTrainedModel):
def __init__(self, config: FalconConfig):
super().__init__(config)
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.use_alibi = config.alibi
self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
self.h = nn.ModuleList([FalconDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.rotary_emb = FalconRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, new_embeddings: torch.Tensor):
self.word_embeddings = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
alibi = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
batch_size, seq_length, _ = inputs_embeds.shape
if self.use_alibi:
mask = torch.ones((batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long) if attention_mask is None else attention_mask
alibi = build_alibi_tensor(mask, self.num_heads, dtype=inputs_embeds.dtype)
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions, head_mask, alibi)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position, position_embeddings=position_embeddings)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, head_mask: torch.Tensor, alibi: torch.Tensor):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions) and (head_mask is None) and (alibi is None):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype, device = (input_tensor.dtype, input_tensor.device)
min_dtype = torch.finfo(dtype).min
batch_size, sequence_length, _ = input_tensor.shape
if using_static_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0])
if head_mask is None and alibi is not None:
alibi = alibi.reshape(batch_size, -1, *alibi.shape[1:])
causal_mask = torch.masked_fill(alibi / math.sqrt(self.config.hidden_size // self.num_heads), causal_mask < -1, min_dtype)
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache
to account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
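A self-contained hedged sketch of the 2D-to-4D conversion documented in `_prepare_4d_causal_attention_mask_with_cache_position` above, using a tiny made-up example (four query tokens, one padded key position, no cache): 0.0 marks allowed positions and the dtype minimum marks blocked ones in the additive mask.

```python
import torch

def causal_4d_mask(attention_mask_2d, sequence_length, target_length, dtype, cache_position):
    min_dtype = torch.finfo(dtype).min
    batch_size = attention_mask_2d.shape[0]
    mask = torch.full((sequence_length, target_length), min_dtype, dtype=dtype)
    if sequence_length != 1:
        mask = torch.triu(mask, diagonal=1)                             # block future positions
    mask = mask * (torch.arange(target_length) > cache_position.reshape(-1, 1))
    mask = mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()
    key_len = attention_mask_2d.shape[-1]
    padding = (mask[..., :key_len] + attention_mask_2d[:, None, None, :]) == 0
    mask[..., :key_len] = mask[..., :key_len].masked_fill(padding, min_dtype)
    return mask

# 4 query tokens, 4 key positions, last key token is padding
mask = causal_4d_mask(torch.tensor([[1, 1, 1, 0]]), 4, 4, torch.float32, torch.arange(4))
print((mask == 0).int()[0, 0])  # 1 = attended, 0 = blocked: lower triangle minus the padded column
```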
|
@auto_docstring
class FalconModel(FalconPreTrainedModel):
def __init__(self, config: FalconConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings: torch.Tensor):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
'''
pass
def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, head_mask: torch.Tensor, alibi: torch.Tensor):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache
to account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
| 10
| 2
| 53
| 6
| 39
| 8
| 8
| 0.21
| 1
| 16
| 8
| 0
| 5
| 10
| 6
| 9
| 330
| 42
| 240
| 75
| 195
| 50
| 114
| 42
| 107
| 28
| 2
| 2
| 45
|
2,485
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py
|
transformers.models.falcon.modeling_falcon.FalconPreTrainedModel
|
from torch import nn
from ...utils import auto_docstring, logging
from .configuration_falcon import FalconConfig
from ...modeling_utils import PreTrainedModel
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
@auto_docstring
class FalconPreTrainedModel(PreTrainedModel):
config: FalconConfig
base_model_prefix = 'transformer'
supports_gradient_checkpointing = True
_no_split_modules = ['FalconDecoderLayer']
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, FalconLinear)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@classmethod
def _check_and_enable_sdpa(cls, config, hard_check_only: bool=False):
_is_bettertransformer = getattr(cls, 'use_bettertransformer', False)
if _is_bettertransformer:
return config
if not hard_check_only:
config._attn_implementation = 'sdpa'
return config
|
@auto_docstring
class FalconPreTrainedModel(PreTrainedModel):
def __init__(self, *inputs, **kwargs):
pass
def _init_weights(self, module: nn.Module):
'''Initialize the weights.'''
pass
@classmethod
def _check_and_enable_sdpa(cls, config, hard_check_only: bool=False):
pass
| 6
| 1
| 8
| 0
| 7
| 1
| 3
| 0.25
| 1
| 3
| 1
| 5
| 2
| 0
| 3
| 3
| 45
| 5
| 32
| 15
| 27
| 8
| 29
| 14
| 25
| 6
| 1
| 2
| 10
|
2,486
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon/modeling_falcon.py
|
transformers.models.falcon.modeling_falcon.FalconRotaryEmbedding
|
from .configuration_falcon import FalconConfig
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from torch import nn
class FalconRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: FalconConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
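A hedged sketch of how the cos/sin pair computed above is typically consumed downstream (the standard "rotate half" application of rotary position embeddings); `head_dim`, `base`, and all shapes below are made up for illustration.

```python
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

head_dim, base = 16, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
position_ids = torch.arange(6)[None, :]                    # (batch, seq)
freqs = position_ids[:, :, None].float() * inv_freq[None, None, :]
emb = torch.cat((freqs, freqs), dim=-1)                    # (batch, seq, head_dim)
cos, sin = emb.cos()[:, None], emb.sin()[:, None]          # broadcast over heads

q = torch.randn(1, 2, 6, head_dim)                         # (batch, heads, seq, head_dim)
q_rotated = q * cos + rotate_half(q) * sin
print(q_rotated.shape)  # torch.Size([1, 2, 6, 16])
```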
|
class FalconRotaryEmbedding(nn.Module):
def __init__(self, config: FalconConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
2,487
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py
|
transformers.models.falcon_mamba.configuration_falcon_mamba.FalconMambaConfig
|
import math
from ...configuration_utils import PretrainedConfig
class FalconMambaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FalconMambaModel`]. It is used to instantiate a FALCON_MAMBA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the FALCON_MAMBA
[tiiuae/falcon-mamba-7b](https://huggingface.co/tiiuae/falcon-mamba-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50280):
Vocabulary size of the FALCON_MAMBA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FalconMambaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
state_size (`int`, *optional*, defaults to 16): shape of the state space latents.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the model.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
The id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 0):
The id of the end of sentence token in the vocabulary.
expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
use_bias (`bool`, *optional*, defaults to `False`):
Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block
use_conv_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias in the convolution layer of the mixer block.
hidden_act (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.1):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
residual_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model
time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_scale (`float`, *optional*, defaults to 1.0):
Scale used to scale `dt_proj.bias`.
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj.bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj.bias`.
time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
Init scheme used for `dt_proj.weight`. Should be one of `["random", "constant"]`
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
Whether or not to rescale `out_proj` weights when initializing.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the cache should be used.
use_falcon_mambapy (`bool`, *optional*, defaults to `False`):
This argument corresponds to `use_mambapy` in MambaConfig.
Determines the fallback strategy during training if the CUDA-based official implementation of Mamba is not available. If `True`, the mamba.py implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.
mixer_rms_eps (`float`, *optional*, defaults to 1e-06):
The RMS norm epsilon value that is used in the Mixer RMS norm for B, C and dt states.
Example:
```python
>>> from transformers import FalconMambaConfig, FalconMambaModel
>>> # Initializing a FalconMamba configuration
>>> configuration = FalconMambaConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = FalconMambaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'falcon_mamba'
def __init__(self, vocab_size=50280, hidden_size=768, state_size=16, num_hidden_layers=32, layer_norm_epsilon=1e-05, pad_token_id=0, bos_token_id=0, eos_token_id=0, expand=2, conv_kernel=4, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_scale=1.0, time_step_min=0.001, time_step_max=0.1, time_step_init_scheme='random', time_step_floor=0.0001, rescale_prenorm_residual=False, use_cache=True, use_falcon_mambapy=False, mixer_rms_eps=1e-06, **kwargs):
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.state_size = state_size
self.num_hidden_layers = num_hidden_layers
self.layer_norm_epsilon = layer_norm_epsilon
self.conv_kernel = conv_kernel
self.expand = expand
self.intermediate_size = int(expand * self.hidden_size) if kwargs.get('intermediate_size') is None else kwargs.get('intermediate_size')
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.use_bias = use_bias
self.use_conv_bias = use_conv_bias
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == 'auto' else time_step_rank
self.time_step_scale = time_step_scale
self.time_step_min = time_step_min
self.time_step_max = time_step_max
self.time_step_init_scheme = time_step_init_scheme
self.time_step_floor = time_step_floor
self.rescale_prenorm_residual = rescale_prenorm_residual
self.residual_in_fp32 = residual_in_fp32
self.use_cache = use_cache
self.use_falcon_mambapy = use_falcon_mambapy
self.mixer_rms_eps = mixer_rms_eps
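A small hedged illustration of the two derived defaults mentioned in the docstring above (`intermediate_size` from `expand`, and `time_step_rank` when set to `"auto"`), using the documented default values.

```python
import math

hidden_size, expand = 768, 2
intermediate_size = int(expand * hidden_size)   # 1536 unless an explicit value is passed
time_step_rank = math.ceil(hidden_size / 16)    # 48 when time_step_rank="auto"
print(intermediate_size, time_step_rank)
```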
|
class FalconMambaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FalconMambaModel`]. It is used to instantiate a FALCON_MAMBA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the FALCON_MAMBA
[tiiuae/falcon-mamba-7b](https://huggingface.co/tiiuae/falcon-mamba-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50280):
Vocabulary size of the FALCON_MAMBA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FalconMambaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
state_size (`int`, *optional*, defaults to 16): shape of the state space latents.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the model.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
The id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 0):
The id of the end of sentence token in the vocabulary.
expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
use_bias (`bool`, *optional*, defaults to `False`):
Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block
use_conv_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias in the convolution layer of the mixer block.
hidden_act (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.1):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
residual_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model
time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_scale (`float`, *optional*, defaults to 1.0):
Scale used to scale `dt_proj.bias`.
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj.bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj.bias`.
time_step_init_scheme (`str`, *optional*, defaults to `"random"`):
Init scheme used for `dt_proj.weight`. Should be one of `["random", "constant"]`
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
Whether or not to rescale `out_proj` weights when initializing.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the cache should be used.
use_falcon_mambapy (`bool`, *optional*, defaults to `False`):
This argument corresponds to `use_mambapy` in MambaConfig.
Determines the fallback strategy during training if the CUDA-based official implementation of Mamba is not available. If `True`, the mamba.py implementation is used. If `False`, the naive and slower implementation is used. Consider switching to the naive version if memory is limited.
mixer_rms_eps (`float`, *optional*, defaults to 1e-06):
The RMS norm epsilon value that is used in the Mixer RMS norm for B, C and dt states.
Example:
```python
>>> from transformers import FalconMambaConfig, FalconMambaModel
>>> # Initializing a FalconMamba configuration
>>> configuration = FalconMambaConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = FalconMambaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50280, hidden_size=768, state_size=16, num_hidden_layers=32, layer_norm_epsilon=1e-05, pad_token_id=0, bos_token_id=0, eos_token_id=0, expand=2, conv_kernel=4, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_scale=1.0, time_step_min=0.001, time_step_max=0.1, time_step_init_scheme='random', time_step_floor=0.0001, rescale_prenorm_residual=False, use_cache=True, use_falcon_mambapy=False, mixer_rms_eps=1e-06, **kwargs):
pass
| 2
| 1
| 57
| 1
| 56
| 0
| 2
| 1.14
| 1
| 2
| 0
| 0
| 1
| 26
| 1
| 1
| 134
| 10
| 58
| 57
| 28
| 66
| 30
| 29
| 28
| 2
| 1
| 0
| 2
|
2,488
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaBlock
|
from typing import Any, Optional, Union
import torch
from ...modeling_layers import GradientCheckpointingLayer
class FalconMambaBlock(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.residual_in_fp32 = config.residual_in_fp32
self.norm = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.mixer = FalconMambaMixer(config, layer_idx=layer_idx)
def forward(self, hidden_states, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
residual = hidden_states
hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mixer(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask)
hidden_states = residual + hidden_states
return hidden_states
|
class FalconMambaBlock(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
pass
def forward(self, hidden_states, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
pass
| 3
| 0
| 12
| 1
| 12
| 0
| 2
| 0
| 1
| 4
| 3
| 0
| 2
| 5
| 2
| 12
| 26
| 2
| 24
| 15
| 15
| 0
| 16
| 9
| 13
| 2
| 1
| 1
| 3
|
2,489
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaForCausalLM
|
from ...utils import ModelOutput, auto_docstring, logging
from typing import Any, Optional, Union
from ...generation import GenerationMixin
from torch import nn
from torch.nn import CrossEntropyLoss
import torch
@auto_docstring(custom_intro='\n The FALCON_MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class FalconMambaForCausalLM(FalconMambaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.backbone = FalconMambaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
return self.backbone.set_input_embeddings(new_embeddings)
def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], num_new_tokens: int=1, **kwargs) -> dict[str, Any]:
model_kwargs['cache_params'] = outputs.get('cache_params', None)
if model_kwargs.get('use_cache', True) and 'cache_position' in model_kwargs and (model_kwargs['cache_position'] is not None):
model_kwargs['cache_position'] = model_kwargs['cache_position'][-1:] + num_new_tokens
if 'attention_mask' in model_kwargs:
attention_mask = model_kwargs['attention_mask']
model_kwargs['attention_mask'] = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
return model_kwargs
def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, **kwargs):
model_inputs = {'input_ids': input_ids.contiguous()}
if use_cache and cache_params is None:
cache_position = torch.arange(0, self.backbone.config.conv_kernel, device=input_ids.device)
if inputs_embeds is not None:
model_inputs = {'inputs_embeds': inputs_embeds}
max_batch_size = inputs_embeds.size(0)
else:
max_batch_size = input_ids.size(0)
cache_params = FalconMambaCache(self.backbone.config, max_batch_size, device=self.device, dtype=self.dtype)
if use_cache and cache_position[0] > 0:
model_inputs['input_ids'] = input_ids[:, -1].unsqueeze(-1).contiguous()
attention_mask = None
if not use_cache and inputs_embeds is not None:
model_inputs = {'inputs_embeds': inputs_embeds}
model_inputs.update({'cache_params': cache_params, 'use_cache': use_cache, 'cache_position': cache_position, 'attention_mask': attention_mask})
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[FalconMambaCache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, FalconMambaCausalLMOutput]:
"""
cache_params (`FalconMambaCache`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
falcon_mamba_outputs = self.backbone(input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, cache_position=cache_position, attention_mask=attention_mask)
hidden_states = falcon_mamba_outputs[0]
logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
loss = None
if labels is not None:
labels = labels.to(logits.device)
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (logits,) + falcon_mamba_outputs[1:]
return (loss,) + output if loss is not None else output
return FalconMambaCausalLMOutput(loss=loss, logits=logits, cache_params=falcon_mamba_outputs.cache_params, hidden_states=falcon_mamba_outputs.hidden_states)
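A hedged, standalone sketch of the label shifting performed in `forward` above: position t predicts token t+1, so the logits drop the last step and the labels drop the first (batch, sequence length, and vocabulary size below are illustrative).

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 50280
logits = torch.randn(2, 6, vocab_size)            # (batch, seq, vocab)
labels = torch.randint(0, vocab_size, (2, 6))     # e.g. labels = input_ids

shift_logits = logits[..., :-1, :].contiguous()   # predictions for steps 0..seq-2
shift_labels = labels[..., 1:].contiguous()       # targets are the next tokens
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())
```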
|
@auto_docstring(custom_intro='\n The FALCON_MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class FalconMambaForCausalLM(FalconMambaPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: dict[str, Any], num_new_tokens: int=1, **kwargs) -> dict[str, Any]:
pass
def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, **kwargs):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[FalconMambaCache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, FalconMambaCausalLMOutput]:
'''
cache_params (`FalconMambaCache`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
'''
pass
| 9
| 1
| 17
| 2
| 13
| 2
| 2
| 0.15
| 2
| 10
| 3
| 0
| 8
| 2
| 8
| 9
| 151
| 20
| 115
| 46
| 77
| 17
| 53
| 22
| 44
| 6
| 2
| 3
| 19
|
2,490
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaMixer
|
from .configuration_falcon_mamba import FalconMambaConfig
from ...activations import ACT2FN
import torch
from typing import Any, Optional, Union
from torch import nn
from ...utils.import_utils import is_causal_conv1d_available, is_kernels_available, is_mamba_ssm_available, is_mambapy_available
class FalconMambaMixer(nn.Module):
"""
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see FalconMamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between FalconMamba and the linear time invariant S4,
and is why FalconMamba is called **selective** state spaces)
"""
def __init__(self, config: FalconMambaConfig, layer_idx: int):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ssm_state_size = config.state_size
self.conv_kernel_size = config.conv_kernel
self.intermediate_size = config.intermediate_size
self.time_step_rank = int(config.time_step_rank)
self.layer_idx = layer_idx
self.use_conv_bias = config.use_conv_bias
self.conv1d = nn.Conv1d(in_channels=self.intermediate_size, out_channels=self.intermediate_size, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.intermediate_size, padding=config.conv_kernel - 1)
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_falcon_mambapy = config.use_falcon_mambapy
self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(self.intermediate_size, -1).contiguous()
self.A_log = nn.Parameter(torch.log(A))
self.D = nn.Parameter(torch.ones(self.intermediate_size))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
self.use_bias = config.use_bias
self.warn_slow_implementation()
self.register_buffer('b_c_rms', torch.nn.Parameter(torch.ones(self.ssm_state_size), requires_grad=False), persistent=False)
self.register_buffer('dt_rms', torch.nn.Parameter(torch.ones(self.intermediate_size), requires_grad=False), persistent=False)
self.rms_eps = config.mixer_rms_eps
def warn_slow_implementation(self):
causal_conv1d_update, causal_conv1d_fn = _lazy_load_causal_conv1d()
is_fast_path_available = all((selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn))
if not is_fast_path_available:
if self.use_falcon_mambapy:
if is_mambapy_available():
logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. Falling back to the mamba.py backend. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and https://github.com/Dao-AILab/causal-conv1d or `pip install kernels` for causal-conv1d')
else:
raise ImportError('use_mambapy is set to True but the mambapy package is not installed. To install it follow https://github.com/alxndrTL/mamba.py.')
else:
logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. Falling back to the sequential implementation of Mamba, as use_mambapy is set to False. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and https://github.com/Dao-AILab/causal-conv1d or `pip install kernels` for causal-conv1d. For the mamba.py backend, follow https://github.com/alxndrTL/mamba.py.')
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
projected_states = self.in_proj(hidden_states).transpose(1, 2)
if self.training and cache_params is None:
contextualized_states = mamba_inner_fn(projected_states, self.conv1d.weight, self.conv1d.bias if self.use_conv_bias else None, self.x_proj.weight, self.dt_proj.weight, self.out_proj.weight, self.out_proj.bias.float() if self.use_bias else None, -torch.exp(self.A_log.float()), None, None, self.D.float(), delta_bias=self.dt_proj.bias.float(), delta_softplus=True, b_rms_weight=self.b_c_rms, c_rms_weight=self.b_c_rms, dt_rms_weight=self.dt_rms, b_c_dt_rms_eps=self.rms_eps)
else:
causal_conv1d_update, causal_conv1d_fn = _lazy_load_causal_conv1d()
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
if cache_params is not None and cache_position[0] > 0:
hidden_states = causal_conv1d_update(hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation)
hidden_states = hidden_states.unsqueeze(-1)
else:
if cache_params is not None:
conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1)
B = rms_forward(B, variance_epsilon=self.rms_eps)
C = rms_forward(C, variance_epsilon=self.rms_eps)
time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)
if hasattr(self.config, '_pre_quantization_dtype'):
discrete_time_step = (self.dt_proj(time_step) - self.dt_proj.bias).transpose(1, 2)
else:
discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)
A = -torch.exp(self.A_log.float())
time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, 'bias') else None
if cache_params is not None and cache_position[0] > 0:
scan_outputs = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states[..., 0], discrete_time_step[..., 0], A, B[:, 0], C[:, 0], self.D, gate[..., 0], time_proj_bias, dt_softplus=True).unsqueeze(-1)
else:
scan_outputs, ssm_state = selective_scan_fn(hidden_states, discrete_time_step, A, B.transpose(1, 2), C.transpose(1, 2), self.D.float(), gate, time_proj_bias, delta_softplus=True, return_last_state=True)
if ssm_state is not None and cache_params is not None:
cache_params.update_ssm_state(self.layer_idx, ssm_state)
contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
return contextualized_states
def slow_forward(self, input_states, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
projected_states = self.in_proj(input_states).transpose(1, 2)
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
if cache_position is not None and cache_position.shape[0] == self.conv_kernel_size:
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.update_conv_state(self.layer_idx, conv_state, cache_position)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
else:
conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position)
conv_state = conv_state.to(self.conv1d.weight.device)
hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
else:
ssm_state = torch.zeros((batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1)
B = rms_forward(B, variance_epsilon=self.rms_eps)
C = rms_forward(C, variance_epsilon=self.rms_eps)
time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)
discrete_time_step = self.dt_proj(time_step)
discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2)
A = -torch.exp(self.A_log.float())
discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None])
discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
if self.use_falcon_mambapy and self.training and (cache_params is None):
hs = pscan(discrete_A.transpose(1, 2), deltaB_u.transpose(1, 2))
scan_output = (hs @ C.unsqueeze(-1)).squeeze(3).transpose(1, 2)
scan_output = scan_output + hidden_states * self.D[None, :, None]
scan_output = scan_output * self.act(gate)
else:
scan_outputs = []
for i in range(seq_len):
ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1))
scan_outputs.append(scan_output[:, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1)
scan_output = scan_output + hidden_states * self.D[None, :, None]
scan_output = scan_output * self.act(gate)
if cache_params is not None:
cache_params.update_ssm_state(self.layer_idx, ssm_state)
contextualized_states = self.out_proj(scan_output.transpose(1, 2))
return contextualized_states
def forward(self, hidden_states, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
causal_conv1d_update, causal_conv1d_fn = _lazy_load_causal_conv1d()
is_fast_path_available = all((selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn))
if is_fast_path_available and 'cuda' in self.x_proj.weight.device.type and (not torch._dynamo.is_compiling()):
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask)
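A toy, hedged sketch of the sequential selective scan that `slow_forward` falls back to: the SSM state is updated step by step and read out through C, with D acting as a skip connection. All tensors below are random stand-ins with tiny made-up sizes; the real layer derives them from the input projections.

```python
import torch

batch, channels, state_size, seq_len = 1, 4, 3, 5
u = torch.randn(batch, channels, seq_len)                          # gated / convolved hidden states
discrete_A = torch.rand(batch, channels, seq_len, state_size)      # exp(A * dt) per step
deltaB_u = torch.randn(batch, channels, seq_len, state_size)       # dt * B * u per step
C = torch.randn(batch, seq_len, state_size)
D = torch.randn(channels)

ssm_state = torch.zeros(batch, channels, state_size)
scan_outputs = []
for i in range(seq_len):
    ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]   # state update
    y = torch.matmul(ssm_state, C[:, i, :].unsqueeze(-1))                   # readout through C
    scan_outputs.append(y[:, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1) + u * D[None, :, None]      # D skip connection
print(scan_output.shape)  # torch.Size([1, 4, 5])
```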
|
class FalconMambaMixer(nn.Module):
'''
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see FalconMamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between FalconMamba and the linear time invariant S4,
and is why FalconMamba is called **selective** state spaces)
'''
def __init__(self, config: FalconMambaConfig, layer_idx: int):
pass
def warn_slow_implementation(self):
pass
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
pass
def slow_forward(self, input_states, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
pass
def forward(self, hidden_states, cache_params: Optional[FalconMambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
pass
| 6
| 1
| 73
| 7
| 60
| 11
| 7
| 0.21
| 1
| 7
| 2
| 0
| 4
| 22
| 4
| 14
| 305
| 32
| 242
| 76
| 219
| 51
| 122
| 56
| 117
| 12
| 1
| 3
| 27
|
2,491
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaModel
|
import torch
from typing import Any, Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring
class FalconMambaModel(FalconMambaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.layers = nn.ModuleList([FalconMambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
self.norm_f = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.post_init()
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[FalconMambaCache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> Union[tuple, FalconMambaOutput]:
"""
cache_params (`FalconMambaCache`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
use_cache (`bool`, *optional*):
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
"""
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache if not self.training else False
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
if self.gradient_checkpointing and self.training and use_cache:
use_cache = False
if use_cache:
if cache_params is None:
cache_params = FalconMambaCache(self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype)
cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
elif cache_position is None:
raise ValueError("You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will be initialized for you automatically")
else:
cache_params = None
hidden_states = inputs_embeds
all_hidden_states = () if output_hidden_states else None
for mixer_block in self.layers:
hidden_states = mixer_block(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.norm_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, cache_params, all_hidden_states] if v is not None))
return FalconMambaOutput(last_hidden_state=hidden_states, cache_params=cache_params if use_cache else None, hidden_states=all_hidden_states)
|
@auto_docstring
class FalconMambaModel(FalconMambaPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[FalconMambaCache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> Union[tuple, FalconMambaOutput]:
'''
cache_params (`FalconMambaCache`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
use_cache (`bool`, *optional*):
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
'''
pass
| 7
| 1
| 23
| 3
| 19
| 1
| 5
| 0.06
| 1
| 9
| 4
| 0
| 4
| 5
| 4
| 5
| 100
| 14
| 82
| 24
| 61
| 5
| 42
| 12
| 37
| 18
| 2
| 2
| 21
|
2,492
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Any, Optional, Union
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Class for the FALCON_MAMBA model outputs.\n ')
class FalconMambaOutput(ModelOutput):
"""
cache_params (`FalconMambaCache`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
Includes both the state space model state matrices after the selective scan and the convolutional states.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
cache_params: Optional[FalconMambaCache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class for the FALCON_MAMBA model outputs.\n ')
class FalconMambaOutput(ModelOutput):
'''
cache_params (`FalconMambaCache`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
Includes both the state space model state matrices after the selective scan and the convolutional states.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 4
| 4
| 4
| 3
| 14
| 4
| 4
| 3
| 0
| 1
| 0
| 0
|
2,493
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaPreTrainedModel
|
import torch
from torch import nn
from .configuration_falcon_mamba import FalconMambaConfig
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_utils import PreTrainedModel
import math
@auto_docstring
class FalconMambaPreTrainedModel(PreTrainedModel):
config: FalconMambaConfig
base_model_prefix = 'backbone'
_no_split_modules = ['FalconMambaBlock', 'FalconMambaMixer']
supports_gradient_checkpointing = True
_is_stateful = True
def _init_weights(self, module):
"""Initialize the weights."""
std = self.config.initializer_range
if isinstance(module, FalconMambaMixer):
A = torch.arange(1, module.ssm_state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(module.intermediate_size, -1).contiguous()
module.A_log.copy_(torch.log(A))
module.D.data.fill_(1.0)
dt_init_std = self.config.time_step_rank ** (-0.5) * self.config.time_step_scale
if self.config.time_step_init_scheme == 'constant':
nn.init.constant_(module.dt_proj.weight, dt_init_std)
elif self.config.time_step_init_scheme == 'random':
nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std)
dt = torch.exp(torch.rand(self.config.intermediate_size) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
module.dt_proj.bias.copy_(inv_dt)
module.dt_proj.bias._no_reinit = True
nn.init.kaiming_uniform_(module.conv1d.weight, a=math.sqrt(5))
if module.conv1d.bias is not None:
if not getattr(module.conv1d.bias, '_no_reinit', False):
nn.init.zeros_(module.conv1d.bias)
nn.init.kaiming_uniform_(module.out_proj.weight, a=math.sqrt(5))
if self.config.rescale_prenorm_residual:
p = module.out_proj.weight
p /= math.sqrt(self.config.num_hidden_layers)
if isinstance(module, nn.Linear):
if not getattr(module.weight, '_no_reinit', False):
nn.init.normal_(module.weight, std=std)
if module.bias is not None:
if not getattr(module.bias, '_no_reinit', False):
nn.init.zeros_(module.bias)
elif isinstance(module, FalconMambaRMSNorm):
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=std)
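A hedged numerical check of the `dt_proj.bias` initialization above: `inv_dt = dt + log(-expm1(-dt))` is the inverse of softplus, so applying softplus to the initialized bias recovers the sampled time steps. The constants below are the documented defaults; the vector length is arbitrary.

```python
import math
import torch
import torch.nn.functional as F

time_step_min, time_step_max, time_step_floor = 0.001, 0.1, 1e-4
dt = torch.exp(
    torch.rand(16) * (math.log(time_step_max) - math.log(time_step_min)) + math.log(time_step_min)
).clamp(min=time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))            # inverse softplus
assert torch.allclose(F.softplus(inv_dt), dt, atol=1e-5)
```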
|
@auto_docstring
class FalconMambaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights.'''
pass
| 3
| 1
| 46
| 4
| 30
| 12
| 11
| 0.44
| 1
| 1
| 1
| 2
| 1
| 0
| 1
| 1
| 58
| 6
| 36
| 11
| 34
| 16
| 30
| 11
| 28
| 11
| 1
| 4
| 11
|
2,494
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py
|
transformers.models.falcon_mamba.modeling_falcon_mamba.FalconMambaRMSNorm
|
import torch
from torch import nn
class FalconMambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
FalconMambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
return self.weight.to(hidden_states.device) * rms_forward(hidden_states, variance_epsilon=self.variance_epsilon)
def extra_repr(self):
return f'{self.weight.shape[0]}, eps={self.variance_epsilon}'
|
class FalconMambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
FalconMambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4 | 1 | 4 | 0 | 3 | 1 | 1 | 0.36 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 13 | 17 | 2 | 11 | 6 | 7 | 4 | 9 | 6 | 5 | 1 | 1 | 0 | 3 |
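
`FalconMambaRMSNorm.forward` above delegates to an `rms_forward` helper that is not included in this record. As a hedged stand-in (not the library's actual implementation), RMS normalization of this kind scales each hidden vector by the reciprocal root mean square of its elements:

```python
import torch

def rms_forward(hidden_states: torch.Tensor, variance_epsilon: float = 1e-6) -> torch.Tensor:
    """Illustrative RMS normalization: x * rsqrt(mean(x^2) + eps), computed in float32."""
    input_dtype = hidden_states.dtype
    hidden_states = hidden_states.to(torch.float32)
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + variance_epsilon)
    return hidden_states.to(input_dtype)

x = torch.randn(2, 5, 16)
normed = rms_forward(x)
print(normed.pow(2).mean(-1))  # close to 1.0 for every position
```
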
2,495 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py | transformers.models.fastspeech2_conformer.configuration_fastspeech2_conformer.FastSpeech2ConformerConfig |
from ...configuration_utils import PretrainedConfig
class FastSpeech2ConformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to
instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 384):
The dimensionality of the hidden layers.
vocab_size (`int`, *optional*, defaults to 78):
The size of the vocabulary.
num_mel_bins (`int`, *optional*, defaults to 80):
The number of mel filters used in the filter bank.
encoder_num_attention_heads (`int`, *optional*, defaults to 2):
The number of attention heads in the encoder.
encoder_layers (`int`, *optional*, defaults to 4):
The number of layers in the encoder.
encoder_linear_units (`int`, *optional*, defaults to 1536):
The number of units in the linear layer of the encoder.
decoder_layers (`int`, *optional*, defaults to 4):
The number of layers in the decoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 2):
The number of attention heads in the decoder.
decoder_linear_units (`int`, *optional*, defaults to 1536):
The number of units in the linear layer of the decoder.
speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
The number of layers in the post-net of the speech decoder.
speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
The number of units in the post-net layers of the speech decoder.
speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
The kernel size in the post-net of the speech decoder.
positionwise_conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolution kernel used in the position-wise layer.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Specifies whether to normalize before encoder layers.
decoder_normalize_before (`bool`, *optional*, defaults to `False`):
Specifies whether to normalize before decoder layers.
encoder_concat_after (`bool`, *optional*, defaults to `False`):
Specifies whether to concatenate after encoder layers.
decoder_concat_after (`bool`, *optional*, defaults to `False`):
Specifies whether to concatenate after decoder layers.
reduction_factor (`int`, *optional*, defaults to 1):
The factor by which the speech frame rate is reduced.
speaking_speed (`float`, *optional*, defaults to 1.0):
The speed of the speech produced.
use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`):
Specifies whether to use macaron style in the conformer.
use_cnn_in_conformer (`bool`, *optional*, defaults to `True`):
Specifies whether to use convolutional neural networks in the conformer.
encoder_kernel_size (`int`, *optional*, defaults to 7):
The kernel size used in the encoder.
decoder_kernel_size (`int`, *optional*, defaults to 31):
The kernel size used in the decoder.
duration_predictor_layers (`int`, *optional*, defaults to 2):
The number of layers in the duration predictor.
duration_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the duration predictor.
duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
The kernel size used in the duration predictor.
energy_predictor_layers (`int`, *optional*, defaults to 2):
The number of layers in the energy predictor.
energy_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the energy predictor.
energy_predictor_kernel_size (`int`, *optional*, defaults to 3):
The kernel size used in the energy predictor.
energy_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the energy predictor.
energy_embed_kernel_size (`int`, *optional*, defaults to 1):
The kernel size used in the energy embed layer.
energy_embed_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate in the energy embed layer.
stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`):
Specifies whether to stop gradients from the energy predictor.
pitch_predictor_layers (`int`, *optional*, defaults to 5):
The number of layers in the pitch predictor.
pitch_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the pitch predictor.
pitch_predictor_kernel_size (`int`, *optional*, defaults to 5):
The kernel size used in the pitch predictor.
pitch_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the pitch predictor.
pitch_embed_kernel_size (`int`, *optional*, defaults to 1):
The kernel size used in the pitch embed layer.
pitch_embed_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate in the pitch embed layer.
stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`):
Specifies whether to stop gradients from the pitch predictor.
encoder_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the encoder.
encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
The positional dropout rate in the encoder.
encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
The attention dropout rate in the encoder.
decoder_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the decoder.
decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
The positional dropout rate in the decoder.
decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
The attention dropout rate in the decoder.
duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the duration predictor.
speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the speech decoder postnet.
max_source_positions (`int`, *optional*, defaults to 5000):
if `"relative"` position embeddings are used, defines the maximum source input positions.
use_masking (`bool`, *optional*, defaults to `True`):
Specifies whether to use masking in the model.
use_weighted_masking (`bool`, *optional*, defaults to `False`):
Specifies whether to use weighted masking in the model.
num_speakers (`int`, *optional*):
Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use
speaker id embedding layer.
num_languages (`int`, *optional*):
Number of languages. If set to > 1, assume that the language ids will be provided as the input and use the
language id embedding layer.
speaker_embed_dim (`int`, *optional*):
Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Specifies whether the model is an encoder-decoder.
Example:
```python
>>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig
>>> # Initializing a FastSpeech2Conformer style configuration
>>> configuration = FastSpeech2ConformerConfig()
>>> # Initializing a model from the FastSpeech2Conformer style configuration
>>> model = FastSpeech2ConformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'fastspeech2_conformer'
base_config_key = 'model_config'
attribute_map = {'num_hidden_layers': 'encoder_layers', 'num_attention_heads': 'encoder_num_attention_heads'}
def __init__(self, hidden_size=384, vocab_size=78, num_mel_bins=80, encoder_num_attention_heads=2, encoder_layers=4, encoder_linear_units=1536, decoder_layers=4, decoder_num_attention_heads=2, decoder_linear_units=1536, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, positionwise_conv_kernel_size=3, encoder_normalize_before=False, decoder_normalize_before=False, encoder_concat_after=False, decoder_concat_after=False, reduction_factor=1, speaking_speed=1.0, use_macaron_style_in_conformer=True, use_cnn_in_conformer=True, encoder_kernel_size=7, decoder_kernel_size=31, duration_predictor_layers=2, duration_predictor_channels=256, duration_predictor_kernel_size=3, energy_predictor_layers=2, energy_predictor_channels=256, energy_predictor_kernel_size=3, energy_predictor_dropout=0.5, energy_embed_kernel_size=1, energy_embed_dropout=0.0, stop_gradient_from_energy_predictor=False, pitch_predictor_layers=5, pitch_predictor_channels=256, pitch_predictor_kernel_size=5, pitch_predictor_dropout=0.5, pitch_embed_kernel_size=1, pitch_embed_dropout=0.0, stop_gradient_from_pitch_predictor=True, encoder_dropout_rate=0.2, encoder_positional_dropout_rate=0.2, encoder_attention_dropout_rate=0.2, decoder_dropout_rate=0.2, decoder_positional_dropout_rate=0.2, decoder_attention_dropout_rate=0.2, duration_predictor_dropout_rate=0.2, speech_decoder_postnet_dropout=0.5, max_source_positions=5000, use_masking=True, use_weighted_masking=False, num_speakers=None, num_languages=None, speaker_embed_dim=None, is_encoder_decoder=True, **kwargs):
if positionwise_conv_kernel_size % 2 == 0:
raise ValueError(f'positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead.')
if encoder_kernel_size % 2 == 0:
raise ValueError(f'encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.')
if decoder_kernel_size % 2 == 0:
raise ValueError(f'decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.')
if duration_predictor_kernel_size % 2 == 0:
raise ValueError(f'duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead.')
if energy_predictor_kernel_size % 2 == 0:
raise ValueError(f'energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead.')
if energy_embed_kernel_size % 2 == 0:
raise ValueError(f'energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.')
if pitch_predictor_kernel_size % 2 == 0:
raise ValueError(f'pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead.')
if pitch_embed_kernel_size % 2 == 0:
raise ValueError(f'pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.')
if hidden_size % encoder_num_attention_heads != 0:
raise ValueError('The hidden_size must be evenly divisible by encoder_num_attention_heads.')
if hidden_size % decoder_num_attention_heads != 0:
raise ValueError('The hidden_size must be evenly divisible by decoder_num_attention_heads.')
if use_masking and use_weighted_masking:
raise ValueError('Either use_masking or use_weighted_masking can be True, but not both.')
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.num_mel_bins = num_mel_bins
self.encoder_config = {'num_attention_heads': encoder_num_attention_heads, 'layers': encoder_layers, 'kernel_size': encoder_kernel_size, 'attention_dropout_rate': encoder_attention_dropout_rate, 'dropout_rate': encoder_dropout_rate, 'positional_dropout_rate': encoder_positional_dropout_rate, 'linear_units': encoder_linear_units, 'normalize_before': encoder_normalize_before, 'concat_after': encoder_concat_after}
self.decoder_config = {'num_attention_heads': decoder_num_attention_heads, 'layers': decoder_layers, 'kernel_size': decoder_kernel_size, 'attention_dropout_rate': decoder_attention_dropout_rate, 'dropout_rate': decoder_dropout_rate, 'positional_dropout_rate': decoder_positional_dropout_rate, 'linear_units': decoder_linear_units, 'normalize_before': decoder_normalize_before, 'concat_after': decoder_concat_after}
self.encoder_num_attention_heads = encoder_num_attention_heads
self.encoder_layers = encoder_layers
self.duration_predictor_channels = duration_predictor_channels
self.duration_predictor_kernel_size = duration_predictor_kernel_size
self.duration_predictor_layers = duration_predictor_layers
self.energy_embed_dropout = energy_embed_dropout
self.energy_embed_kernel_size = energy_embed_kernel_size
self.energy_predictor_channels = energy_predictor_channels
self.energy_predictor_dropout = energy_predictor_dropout
self.energy_predictor_kernel_size = energy_predictor_kernel_size
self.energy_predictor_layers = energy_predictor_layers
self.pitch_embed_dropout = pitch_embed_dropout
self.pitch_embed_kernel_size = pitch_embed_kernel_size
self.pitch_predictor_channels = pitch_predictor_channels
self.pitch_predictor_dropout = pitch_predictor_dropout
self.pitch_predictor_kernel_size = pitch_predictor_kernel_size
self.pitch_predictor_layers = pitch_predictor_layers
self.positionwise_conv_kernel_size = positionwise_conv_kernel_size
self.speech_decoder_postnet_units = speech_decoder_postnet_units
self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
self.reduction_factor = reduction_factor
self.speaking_speed = speaking_speed
self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
self.max_source_positions = max_source_positions
self.use_cnn_in_conformer = use_cnn_in_conformer
self.use_macaron_style_in_conformer = use_macaron_style_in_conformer
self.use_masking = use_masking
self.use_weighted_masking = use_weighted_masking
self.num_speakers = num_speakers
self.num_languages = num_languages
self.speaker_embed_dim = speaker_embed_dim
self.duration_predictor_dropout_rate = duration_predictor_dropout_rate
self.is_encoder_decoder = is_encoder_decoder
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
|
class FastSpeech2ConformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to
instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 384):
The dimensionality of the hidden layers.
vocab_size (`int`, *optional*, defaults to 78):
The size of the vocabulary.
num_mel_bins (`int`, *optional*, defaults to 80):
The number of mel filters used in the filter bank.
encoder_num_attention_heads (`int`, *optional*, defaults to 2):
The number of attention heads in the encoder.
encoder_layers (`int`, *optional*, defaults to 4):
The number of layers in the encoder.
encoder_linear_units (`int`, *optional*, defaults to 1536):
The number of units in the linear layer of the encoder.
decoder_layers (`int`, *optional*, defaults to 4):
The number of layers in the decoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 2):
The number of attention heads in the decoder.
decoder_linear_units (`int`, *optional*, defaults to 1536):
The number of units in the linear layer of the decoder.
speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
The number of layers in the post-net of the speech decoder.
speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
The number of units in the post-net layers of the speech decoder.
speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
The kernel size in the post-net of the speech decoder.
positionwise_conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolution kernel used in the position-wise layer.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Specifies whether to normalize before encoder layers.
decoder_normalize_before (`bool`, *optional*, defaults to `False`):
Specifies whether to normalize before decoder layers.
encoder_concat_after (`bool`, *optional*, defaults to `False`):
Specifies whether to concatenate after encoder layers.
decoder_concat_after (`bool`, *optional*, defaults to `False`):
Specifies whether to concatenate after decoder layers.
reduction_factor (`int`, *optional*, defaults to 1):
The factor by which the speech frame rate is reduced.
speaking_speed (`float`, *optional*, defaults to 1.0):
The speed of the speech produced.
use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`):
Specifies whether to use macaron style in the conformer.
use_cnn_in_conformer (`bool`, *optional*, defaults to `True`):
Specifies whether to use convolutional neural networks in the conformer.
encoder_kernel_size (`int`, *optional*, defaults to 7):
The kernel size used in the encoder.
decoder_kernel_size (`int`, *optional*, defaults to 31):
The kernel size used in the decoder.
duration_predictor_layers (`int`, *optional*, defaults to 2):
The number of layers in the duration predictor.
duration_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the duration predictor.
duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
The kernel size used in the duration predictor.
energy_predictor_layers (`int`, *optional*, defaults to 2):
The number of layers in the energy predictor.
energy_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the energy predictor.
energy_predictor_kernel_size (`int`, *optional*, defaults to 3):
The kernel size used in the energy predictor.
energy_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the energy predictor.
energy_embed_kernel_size (`int`, *optional*, defaults to 1):
The kernel size used in the energy embed layer.
energy_embed_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate in the energy embed layer.
stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`):
Specifies whether to stop gradients from the energy predictor.
pitch_predictor_layers (`int`, *optional*, defaults to 5):
The number of layers in the pitch predictor.
pitch_predictor_channels (`int`, *optional*, defaults to 256):
The number of channels in the pitch predictor.
pitch_predictor_kernel_size (`int`, *optional*, defaults to 5):
The kernel size used in the pitch predictor.
pitch_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the pitch predictor.
pitch_embed_kernel_size (`int`, *optional*, defaults to 1):
The kernel size used in the pitch embed layer.
pitch_embed_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate in the pitch embed layer.
stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`):
Specifies whether to stop gradients from the pitch predictor.
encoder_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the encoder.
encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
The positional dropout rate in the encoder.
encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
The attention dropout rate in the encoder.
decoder_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the decoder.
decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
The positional dropout rate in the decoder.
decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
The attention dropout rate in the decoder.
duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2):
The dropout rate in the duration predictor.
speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
The dropout rate in the speech decoder postnet.
max_source_positions (`int`, *optional*, defaults to 5000):
if `"relative"` position embeddings are used, defines the maximum source input positions.
use_masking (`bool`, *optional*, defaults to `True`):
Specifies whether to use masking in the model.
use_weighted_masking (`bool`, *optional*, defaults to `False`):
Specifies whether to use weighted masking in the model.
num_speakers (`int`, *optional*):
Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use
speaker id embedding layer.
num_languages (`int`, *optional*):
Number of languages. If set to > 1, assume that the language ids will be provided as the input and use the
language id embedding layer.
speaker_embed_dim (`int`, *optional*):
Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Specifies whether the model is an encoder-decoder.
Example:
```python
>>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig
>>> # Initializing a FastSpeech2Conformer style configuration
>>> configuration = FastSpeech2ConformerConfig()
>>> # Initializing a model from the FastSpeech2Conformer style configuration
>>> model = FastSpeech2ConformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=384, vocab_size=78, num_mel_bins=80, encoder_num_attention_heads=2, encoder_layers=4, encoder_linear_units=1536, decoder_layers=4, decoder_num_attention_heads=2, decoder_linear_units=1536, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, positionwise_conv_kernel_size=3, encoder_normalize_before=False, decoder_normalize_before=False, encoder_concat_after=False, decoder_concat_after=False, reduction_factor=1, speaking_speed=1.0, use_macaron_style_in_conformer=True, use_cnn_in_conformer=True, encoder_kernel_size=7, decoder_kernel_size=31, duration_predictor_layers=2, duration_predictor_channels=256, duration_predictor_kernel_size=3, energy_predictor_layers=2, energy_predictor_channels=256, energy_predictor_kernel_size=3, energy_predictor_dropout=0.5, energy_embed_kernel_size=1, energy_embed_dropout=0.0, stop_gradient_from_energy_predictor=False, pitch_predictor_layers=5, pitch_predictor_channels=256, pitch_predictor_kernel_size=5, pitch_predictor_dropout=0.5, pitch_embed_kernel_size=1, pitch_embed_dropout=0.0, stop_gradient_from_pitch_predictor=True, encoder_dropout_rate=0.2, encoder_positional_dropout_rate=0.2, encoder_attention_dropout_rate=0.2, decoder_dropout_rate=0.2, decoder_positional_dropout_rate=0.2, decoder_attention_dropout_rate=0.2, duration_predictor_dropout_rate=0.2, speech_decoder_postnet_dropout=0.5, max_source_positions=5000, use_masking=True, use_weighted_masking=False, num_speakers=None, num_languages=None, speaker_embed_dim=None, is_encoder_decoder=True, **kwargs):
pass
| 2 | 1 | 156 | 2 | 154 | 0 | 12 | 0.83 | 1 | 2 | 0 | 0 | 1 | 41 | 1 | 1 | 300 | 11 | 158 | 104 | 98 | 131 | 69 | 46 | 67 | 12 | 1 | 1 | 12 |
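
The `__init__` above enforces odd kernel sizes and head-divisible hidden sizes before storing any attribute. A short usage sketch of that validation, using the default values shown in the docstring:

```python
from transformers import FastSpeech2ConformerConfig

# Defaults pass all of the parity/divisibility checks in __init__
config = FastSpeech2ConformerConfig()
print(config.hidden_size, config.encoder_layers)  # 384 4

# An even kernel size trips the corresponding ValueError
try:
    FastSpeech2ConformerConfig(encoder_kernel_size=8)
except ValueError as err:
    print(err)  # encoder_kernel_size must be odd, but got 8 instead.
```
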
2,496 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py | transformers.models.fastspeech2_conformer.configuration_fastspeech2_conformer.FastSpeech2ConformerHifiGanConfig |
from ...configuration_utils import PretrainedConfig
class FastSpeech2ConformerHifiGanConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGanModel`]. It is used to
instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2Conformer
[espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
model_in_dim (`int`, *optional*, defaults to 80):
The number of frequency bins in the input log-mel spectrogram.
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the upsampling network.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The
length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[16, 16, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The
length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of
*upsample_rates*.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field
fusion (MRF) module.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
multi-receptive field fusion (MRF) module.
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation.
normalize_before (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance.
Example:
```python
>>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig
>>> # Initializing a FastSpeech2ConformerHifiGan configuration
>>> configuration = FastSpeech2ConformerHifiGanConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = FastSpeech2ConformerHifiGan(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'hifigan'
base_config_key = 'vocoder_config'
def __init__(self, model_in_dim=80, upsample_initial_channel=512, upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], initializer_range=0.01, leaky_relu_slope=0.1, normalize_before=True, **kwargs):
self.model_in_dim = model_in_dim
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.initializer_range = initializer_range
self.leaky_relu_slope = leaky_relu_slope
self.normalize_before = normalize_before
super().__init__(**kwargs)
|
class FastSpeech2ConformerHifiGanConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGanModel`]. It is used to
instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2Conformer
[espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
model_in_dim (`int`, *optional*, defaults to 80):
The number of frequency bins in the input log-mel spectrogram.
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the upsampling network.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The
length of *upsample_rates* defines the number of convolutional layers and has to match the length of
*upsample_kernel_sizes*.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[16, 16, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The
length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of
*upsample_rates*.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field
fusion (MRF) module.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
multi-receptive field fusion (MRF) module.
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation.
normalize_before (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance.
Example:
```python
>>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig
>>> # Initializing a FastSpeech2ConformerHifiGan configuration
>>> configuration = FastSpeech2ConformerHifiGanConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = FastSpeech2ConformerHifiGan(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, model_in_dim=80, upsample_initial_channel=512, upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], initializer_range=0.01, leaky_relu_slope=0.1, normalize_before=True, **kwargs):
pass
| 2 | 1 | 23 | 0 | 23 | 0 | 1 | 1.65 | 1 | 1 | 0 | 0 | 1 | 9 | 1 | 1 | 78 | 9 | 26 | 25 | 12 | 43 | 14 | 13 | 12 | 1 | 1 | 0 | 1 |
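
With the defaults documented above, `upsample_rates` and `upsample_kernel_sizes` have matching lengths, and the product of the rates fixes how many waveform samples the vocoder produces per input spectrogram frame. A small arithmetic check (illustrative only):

```python
from transformers import FastSpeech2ConformerHifiGanConfig

config = FastSpeech2ConformerHifiGanConfig()
assert len(config.upsample_rates) == len(config.upsample_kernel_sizes)

# Overall upsampling factor of the vocoder
total_upsampling = 1
for rate in config.upsample_rates:
    total_upsampling *= rate
print(total_upsampling)  # 8 * 8 * 2 * 2 = 256 samples per spectrogram frame
```
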
2,497 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py | transformers.models.fastspeech2_conformer.configuration_fastspeech2_conformer.FastSpeech2ConformerWithHifiGanConfig |
from ...configuration_utils import PretrainedConfig
from typing import Optional
class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to
instantiate a `FastSpeech2ConformerWithHifiGanModel` model according to the specified sub-models configurations,
defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and
FastSpeech2ConformerHifiGan
[espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
model_config (`typing.Dict`, *optional*):
Configuration of the text-to-speech model.
vocoder_config (`typing.Dict`, *optional*):
Configuration of the vocoder model.
model_config ([`FastSpeech2ConformerConfig`], *optional*):
Configuration of the text-to-speech model.
vocoder_config ([`FastSpeech2ConformerHiFiGanConfig`], *optional*):
Configuration of the vocoder model.
Example:
```python
>>> from transformers import (
... FastSpeech2ConformerConfig,
... FastSpeech2ConformerHifiGanConfig,
... FastSpeech2ConformerWithHifiGanConfig,
... FastSpeech2ConformerWithHifiGan,
... )
>>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
>>> model_config = FastSpeech2ConformerConfig()
>>> vocoder_config = FastSpeech2ConformerHifiGanConfig()
>>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration
>>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict())
>>> # Initializing a model (with random weights)
>>> model = FastSpeech2ConformerWithHifiGan(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'fastspeech2_conformer_with_hifigan'
sub_configs = {'model_config': FastSpeech2ConformerConfig, 'vocoder_config': FastSpeech2ConformerHifiGanConfig}
def __init__(self, model_config: Optional[dict]=None, vocoder_config: Optional[dict]=None, **kwargs):
if model_config is None:
model_config = {}
logger.info('model_config is None. initializing the model with default values.')
if vocoder_config is None:
vocoder_config = {}
logger.info('vocoder_config is None. initializing the coarse model with default values.')
self.model_config = FastSpeech2ConformerConfig(**model_config)
self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config)
super().__init__(**kwargs)
|
class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to
instantiate a `FastSpeech2ConformerWithHifiGanModel` model according to the specified sub-models configurations,
defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and
FastSpeech2ConformerHifiGan
[espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
model_config (`typing.Dict`, *optional*):
Configuration of the text-to-speech model.
vocoder_config (`typing.Dict`, *optional*):
Configuration of the vocoder model.
model_config ([`FastSpeech2ConformerConfig`], *optional*):
Configuration of the text-to-speech model.
vocoder_config ([`FastSpeech2ConformerHiFiGanConfig`], *optional*):
Configuration of the vocoder model.
Example:
```python
>>> from transformers import (
... FastSpeech2ConformerConfig,
... FastSpeech2ConformerHifiGanConfig,
... FastSpeech2ConformerWithHifiGanConfig,
... FastSpeech2ConformerWithHifiGan,
... )
>>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
>>> model_config = FastSpeech2ConformerConfig()
>>> vocoder_config = FastSpeech2ConformerHifiGanConfig()
>>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration
>>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict())
>>> # Initializing a model (with random weights)
>>> model = FastSpeech2ConformerWithHifiGan(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, model_config: Optional[dict]=None, vocoder_config: Optional[dict]=None, **kwargs):
pass
| 2 | 1 | 18 | 3 | 15 | 0 | 3 | 2.11 | 1 | 3 | 2 | 0 | 1 | 2 | 1 | 1 | 70 | 14 | 18 | 11 | 11 | 38 | 13 | 6 | 11 | 3 | 1 | 1 | 3 |
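
When neither sub-configuration is supplied, the constructor above logs an info message and falls back to default sub-configs. A brief sketch of that default path (expected values taken from the two configuration classes shown earlier):

```python
from transformers import FastSpeech2ConformerWithHifiGanConfig

# No arguments: both sub-configs are built from their defaults (an info log is emitted for each)
config = FastSpeech2ConformerWithHifiGanConfig()
print(type(config.model_config).__name__)    # FastSpeech2ConformerConfig
print(type(config.vocoder_config).__name__)  # FastSpeech2ConformerHifiGanConfig
print(config.model_config.hidden_size, config.vocoder_config.model_in_dim)  # 384 80
```
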
2,498 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerAttention |
import torch
import math
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from typing import Optional, Union
from torch import nn
class FastSpeech2ConformerAttention(nn.Module):
"""
Multi-Head attention layer with relative position encoding. Details can be found in
https://github.com/espnet/espnet/pull/2816. Paper: https://huggingface.co/papers/1901.02860.
"""
def __init__(self, config: FastSpeech2ConformerConfig, module_config):
"""Construct an FastSpeech2ConformerAttention object."""
super().__init__()
self.num_heads = module_config['num_attention_heads']
self.hidden_size = config.hidden_size
self.dim_key = self.hidden_size // self.num_heads
self.head_dim = self.hidden_size // self.num_heads
self.linear_q = nn.Linear(self.hidden_size, self.hidden_size)
self.linear_k = nn.Linear(self.hidden_size, self.hidden_size)
self.linear_v = nn.Linear(self.hidden_size, self.hidden_size)
self.linear_out = nn.Linear(self.hidden_size, self.hidden_size)
self.dropout = nn.Dropout(p=module_config['attention_dropout_rate'])
self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
def shift_relative_position_tensor(self, pos_tensor):
"""
Args:
pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
"""
zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype)
pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1)
pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))
pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, :pos_tensor.size(-1) // 2 + 1]
return pos_tensor
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, pos_emb: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False) -> tuple[torch.Tensor, torch.Tensor]:
"""
Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states
attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor.
pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
Returns:
`torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`.
"""
bsz, q_len, _ = hidden_states.size()
query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
bsz_pos = pos_emb.size(0)
pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim)
query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2)
query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2)
matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1))
matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1))
matrix_bd = self.shift_relative_position_tensor(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key)
if attention_mask is not None:
expected_size = (bsz, 1, q_len)
if attention_mask.size() != expected_size:
raise ValueError(f'Attention mask should be of size {expected_size}, but is {attention_mask.size()}')
attention_mask = attention_mask.unsqueeze(1).eq(0)
min_value = float(torch.finfo(scores.dtype).min)
scores = scores.masked_fill(attention_mask, min_value)
attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0)
else:
attn_weights = torch.softmax(scores, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2))
attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1)
attn_output = self.linear_out(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
|
class FastSpeech2ConformerAttention(nn.Module):
'''
Multi-Head attention layer with relative position encoding. Details can be found in
https://github.com/espnet/espnet/pull/2816. Paper: https://huggingface.co/papers/1901.02860.
'''
def __init__(self, config: FastSpeech2ConformerConfig, module_config):
'''Construct an FastSpeech2ConformerAttention object.'''
pass
def shift_relative_position_tensor(self, pos_tensor):
'''
Args:
pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
'''
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, pos_emb: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False) -> tuple[torch.Tensor, torch.Tensor]:
'''
Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states
attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor.
pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
Returns:
`torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`.
'''
pass
| 4 | 4 | 34 | 5 | 19 | 11 | 2 | 0.63 | 1 | 5 | 1 | 0 | 3 | 12 | 3 | 13 | 110 | 17 | 57 | 39 | 47 | 36 | 50 | 33 | 46 | 4 | 1 | 2 | 6 |
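
`shift_relative_position_tensor` above is the usual relative-shift trick: it realigns a `(batch, heads, time1, 2*time1 - 1)` score tensor so that each query row reads the column for its own relative offset. A small self-contained sketch reproducing the same operations and checking the resulting index mapping on a toy tensor:

```python
import torch

# Tiny stand-in for the (batch, heads, time1, 2*time1 - 1) position-score tensor
time1 = 3
scores = torch.arange(time1 * (2 * time1 - 1), dtype=torch.float32).view(1, 1, time1, 2 * time1 - 1)

# Same sequence of ops as FastSpeech2ConformerAttention.shift_relative_position_tensor
zero_pad = torch.zeros((*scores.size()[:3], 1), dtype=scores.dtype)
padded = torch.cat([zero_pad, scores], dim=-1)
padded = padded.view(*scores.size()[:2], scores.size(3) + 1, scores.size(2))
shifted = padded[:, :, 1:].view_as(scores)[:, :, :, : scores.size(-1) // 2 + 1]

# After the shift, shifted[..., i, j] == scores[..., i, (time1 - 1) + j - i],
# i.e. query row i is aligned with the column that holds its relative offset j - i.
for i in range(time1):
    for j in range(time1):
        assert shifted[0, 0, i, j] == scores[0, 0, i, (time1 - 1) + j - i]
```
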
2,499 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerBatchNormConvLayer |
from torch import nn
class FastSpeech2ConformerBatchNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
if layer_id == 0:
in_conv_dim = config.num_mel_bins
else:
in_conv_dim = config.speech_decoder_postnet_units
if layer_id == config.speech_decoder_postnet_layers - 1:
out_conv_dim = config.num_mel_bins
else:
out_conv_dim = config.speech_decoder_postnet_units
self.conv = nn.Conv1d(in_conv_dim, out_conv_dim, kernel_size=config.speech_decoder_postnet_kernel, stride=1, padding=(config.speech_decoder_postnet_kernel - 1) // 2, bias=False)
self.batch_norm = nn.BatchNorm1d(out_conv_dim)
if layer_id < config.speech_decoder_postnet_layers - 1:
self.activation = nn.Tanh()
else:
self.activation = None
self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.batch_norm(hidden_states)
if self.activation is not None:
hidden_states = self.activation(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class FastSpeech2ConformerBatchNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 18 | 3 | 16 | 0 | 3 | 0 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 38 | 6 | 32 | 9 | 29 | 0 | 22 | 9 | 19 | 4 | 1 | 1 | 6 |
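
The postnet layer above widens the mel channels into `speech_decoder_postnet_units` while keeping the time axis unchanged (kernel 5, padding 2 with the defaults). A quick shape check using the default configuration; the internal import path is the one recorded for this class:

```python
import torch
from transformers import FastSpeech2ConformerConfig
from transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer import (
    FastSpeech2ConformerBatchNormConvLayer,
)

config = FastSpeech2ConformerConfig()

# First postnet layer maps num_mel_bins (80) channels to speech_decoder_postnet_units (256)
layer = FastSpeech2ConformerBatchNormConvLayer(config, layer_id=0).eval()
spectrogram = torch.randn(1, config.num_mel_bins, 100)  # (batch, channels, frames)
with torch.no_grad():
    out = layer(spectrogram)
print(out.shape)  # torch.Size([1, 256, 100])
```
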