""" PyTorch MegatronBERT model.""" |
|
|
|
|
|
|
|
|
import math |
|
|
from typing import Optional, Tuple, Union |
|
|
|
|
|
import torch |
|
|
import torch.utils.checkpoint |
|
|
from torch import Tensor, nn
|
|
from torch.nn import CrossEntropyLoss |
|
|
|
|
|
from transformers.activations import ACT2FN |
|
|
from transformers.modeling_outputs import ( |
|
|
BaseModelOutputWithPastAndCrossAttentions, |
|
|
BaseModelOutputWithPoolingAndCrossAttentions, |
|
|
MaskedLMOutput, |
|
|
) |
|
|
from transformers import PreTrainedModel |
|
|
from transformers.pytorch_utils import (
    ALL_LAYERNORM_LAYERS,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
|
|
from transformers.utils import logging

from .configuration_rnabert import RNABertConfig

logger = logging.get_logger(__name__)
|
|
|
|
|
VARIANTS = { |
|
|
"aido_rna_1m_mars": "genbio-ai/AIDO.RNA-1M-MARS", |
|
|
"aido_rna_25m_mars": "genbio-ai/AIDO.RNA-25M-MARS", |
|
|
"aido_rna_300m_mars": "genbio-ai/AIDO.RNA-300M-MARS", |
|
|
"aido_rna_650m": "genbio-ai/AIDO.RNA-650M", |
|
|
"aido_rna_650m_cds": "genbio-ai/AIDO.RNA-650M-CDS", |
|
|
"aido_rna_1b600m": "genbio-ai/AIDO.RNA-1.6B", |
|
|
"aido_rna_1b600m_cds": "genbio-ai/AIDO.RNA-1.6B-CDS", |
|
|
} |
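
# A hedged usage sketch (not part of the module's API): the mapping above lets
# short variant names resolve to Hugging Face Hub repository ids, e.g.
#
#     repo_id = VARIANTS.get("aido_rna_650m", "aido_rna_650m")
#     # -> "genbio-ai/AIDO.RNA-650M"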
|
|
|
|
|
class RNABertEmbeddings(nn.Module): |
|
|
"""Construct the embeddings from word, position and token_type embeddings.""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.word_embeddings = nn.Embedding( |
|
|
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id |
|
|
) |
|
|
if config.position_embedding_type != "rope": |
|
|
self.position_embeddings = nn.Embedding( |
|
|
config.max_position_embeddings, config.hidden_size |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
|
|
|
|
|
self.register_buffer( |
|
|
"position_ids", |
|
|
torch.arange(config.max_position_embeddings).expand((1, -1)), |
|
|
persistent=False, |
|
|
) |
|
|
self.position_embedding_type = getattr( |
|
|
config, "position_embedding_type", "rope" |
|
|
) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
token_type_ids: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
        inputs_embeds: Optional[torch.FloatTensor] = None,
|
|
past_key_values_length: int = 0, |
|
|
) -> torch.Tensor: |
|
|
if input_ids is not None: |
|
|
input_shape = input_ids.size() |
|
|
else: |
|
|
input_shape = inputs_embeds.size()[:-1] |
|
|
|
|
|
seq_length = input_shape[1] |
|
|
|
|
|
if position_ids is None: |
|
|
position_ids = self.position_ids[ |
|
|
:, past_key_values_length : seq_length + past_key_values_length |
|
|
] |
|
|
|
|
|
if token_type_ids is None: |
|
|
token_type_ids = torch.zeros( |
|
|
input_shape, dtype=torch.long, device=self.position_ids.device |
|
|
) |
|
|
|
|
|
if inputs_embeds is None: |
|
|
inputs_embeds = self.word_embeddings(input_ids) |
|
|
|
|
|
|
|
|
|
|
|
embeddings = inputs_embeds |
|
|
if self.position_embedding_type == "absolute": |
|
|
position_embeddings = self.position_embeddings(position_ids) |
|
|
embeddings += position_embeddings |
|
|
|
|
|
|
|
|
|
|
|
embeddings = self.dropout(embeddings) |
|
|
return embeddings |
|
|
|
|
|
|
|
|
|
|
|
class RNABertSelfAttention(nn.Module): |
|
|
def __init__(self, config, position_embedding_type=None): |
|
|
super().__init__() |
|
|
if config.hidden_size % config.num_attention_heads != 0 and not hasattr( |
|
|
config, "embedding_size" |
|
|
): |
|
|
raise ValueError( |
|
|
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " |
|
|
f"heads ({config.num_attention_heads})" |
|
|
) |
|
|
|
|
|
self.num_attention_heads = config.num_attention_heads |
|
|
self.attention_head_size = int(config.hidden_size / config.num_attention_heads) |
|
|
self.all_head_size = self.num_attention_heads * self.attention_head_size |
|
|
|
|
|
self.query = nn.Linear( |
|
|
config.hidden_size, self.all_head_size, bias=config.add_linear_bias |
|
|
) |
|
|
self.key = nn.Linear( |
|
|
config.hidden_size, self.all_head_size, bias=config.add_linear_bias |
|
|
) |
|
|
self.value = nn.Linear( |
|
|
config.hidden_size, self.all_head_size, bias=config.add_linear_bias |
|
|
) |
|
|
|
|
|
self.dropout = nn.Dropout(config.attention_probs_dropout_prob) |
|
|
self.position_embedding_type = position_embedding_type or getattr( |
|
|
config, "position_embedding_type", "absolute" |
|
|
) |
|
|
if ( |
|
|
self.position_embedding_type == "relative_key" |
|
|
or self.position_embedding_type == "relative_key_query" |
|
|
): |
|
|
self.max_position_embeddings = config.max_position_embeddings |
|
|
self.distance_embedding = nn.Embedding( |
|
|
2 * config.max_position_embeddings - 1, self.attention_head_size |
|
|
) |
|
|
|
|
|
self.is_decoder = config.is_decoder |
|
|
|
|
|
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: |
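        # [batch, seq, all_head_size] -> [batch, num_heads, seq, head_size].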
|
|
new_x_shape = x.size()[:-1] + ( |
|
|
self.num_attention_heads, |
|
|
self.attention_head_size, |
|
|
) |
|
|
x = x.view(new_x_shape) |
|
|
return x.permute(0, 2, 1, 3) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None, |
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
rotary_pos_emb=None, |
|
|
) -> Tuple[torch.Tensor]: |
|
|
mixed_query_layer = self.query(hidden_states) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
is_cross_attention = encoder_hidden_states is not None |
|
|
|
|
|
if is_cross_attention and past_key_value is not None: |
|
|
|
|
|
key_layer = past_key_value[0] |
|
|
value_layer = past_key_value[1] |
|
|
attention_mask = encoder_attention_mask |
|
|
elif is_cross_attention: |
|
|
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) |
|
|
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) |
|
|
attention_mask = encoder_attention_mask |
|
|
elif past_key_value is not None: |
|
|
key_layer = self.transpose_for_scores(self.key(hidden_states)) |
|
|
value_layer = self.transpose_for_scores(self.value(hidden_states)) |
|
|
key_layer = torch.cat([past_key_value[0], key_layer], dim=2) |
|
|
value_layer = torch.cat([past_key_value[1], value_layer], dim=2) |
|
|
else: |
|
|
key_layer = self.transpose_for_scores(self.key(hidden_states)) |
|
|
value_layer = self.transpose_for_scores(self.value(hidden_states)) |
|
|
|
|
|
|
|
|
query_layer = self.transpose_for_scores(mixed_query_layer) |
|
|
|
|
|
if rotary_pos_emb is not None: |
|
|
            if not isinstance(rotary_pos_emb, tuple):
                # A single tensor means queries and keys share the same rotary embedding.
                rotary_pos_emb = (rotary_pos_emb,) * 2
|
|
|
|
|
q_pos_emb, k_pos_emb = rotary_pos_emb |
|
|
|
|
|
|
|
|
query_layer = query_layer.permute(2, 0, 1, 3).contiguous() |
|
|
key_layer = key_layer.permute(2, 0, 1, 3).contiguous() |
|
|
|
|
|
query_layer = apply_rotary_pos_emb( |
|
|
query_layer, q_pos_emb |
|
|
) |
|
|
key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb) |
|
|
|
|
|
|
|
|
query_layer = query_layer.permute(1, 2, 0, 3).contiguous() |
|
|
key_layer = key_layer.permute(1, 2, 0, 3).contiguous() |
|
|
|
|
|
use_cache = past_key_value is not None |
|
|
        if self.is_decoder:
            # Cache the key/value states so that subsequent decoding steps can
            # reuse them instead of recomputing the full prefix.
            past_key_value = (key_layer, value_layer)
|
|
|
|
|
|
|
|
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) |
|
|
|
|
|
attention_scores = attention_scores / math.sqrt(self.attention_head_size) |
|
|
if attention_mask is not None: |
|
|
|
|
|
attention_scores = attention_scores + attention_mask.to( |
|
|
attention_scores.dtype |
|
|
) |
|
|
|
|
|
|
|
|
attention_probs = nn.functional.softmax(attention_scores, dim=-1) |
|
|
|
|
|
        if attention_mask is not None:
            # Zero out probabilities at masked positions; softmax over a fully
            # masked row would otherwise yield a uniform distribution.
            no_prob_mask = attention_mask < -1e-5
            attention_probs = attention_probs.masked_fill(no_prob_mask, 0.0)
|
|
|
|
|
|
|
|
attention_probs = self.dropout(attention_probs) |
|
|
|
|
|
|
|
|
if head_mask is not None: |
|
|
attention_probs = attention_probs * head_mask |
|
|
|
|
|
context_layer = torch.matmul(attention_probs, value_layer) |
|
|
|
|
|
context_layer = context_layer.permute(0, 2, 1, 3).contiguous() |
|
|
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) |
|
|
context_layer = context_layer.view(new_context_layer_shape) |
|
|
|
|
|
outputs = ( |
|
|
(context_layer, attention_probs) if output_attentions else (context_layer,) |
|
|
) |
|
|
|
|
|
if self.is_decoder: |
|
|
outputs = outputs + (past_key_value,) |
|
|
return outputs |
|
|
|
|
|
|
|
|
|
|
|
class RNABertSelfOutput(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.dense = nn.Linear( |
|
|
config.hidden_size, config.hidden_size, bias=config.add_linear_bias |
|
|
) |
|
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
|
|
def forward( |
|
|
self, hidden_states: torch.Tensor, residual: torch.Tensor |
|
|
) -> torch.Tensor: |
|
|
hidden_states = self.dense(hidden_states) |
|
|
hidden_states = self.dropout(hidden_states) |
|
|
return residual + hidden_states |
|
|
|
|
|
|
|
|
|
|
|
class RNABertAttention(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.ln = config.norm_cls(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
|
|
self.self = RNABertSelfAttention(config) |
|
|
self.output = RNABertSelfOutput(config) |
|
|
self.pruned_heads = set() |
|
|
|
|
|
def prune_heads(self, heads): |
|
|
if len(heads) == 0: |
|
|
return |
|
|
heads, index = find_pruneable_heads_and_indices( |
|
|
heads, |
|
|
self.self.num_attention_heads, |
|
|
self.self.attention_head_size, |
|
|
self.pruned_heads, |
|
|
) |
|
|
|
|
|
|
|
|
self.self.query = prune_linear_layer(self.self.query, index) |
|
|
self.self.key = prune_linear_layer(self.self.key, index) |
|
|
self.self.value = prune_linear_layer(self.self.value, index) |
|
|
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) |
|
|
|
|
|
|
|
|
self.self.num_attention_heads = self.self.num_attention_heads - len(heads) |
|
|
self.self.all_head_size = ( |
|
|
self.self.attention_head_size * self.self.num_attention_heads |
|
|
) |
|
|
self.pruned_heads = self.pruned_heads.union(heads) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None, |
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
rotary_pos_emb=None, |
|
|
) -> Tuple[torch.Tensor]: |
|
|
|
|
|
ln_outputs = self.ln(hidden_states) |
|
|
self_outputs = self.self( |
|
|
ln_outputs, |
|
|
attention_mask, |
|
|
head_mask, |
|
|
encoder_hidden_states, |
|
|
encoder_attention_mask, |
|
|
past_key_value, |
|
|
output_attentions, |
|
|
rotary_pos_emb, |
|
|
) |
|
|
attention_output = self.output(self_outputs[0], hidden_states) |
|
|
        outputs = (attention_output,) + self_outputs[1:]
|
|
return outputs |
|
|
|
|
|
|
|
|
|
|
|
class RNABertMLP(nn.Module): |
|
|
def __init__(self, config: RNABertConfig): |
|
|
super().__init__() |
|
|
assert config.hidden_act == "swiglu", "Only swiglu is supported." |
|
|
self.up_proj = nn.Linear( |
|
|
config.hidden_size, config.intermediate_size, bias=config.add_linear_bias |
|
|
) |
|
|
self.down_proj = nn.Linear( |
|
|
config.intermediate_size, config.hidden_size, bias=config.add_linear_bias |
|
|
) |
|
|
self.gate_proj = nn.Linear( |
|
|
config.hidden_size, config.intermediate_size, bias=config.add_linear_bias |
|
|
) |
|
|
        self.intermediate_act_fn = ACT2FN["silu"]
|
|
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
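        # SwiGLU: down_proj(SiLU(gate_proj(x)) * up_proj(x)).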
|
|
down_proj = self.down_proj( |
|
|
self.intermediate_act_fn(self.gate_proj(hidden_states)) |
|
|
* self.up_proj(hidden_states) |
|
|
) |
|
|
return down_proj |
|
|
|
|
|
|
|
|
|
|
|
class RNABertOutput(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
|
|
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
|
|
def forward( |
|
|
self, hidden_states: torch.Tensor, input_tensor: torch.Tensor |
|
|
) -> torch.Tensor: |
|
|
|
|
|
hidden_states = self.dropout(hidden_states) |
|
|
return input_tensor + hidden_states |
|
|
|
|
|
|
|
|
|
|
|
class RNABertLayer(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.chunk_size_feed_forward = config.chunk_size_feed_forward |
|
|
self.seq_len_dim = 1 |
|
|
self.attention = RNABertAttention(config) |
|
|
self.is_decoder = config.is_decoder |
|
|
self.add_cross_attention = config.add_cross_attention |
|
|
if self.add_cross_attention: |
|
|
if not self.is_decoder: |
|
|
raise TypeError( |
|
|
f"{self} should be used as a decoder model if cross attention is added" |
|
|
) |
|
|
self.crossattention = RNABertAttention(config) |
|
|
self.ln = config.norm_cls(config.hidden_size, eps=config.layer_norm_eps) |
|
|
self.mlp = RNABertMLP(config) |
|
|
self.output = RNABertOutput(config) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None, |
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
rotary_pos_emb=None, |
|
|
) -> Tuple[torch.Tensor]: |
|
|
|
|
|
self_attn_past_key_value = ( |
|
|
past_key_value[:2] if past_key_value is not None else None |
|
|
) |
|
|
self_attention_outputs = self.attention( |
|
|
hidden_states, |
|
|
attention_mask, |
|
|
head_mask, |
|
|
output_attentions=output_attentions, |
|
|
past_key_value=self_attn_past_key_value, |
|
|
rotary_pos_emb=rotary_pos_emb, |
|
|
) |
|
|
attention_output = self_attention_outputs[0] |
|
|
|
|
|
|
|
|
if self.is_decoder: |
|
|
outputs = self_attention_outputs[1:-1] |
|
|
present_key_value = self_attention_outputs[-1] |
|
|
else: |
|
|
            outputs = self_attention_outputs[1:]
|
|
|
|
|
cross_attn_present_key_value = None |
|
|
if self.is_decoder and encoder_hidden_states is not None: |
|
|
if not hasattr(self, "crossattention"): |
|
|
raise AttributeError( |
|
|
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" |
|
|
" by setting `config.add_cross_attention=True`" |
|
|
) |
|
|
|
|
|
|
|
|
cross_attn_past_key_value = ( |
|
|
past_key_value[-2:] if past_key_value is not None else None |
|
|
) |
|
|
cross_attention_outputs = self.crossattention( |
|
|
attention_output, |
|
|
attention_mask, |
|
|
head_mask, |
|
|
encoder_hidden_states, |
|
|
encoder_attention_mask, |
|
|
cross_attn_past_key_value, |
|
|
output_attentions, |
|
|
) |
|
|
attention_output = cross_attention_outputs[0] |
|
|
outputs = ( |
|
|
outputs + cross_attention_outputs[1:-1] |
|
|
) |
|
|
|
|
|
|
|
|
cross_attn_present_key_value = cross_attention_outputs[-1] |
|
|
present_key_value = present_key_value + cross_attn_present_key_value |
|
|
|
|
|
layer_output = apply_chunking_to_forward( |
|
|
self.feed_forward_chunk, |
|
|
self.chunk_size_feed_forward, |
|
|
self.seq_len_dim, |
|
|
attention_output, |
|
|
) |
|
|
outputs = (layer_output,) + outputs |
|
|
|
|
|
|
|
|
if self.is_decoder: |
|
|
outputs = outputs + (present_key_value,) |
|
|
|
|
|
return outputs |
|
|
|
|
|
def feed_forward_chunk(self, attention_output): |
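        # Pre-LN feed-forward block: LayerNorm -> SwiGLU MLP -> dropout + residual add.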
|
|
|
|
|
ln_output = self.ln(attention_output) |
|
|
mlp_output = self.mlp(ln_output) |
|
|
layer_output = self.output(mlp_output, attention_output) |
|
|
return layer_output |
|
|
|
|
|
|
|
|
class RnaRMSNorm(nn.Module): |
|
|
def __init__(self, hidden_size, eps=1e-6): |
|
|
""" |
|
|
same as LlamaRMSNorm |
|
|
""" |
|
|
super().__init__() |
|
|
self.weight = nn.Parameter(torch.ones(hidden_size)) |
|
|
self.variance_epsilon = eps |
|
|
|
|
|
def forward(self, hidden_states): |
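        # RMSNorm in float32: x * rsqrt(mean(x^2) + eps), scaled by `weight`.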
|
|
input_dtype = hidden_states.dtype |
|
|
hidden_states = hidden_states.to(torch.float32) |
|
|
variance = hidden_states.pow(2).mean(-1, keepdim=True) |
|
|
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
|
|
return self.weight * hidden_states.to(input_dtype) |
|
|
|
|
|
|
|
|
ALL_LAYERNORM_LAYERS.append(RnaRMSNorm)
|
|
|
|
|
|
|
|
class RNABertEncoder(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
self.layer = nn.ModuleList( |
|
|
[RNABertLayer(config) for _ in range(config.num_hidden_layers)] |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
self.ln = config.norm_cls(config.hidden_size, eps=config.layer_norm_eps) |
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None, |
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
output_hidden_states: Optional[bool] = False, |
|
|
return_dict: Optional[bool] = True, |
|
|
rotary_pos_emb: Optional[torch.FloatTensor] = None, |
|
|
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: |
|
|
if self.gradient_checkpointing and self.training: |
|
|
if use_cache: |
|
|
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
|
|
use_cache = False |
|
|
all_hidden_states = () if output_hidden_states else None |
|
|
all_self_attentions = () if output_attentions else None |
|
|
all_cross_attentions = ( |
|
|
() if output_attentions and self.config.add_cross_attention else None |
|
|
) |
|
|
|
|
|
next_decoder_cache = () if use_cache else None |
|
|
for i, layer_module in enumerate(self.layer): |
|
|
if output_hidden_states: |
|
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
|
|
layer_head_mask = head_mask[i] if head_mask is not None else None |
|
|
past_key_value = past_key_values[i] if past_key_values is not None else None |
|
|
|
|
|
if self.gradient_checkpointing and self.training: |
|
|
layer_outputs = self._gradient_checkpointing_func( |
|
|
layer_module.__call__, |
|
|
hidden_states, |
|
|
attention_mask, |
|
|
layer_head_mask, |
|
|
encoder_hidden_states, |
|
|
encoder_attention_mask, |
|
|
past_key_value, |
|
|
output_attentions, |
|
|
rotary_pos_emb, |
|
|
) |
|
|
else: |
|
|
layer_outputs = layer_module( |
|
|
hidden_states, |
|
|
attention_mask, |
|
|
layer_head_mask, |
|
|
encoder_hidden_states, |
|
|
encoder_attention_mask, |
|
|
past_key_value, |
|
|
output_attentions, |
|
|
rotary_pos_emb, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
hidden_states = layer_outputs[0] |
|
|
if use_cache: |
|
|
next_decoder_cache += (layer_outputs[-1],) |
|
|
if output_attentions: |
|
|
all_self_attentions = all_self_attentions + (layer_outputs[1],) |
|
|
if self.config.add_cross_attention: |
|
|
all_cross_attentions = all_cross_attentions + (layer_outputs[2],) |
|
|
|
|
|
|
|
|
hidden_states = self.ln(hidden_states) |
|
|
|
|
|
if output_hidden_states: |
|
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
|
|
if not return_dict: |
|
|
return tuple( |
|
|
v |
|
|
for v in [ |
|
|
hidden_states, |
|
|
next_decoder_cache, |
|
|
all_hidden_states, |
|
|
all_self_attentions, |
|
|
all_cross_attentions, |
|
|
] |
|
|
if v is not None |
|
|
) |
|
|
return BaseModelOutputWithPastAndCrossAttentions( |
|
|
last_hidden_state=hidden_states, |
|
|
past_key_values=next_decoder_cache, |
|
|
hidden_states=all_hidden_states, |
|
|
attentions=all_self_attentions, |
|
|
cross_attentions=all_cross_attentions, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
class RNABertPooler(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.dense = nn.Linear( |
|
|
config.hidden_size, config.hidden_size, bias=config.add_linear_bias |
|
|
) |
|
|
self.activation = nn.Tanh() |
|
|
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
|
|
|
|
|
|
|
|
first_token_tensor = hidden_states[:, 0] |
|
|
pooled_output = self.dense(first_token_tensor) |
|
|
pooled_output = self.activation(pooled_output) |
|
|
return pooled_output |
|
|
|
|
|
|
|
|
|
|
|
class RNABertPredictionHeadTransform(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.dense = nn.Linear( |
|
|
config.hidden_size, config.hidden_size |
|
|
) |
|
|
|
|
|
self.transform_act_fn = ACT2FN["gelu"] |
|
|
|
|
|
if config.normalization_type == "RMSNorm": |
|
|
self.LayerNorm = RnaRMSNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
else: |
|
|
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
|
|
hidden_states = self.dense(hidden_states) |
|
|
hidden_states = self.transform_act_fn(hidden_states) |
|
|
hidden_states = self.LayerNorm(hidden_states) |
|
|
return hidden_states |
|
|
|
|
|
|
|
|
|
|
|
class RNABertLMPredictionHead(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.transform = RNABertPredictionHeadTransform(config) |
|
|
|
|
|
|
|
|
|
|
|
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) |
|
|
|
|
|
self.bias = nn.Parameter(torch.zeros(config.vocab_size)) |
|
|
|
|
|
|
|
|
self.decoder.bias = self.bias |
|
|
|
|
|
def forward(self, hidden_states): |
|
|
hidden_states = self.transform(hidden_states) |
|
|
hidden_states = self.decoder(hidden_states) |
|
|
return hidden_states |
|
|
|
|
|
|
|
|
|
|
|
class RNABertOnlyMLMHead(nn.Module): |
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.predictions = RNABertLMPredictionHead(config) |
|
|
|
|
|
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: |
|
|
prediction_scores = self.predictions(sequence_output) |
|
|
return prediction_scores |
|
|
|
|
|
|
|
|
class RNABertPreTrainedModel(PreTrainedModel): |
|
|
""" |
|
|
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
|
|
models. |
|
|
""" |
|
|
|
|
|
config_class = RNABertConfig |
|
|
|
|
|
base_model_prefix = "bert" |
|
|
supports_gradient_checkpointing = True |
|
|
|
|
|
def _init_weights(self, module): |
|
|
"""Initialize the weights""" |
|
|
if isinstance(module, (nn.Linear, nn.Embedding)): |
|
|
|
|
|
|
|
|
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
|
|
elif isinstance(module, nn.LayerNorm): |
|
|
module.bias.data.zero_() |
|
|
module.weight.data.fill_(1.0) |
|
|
elif isinstance(module, RnaRMSNorm): |
|
|
module.weight.data.fill_(1.0) |
|
|
|
|
|
if isinstance(module, nn.Linear) and module.bias is not None: |
|
|
module.bias.data.zero_() |
|
|
|
|
|
|
|
|
|
|
|
class RNABertModel(RNABertPreTrainedModel): |
|
|
""" |
|
|
|
|
|
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of |
|
|
cross-attention is added between the self-attention layers, following the architecture described in [Attention is |
|
|
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, |
|
|
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. |
|
|
|
|
|
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
    pass.
|
|
""" |
|
|
|
|
|
@classmethod |
|
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): |
|
|
wrapper_config = kwargs.pop("config", None) |
|
|
if wrapper_config is None: |
|
|
raise ValueError("Config must be provided") |
|
|
|
|
|
base_model = VARIANTS.get(wrapper_config.base_model, wrapper_config.base_model) |
|
|
|
|
|
|
|
|
base_config = RNABertConfig.from_pretrained(base_model, **kwargs) |
|
|
|
|
|
|
|
|
base_config.base_model = wrapper_config.base_model |
|
|
|
|
|
return super().from_pretrained( |
|
|
base_model, |
|
|
*model_args, |
|
|
config=base_config, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
def __init__(self, config, add_pooling_layer=False): |
|
|
super().__init__(config) |
|
|
self.config = config |
|
|
if config.normalization_type == "RMSNorm": |
|
|
self.config.norm_cls = RnaRMSNorm |
|
|
else: |
|
|
assert config.normalization_type == "LayerNorm" |
|
|
self.config.norm_cls = nn.LayerNorm |
|
|
self.embeddings = RNABertEmbeddings(config) |
|
|
self.encoder = RNABertEncoder(config) |
|
|
|
|
|
self.pooler = RNABertPooler(config) if add_pooling_layer else None |
|
|
|
|
|
|
|
|
if config.position_embedding_type == "rope": |
|
|
rotary_dim = config.hidden_size // config.num_attention_heads |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.rotary_pos_emb = RotaryEmbedding(rotary_dim, config.rotary_percent) |
|
|
|
|
|
|
|
|
del self.config.norm_cls |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_input_embeddings(self): |
|
|
return self.embeddings.word_embeddings |
|
|
|
|
|
def set_input_embeddings(self, value): |
|
|
self.embeddings.word_embeddings = value |
|
|
|
|
|
def _prune_heads(self, heads_to_prune): |
|
|
""" |
|
|
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base |
|
|
class PreTrainedModel |
|
|
""" |
|
|
for layer, heads in heads_to_prune.items(): |
|
|
self.encoder.layer[layer].attention.prune_heads(heads) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
token_type_ids: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None, |
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: |
|
|
r""" |
|
|
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
|
|
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if |
|
|
the model is configured as a decoder. |
|
|
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): |
|
|
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in |
|
|
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: |
|
|
|
|
|
- 1 for tokens that are **not masked**, |
|
|
- 0 for tokens that are **masked**. |
|
|
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): |
|
|
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. |
|
|
|
|
|
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that |
|
|
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all |
|
|
`decoder_input_ids` of shape `(batch_size, sequence_length)`. |
|
|
use_cache (`bool`, *optional*): |
|
|
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
|
|
`past_key_values`). |
|
|
""" |
|
|
output_attentions = ( |
|
|
output_attentions |
|
|
if output_attentions is not None |
|
|
else self.config.output_attentions |
|
|
) |
|
|
output_hidden_states = ( |
|
|
output_hidden_states |
|
|
if output_hidden_states is not None |
|
|
else self.config.output_hidden_states |
|
|
) |
|
|
return_dict = ( |
|
|
return_dict if return_dict is not None else self.config.use_return_dict |
|
|
) |
|
|
|
|
|
if self.config.is_decoder: |
|
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
else: |
|
|
use_cache = False |
|
|
|
|
|
if input_ids is not None and inputs_embeds is not None: |
|
|
raise ValueError( |
|
|
"You cannot specify both input_ids and inputs_embeds at the same time" |
|
|
) |
|
|
elif input_ids is not None: |
|
|
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
|
|
input_shape = input_ids.size() |
|
|
elif inputs_embeds is not None: |
|
|
input_shape = inputs_embeds.size()[:-1] |
|
|
else: |
|
|
raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
|
|
|
batch_size, seq_length = input_shape |
|
|
device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
|
|
|
|
|
|
past_key_values_length = ( |
|
|
past_key_values[0][0].shape[2] if past_key_values is not None else 0 |
|
|
) |
|
|
|
|
|
if attention_mask is None: |
|
|
attention_mask = torch.ones( |
|
|
((batch_size, seq_length + past_key_values_length)), device=device |
|
|
) |
|
|
if token_type_ids is None: |
|
|
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        # Build a [batch, 1, seq, seq] boolean mask (True = masked) and convert it
        # into an additive mask: masked positions get a large negative value so
        # they vanish under softmax.
        extended_attention_mask = bert_extended_attention_mask(attention_mask)
        extended_attention_mask = extended_attention_mask * torch.finfo(torch.float).min
|
|
|
|
|
|
|
|
|
|
|
if self.config.is_decoder and encoder_hidden_states is not None: |
|
|
encoder_batch_size, encoder_sequence_length, _ = ( |
|
|
encoder_hidden_states.size() |
|
|
) |
|
|
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) |
|
|
if encoder_attention_mask is None: |
|
|
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) |
|
|
encoder_extended_attention_mask = self.invert_attention_mask( |
|
|
encoder_attention_mask |
|
|
) |
|
|
else: |
|
|
encoder_extended_attention_mask = None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) |
|
|
|
|
|
|
|
|
rotary_pos_emb = None |
|
|
if self.config.position_embedding_type == "rope": |
|
|
            # Use seq_length (derived from input_ids or inputs_embeds) so the
            # inputs_embeds-only path does not fail on input_ids being None.
            rotary_pos_emb = self.rotary_pos_emb(seq_length)
|
|
|
|
|
embedding_output = self.embeddings( |
|
|
input_ids=input_ids, |
|
|
position_ids=position_ids, |
|
|
token_type_ids=token_type_ids, |
|
|
inputs_embeds=inputs_embeds, |
|
|
past_key_values_length=past_key_values_length, |
|
|
) |
|
|
encoder_outputs = self.encoder( |
|
|
embedding_output, |
|
|
attention_mask=extended_attention_mask, |
|
|
head_mask=head_mask, |
|
|
encoder_hidden_states=encoder_hidden_states, |
|
|
encoder_attention_mask=encoder_extended_attention_mask, |
|
|
past_key_values=past_key_values, |
|
|
use_cache=use_cache, |
|
|
output_attentions=output_attentions, |
|
|
output_hidden_states=output_hidden_states, |
|
|
return_dict=return_dict, |
|
|
rotary_pos_emb=rotary_pos_emb, |
|
|
) |
|
|
sequence_output = encoder_outputs[0] |
|
|
pooled_output = ( |
|
|
self.pooler(sequence_output) if self.pooler is not None else None |
|
|
) |
|
|
|
|
|
if not return_dict: |
|
|
return (sequence_output, pooled_output) + encoder_outputs[1:] |
|
|
|
|
|
return BaseModelOutputWithPoolingAndCrossAttentions( |
|
|
last_hidden_state=sequence_output, |
|
|
pooler_output=pooled_output, |
|
|
past_key_values=encoder_outputs.past_key_values, |
|
|
hidden_states=encoder_outputs.hidden_states, |
|
|
attentions=encoder_outputs.attentions, |
|
|
cross_attentions=encoder_outputs.cross_attentions, |
|
|
) |
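
# A hedged usage sketch for the encoder (assumes the checkpoint's config carries
# the fields this module expects, e.g. `normalization_type` and
# `position_embedding_type`):
#
#     config = RNABertConfig.from_pretrained("genbio-ai/AIDO.RNA-650M")
#     model = RNABertModel(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 16))
#     out = model(input_ids=input_ids)
#     out.last_hidden_state  # [1, 16, config.hidden_size]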
|
|
|
|
|
class RNABertForMaskedLM(RNABertPreTrainedModel): |
|
|
_tied_weights_keys = ["cls.predictions.decoder"] |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__(config) |
|
|
|
|
|
if config.is_decoder: |
|
|
            logger.warning(
                "If you want to use `RNABertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
|
|
|
|
|
self.bert = RNABertModel(config, add_pooling_layer=False) |
|
|
self.cls = RNABertOnlyMLMHead(config) |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_output_embeddings(self): |
|
|
return self.cls.predictions.decoder |
|
|
|
|
|
def set_output_embeddings(self, new_embeddings): |
|
|
self.cls.predictions.decoder = new_embeddings |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
token_type_ids: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
encoder_hidden_states: Optional[torch.FloatTensor] = None, |
|
|
encoder_attention_mask: Optional[torch.FloatTensor] = None, |
|
|
labels: Optional[torch.LongTensor] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
) -> Union[Tuple, MaskedLMOutput]: |
|
|
r""" |
|
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
|
|
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., |
|
|
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the |
|
|
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` |
|
|
""" |
|
|
|
|
|
return_dict = ( |
|
|
return_dict if return_dict is not None else self.config.use_return_dict |
|
|
) |
|
|
|
|
|
outputs = self.bert( |
|
|
input_ids, |
|
|
attention_mask=attention_mask, |
|
|
token_type_ids=token_type_ids, |
|
|
position_ids=position_ids, |
|
|
head_mask=head_mask, |
|
|
inputs_embeds=inputs_embeds, |
|
|
encoder_hidden_states=encoder_hidden_states, |
|
|
encoder_attention_mask=encoder_attention_mask, |
|
|
output_attentions=output_attentions, |
|
|
output_hidden_states=output_hidden_states, |
|
|
return_dict=return_dict, |
|
|
) |
|
|
|
|
|
sequence_output = outputs[0] |
|
|
prediction_scores = self.cls(sequence_output) |
|
|
|
|
|
masked_lm_loss = None |
|
|
if labels is not None: |
|
|
loss_fct = CrossEntropyLoss() |
|
|
masked_lm_loss = loss_fct( |
|
|
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) |
|
|
) |
|
|
|
|
|
if not return_dict: |
|
|
output = (prediction_scores,) + outputs[2:] |
|
|
return ( |
|
|
((masked_lm_loss,) + output) if masked_lm_loss is not None else output |
|
|
) |
|
|
|
|
|
return MaskedLMOutput( |
|
|
loss=masked_lm_loss, |
|
|
logits=prediction_scores, |
|
|
hidden_states=outputs.hidden_states, |
|
|
attentions=outputs.attentions, |
|
|
) |
|
|
|
|
|
def prepare_inputs_for_generation( |
|
|
self, input_ids, attention_mask=None, **model_kwargs |
|
|
): |
|
|
input_shape = input_ids.shape |
|
|
effective_batch_size = input_shape[0] |
|
|
|
|
|
|
|
|
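        # Append a dummy pad token so MLM-style generation has a slot to predict.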
if self.config.pad_token_id is None: |
|
|
raise ValueError("The PAD token should be defined for generation") |
|
|
attention_mask = torch.cat( |
|
|
[attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], |
|
|
dim=-1, |
|
|
) |
|
|
dummy_token = torch.full( |
|
|
(effective_batch_size, 1), |
|
|
self.config.pad_token_id, |
|
|
dtype=torch.long, |
|
|
device=input_ids.device, |
|
|
) |
|
|
input_ids = torch.cat([input_ids, dummy_token], dim=1) |
|
|
|
|
|
return {"input_ids": input_ids, "attention_mask": attention_mask} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class RotaryEmbedding(nn.Module): |
|
|
"""Rotary Embedding for language model. |
|
|
|
|
|
Args: |
|
|
kv_channels (int): Projection weights dimension in multi-head attention. Obtained from transformer config |
|
|
rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings. |
|
|
seq_len_interpolation_factor (float, optional): scale of linearly interpolating RoPE for longer sequences. The value must be a float larger than 1.0. Defaults to None |
|
|
rotary_base (int, optional): Base period for rotary position embeddings. Defaults to 10000. |
|
|
""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
kv_channels: int, |
|
|
rotary_percent: float, |
|
|
        seq_len_interpolation_factor: Optional[float] = None,
|
|
rotary_base: int = 10000, |
|
|
) -> None: |
|
|
super().__init__() |
|
|
|
|
|
dim = kv_channels |
|
|
if rotary_percent < 1.0: |
|
|
dim = int(dim * rotary_percent) |
|
|
|
|
|
self.seq_len_interpolation_factor = seq_len_interpolation_factor |
|
|
device = ( |
|
|
torch.cuda.current_device() |
|
|
if torch.cuda.is_available() |
|
|
else torch.device("cpu") |
|
|
) |
|
|
self.inv_freq = 1.0 / ( |
|
|
rotary_base |
|
|
** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim) |
|
|
) |
|
|
|
|
|
def forward(self, max_seq_len: int, offset: int = 0) -> Tensor: |
|
|
"""Forward pass of RoPE embedding. |
|
|
|
|
|
Args: |
|
|
max_seq_len (int): Maximum size of sequence |
|
|
            offset (int, optional): Offset added to the position indices. Defaults to 0.
|
|
|
|
|
Returns: |
|
|
Tensor: Embeddings after applying RoPE. |
|
|
""" |
|
|
seq = ( |
|
|
torch.arange( |
|
|
max_seq_len, device=self.inv_freq.device, dtype=self.inv_freq.dtype |
|
|
) |
|
|
+ offset |
|
|
) |
|
|
|
|
|
if self.seq_len_interpolation_factor is not None: |
|
|
seq *= 1 / self.seq_len_interpolation_factor |
|
|
|
|
|
freqs = torch.outer(seq, self.inv_freq) |
|
|
|
|
|
|
|
|
emb = torch.cat((freqs, freqs), dim=-1) |
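
        # Duplicating the half-dim frequencies covers the full rotary dimension;
        # the reshape below to [seq_len, 1, 1, dim] lets `emb` broadcast over the
        # batch and head dimensions.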
|
|
|
|
|
emb = emb[:, None, None, :] |
|
|
|
|
|
return emb |
|
|
|
|
|
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): |
|
|
state_dict.pop(f"{prefix}inv_freq", None) |
|
|
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) |
|
|
|
|
|
|
|
|
def _rotate_half(x: Tensor) -> Tensor: |
|
|
"""Change sign so the last dimension becomes [-odd, +even] |
|
|
|
|
|
Args: |
|
|
x (Tensor): Input tensor |
|
|
|
|
|
Returns: |
|
|
Tensor: Tensor rotated half |
|
|
""" |
|
|
|
|
|
x1, x2 = torch.chunk(x, 2, dim=-1) |
|
|
return torch.cat((-x2, x1), dim=-1) |
|
|
|
|
|
|
|
|
def apply_rotary_pos_emb(t: Tensor, freqs: Tensor) -> Tensor: |
|
|
"""Apply rotary positional embedding to input tensor T. |
|
|
|
|
|
check https://kexue.fm/archives/8265 for detailed formulas |
|
|
|
|
|
Args: |
|
|
t (Tensor): Input tensor T is of shape [seq_length, ... , dim] |
|
|
freqs (Tensor): Rotary Positional embedding tensor freq is of shape [seq_length, ..., dim] |
|
|
|
|
|
Returns: |
|
|
Tensor: The input tensor after applying RoPE |
|
|
""" |
|
|
rot_dim = freqs.shape[-1] |
|
|
|
|
|
|
|
|
t, t_pass = t[..., :rot_dim], t[..., rot_dim:] |
|
|
|
|
|
|
|
|
|
|
|
cos_ = torch.cos(freqs).to(t.dtype).to(t.device) |
|
|
sin_ = torch.sin(freqs).to(t.dtype).to(t.device) |
|
|
|
|
|
t = (t * cos_) + (_rotate_half(t) * sin_) |
|
|
return torch.cat((t, t_pass), dim=-1) |
|
|
|
|
|
|
|
|
def bert_extended_attention_mask(attention_mask): |
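    # Expand a padding mask of shape [batch, seq] into a boolean self-attention
    # mask of shape [batch, 1, seq, seq]; True marks positions that must not be
    # attended to.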
|
|
|
|
|
|
|
|
attention_mask_b1s = attention_mask.unsqueeze(1) |
|
|
|
|
|
attention_mask_bs1 = attention_mask.unsqueeze(2) |
|
|
|
|
|
attention_mask_bss = attention_mask_b1s * attention_mask_bs1 |
|
|
|
|
|
extended_attention_mask = attention_mask_bss.unsqueeze(1) |
|
|
|
|
|
|
|
|
extended_attention_mask = extended_attention_mask < 0.5 |
|
|
|
|
|
return extended_attention_mask |
|
|
|