# coding=utf-8
# Copyright 2024 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RNABert (AIDO.RNA) model, adapted from Megatron-BERT."""

import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    MaskedLMOutput,
)
from transformers import PreTrainedModel
from transformers.pytorch_utils import (
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)

from .configuration_rnabert import RNABertConfig

VARIANTS = {
    "aido_rna_1m_mars": "genbio-ai/AIDO.RNA-1M-MARS",
    "aido_rna_25m_mars": "genbio-ai/AIDO.RNA-25M-MARS",
    "aido_rna_300m_mars": "genbio-ai/AIDO.RNA-300M-MARS",
    "aido_rna_650m": "genbio-ai/AIDO.RNA-650M",
    "aido_rna_650m_cds": "genbio-ai/AIDO.RNA-650M-CDS",
    "aido_rna_1b600m": "genbio-ai/AIDO.RNA-1.6B",
    "aido_rna_1b600m_cds": "genbio-ai/AIDO.RNA-1.6B-CDS",
}


class RNABertEmbeddings(nn.Module):
    """Construct the embeddings from word embeddings and, when RoPE is not used,
    absolute position embeddings (token_type embeddings are disabled)."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
        )
        if config.position_embedding_type != "rope":
            self.position_embeddings = nn.Embedding(
                config.max_position_embeddings, config.hidden_size
            )
        # self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        # In Megatron, layer-norm is applied after the 1st dropout.
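        # (The commented-out embedding-level LayerNorm below is kept for reference only;
        # RNABert instead creates its layer norms inside RNABertAttention / RNABertLayer,
        # plus a final one in RNABertEncoder.)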
# self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False, ) self.position_embedding_type = getattr( config, "position_embedding_type", "rope" ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[ :, past_key_values_length : seq_length + past_key_values_length ] if token_type_ids is None: token_type_ids = torch.zeros( input_shape, dtype=torch.long, device=self.position_ids.device ) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # token_type_embeddings = self.token_type_embeddings(token_type_ids) # embeddings = inputs_embeds + token_type_embeddings embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings # Megatron BERT moves that layer norm after the drop-out (and to each layer). # embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RNABert class RNABertSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr( config, "embedding_size" ): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear( config.hidden_size, self.all_head_size, bias=config.add_linear_bias ) self.key = nn.Linear( config.hidden_size, self.all_head_size, bias=config.add_linear_bias ) self.value = nn.Linear( config.hidden_size, self.all_head_size, bias=config.add_linear_bias ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if ( self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query" ): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding( 2 * config.max_position_embeddings - 1, self.attention_head_size ) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: 
Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, rotary_pos_emb=None, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # [b, hn, sq, c] query_layer = self.transpose_for_scores(mixed_query_layer) if rotary_pos_emb is not None: if isinstance(rotary_pos_emb, tuple): rotary_pos_emb = rotary_pos_emb else: rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # [b, hn, sq, c] --> [sq, b, hn, c] query_layer = query_layer.permute(2, 0, 1, 3).contiguous() key_layer = key_layer.permute(2, 0, 1, 3).contiguous() query_layer = apply_rotary_pos_emb( query_layer, q_pos_emb ) # debug query_layer[:,0] key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb) # [sq, b, hn, c] --> [b, hn, sq, c] query_layer = query_layer.permute(1, 2, 0, 3).contiguous() key_layer = key_layer.permute(1, 2, 0, 3).contiguous() use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RNABertModel forward() function) attention_scores = attention_scores + attention_mask.to( attention_scores.dtype ) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) no_prob_mask = attention_mask < -1e-5 attention_probs = attention_probs.masked_fill(no_prob_mask, 0.0) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = ( (context_layer, attention_probs) if output_attentions else (context_layer,) ) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Based transformers.models.bert.modeling_bert.BertSelfOutput. Moved LayerNorm to RNABertAttention below. class RNABertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear( config.hidden_size, config.hidden_size, bias=config.add_linear_bias ) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, hidden_states: torch.Tensor, residual: torch.Tensor ) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return residual + hidden_states # Based transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm. class RNABertAttention(nn.Module): def __init__(self, config): super().__init__() self.ln = config.norm_cls(config.hidden_size, eps=config.layer_norm_eps) self.self = RNABertSelfAttention(config) self.output = RNABertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads, ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = ( self.self.attention_head_size * self.self.num_attention_heads ) self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, rotary_pos_emb=None, ) -> Tuple[torch.Tensor]: # debug_point1 = hidden_states[0] ln_outputs = self.ln(hidden_states) self_outputs = self.self( ln_outputs, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rotary_pos_emb, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[ 1: ] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RNABert class RNABertMLP(nn.Module): def __init__(self, config: RNABertConfig): super().__init__() assert config.hidden_act == "swiglu", "Only swiglu is supported." 
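        # SwiGLU feed-forward: `gate_proj` and `up_proj` both map the hidden state to
        # `intermediate_size`; the gate branch goes through SiLU and is multiplied
        # element-wise with the up branch, and `down_proj` maps the product back to
        # `hidden_size`, i.e. down_proj(SiLU(gate_proj(x)) * up_proj(x)).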
self.up_proj = nn.Linear( config.hidden_size, config.intermediate_size, bias=config.add_linear_bias ) self.down_proj = nn.Linear( config.intermediate_size, config.hidden_size, bias=config.add_linear_bias ) self.gate_proj = nn.Linear( config.hidden_size, config.intermediate_size, bias=config.add_linear_bias ) self.intermediate_act_fn = ACT2FN[ "silu" ] # swiglu use silu as part of its activation function def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: down_proj = self.down_proj( self.intermediate_act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states) ) return down_proj # Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to RNABertLayer below. class RNABertOutput(nn.Module): def __init__(self, config): super().__init__() # self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, hidden_states: torch.Tensor, input_tensor: torch.Tensor ) -> torch.Tensor: # hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return input_tensor + hidden_states # Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm. class RNABertLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RNABertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError( f"{self} should be used as a decoder model if cross attention is added" ) self.crossattention = RNABertAttention(config) self.ln = config.norm_cls(config.hidden_size, eps=config.layer_norm_eps) self.mlp = RNABertMLP(config) self.output = RNABertOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, rotary_pos_emb=None, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = ( past_key_value[:2] if past_key_value is not None else None ) self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, rotary_pos_emb=rotary_pos_emb, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[ 1: ] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise AttributeError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = ( past_key_value[-2:] if past_key_value is not None else None ) cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, 
cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = ( outputs + cross_attention_outputs[1:-1] ) # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): # debug: attention_output[0] ln_output = self.ln(attention_output) mlp_output = self.mlp(ln_output) layer_output = self.output(mlp_output, attention_output) return layer_output class RnaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ same as LlamaRMSNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS ALL_LAYERNORM_LAYERS.append(RnaRMSNorm) class RNABertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList( [RNABertLayer(config) for _ in range(config.num_hidden_layers)] ) # The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one # is simply the final LN (Transformer's BERT has it attached to each hidden layer). self.ln = config.norm_cls(config.hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, rotary_pos_emb: Optional[torch.FloatTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: if self.gradient_checkpointing and self.training: if use_cache: print( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = ( () if output_attentions and self.config.add_cross_attention else None ) next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rotary_pos_emb, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rotary_pos_emb, ) # Because we moved the layer-norm at the end of the hidden layer, we have non-normali- # zed data here. If that's really needed, we must apply LN to match Transformer's BERT. hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Finalize the hidden states. hidden_states = self.ln(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RNABert class RNABertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear( config.hidden_size, config.hidden_size, bias=config.add_linear_bias ) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
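        # (The pooler is only built when RNABertModel is constructed with
        # `add_pooling_layer=True`; the masked-LM model below skips it.)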
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RNABert class RNABertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear( config.hidden_size, config.hidden_size ) # in megatron, this will always have bias self.transform_act_fn = ACT2FN["gelu"] if config.normalization_type == "RMSNorm": self.LayerNorm = RnaRMSNorm(config.hidden_size, eps=config.layer_norm_eps) else: self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->RNABert class RNABertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = RNABertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RNABert class RNABertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = RNABertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class RNABertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RNABertConfig # load_tf_weights = load_tf_weights_in_rnabert base_model_prefix = "bert" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, RnaRMSNorm): module.weight.data.fill_(1.0) # no bias if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class RNABertModel(RNABertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. 
To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): wrapper_config = kwargs.pop("config", None) if wrapper_config is None: raise ValueError("Config must be provided") base_model = VARIANTS.get(wrapper_config.base_model, wrapper_config.base_model) # load base model config base_config = RNABertConfig.from_pretrained(base_model, **kwargs) # keep routing info base_config.base_model = wrapper_config.base_model return super().from_pretrained( base_model, *model_args, config=base_config, **kwargs, ) def __init__(self, config, add_pooling_layer=False): super().__init__(config) self.config = config if config.normalization_type == "RMSNorm": self.config.norm_cls = RnaRMSNorm else: assert config.normalization_type == "LayerNorm" self.config.norm_cls = nn.LayerNorm self.embeddings = RNABertEmbeddings(config) self.encoder = RNABertEncoder(config) self.pooler = RNABertPooler(config) if add_pooling_layer else None # rotary position embeddings if config.position_embedding_type == "rope": rotary_dim = config.hidden_size // config.num_attention_heads # partial rotary embeddings, which is better than full rotary # Wang and Komatsuzaki et al # https://github.com/kingoflolz/mesh-transformer-jax/ self.rotary_pos_emb = RotaryEmbedding(rotary_dim, config.rotary_percent) # delete this from config so the config can be successfully saved del self.config.norm_cls # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = ( past_key_values[0][0].shape[2] if past_key_values is not None else 0 ) if attention_mask is None: attention_mask = torch.ones( ((batch_size, seq_length + past_key_values_length)), device=device ) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
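        # Note: instead of the stock `get_extended_attention_mask` helper (commented out
        # below), `bert_extended_attention_mask` returns a boolean [batch, 1, seq, seq]
        # mask that is True at padded positions; multiplying it by
        # `torch.finfo(torch.float).min` turns it into the additive bias added to the
        # attention scores.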
# extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) extended_attention_mask = bert_extended_attention_mask( attention_mask ) # True for pad, false for non-pad extended_attention_mask = extended_attention_mask * torch.finfo(torch.float).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = ( encoder_hidden_states.size() ) encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # Rotary positional embeddings rotary_pos_emb = None if self.config.position_embedding_type == "rope": rotary_pos_emb = self.rotary_pos_emb(input_ids.size(1)) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rotary_pos_emb=rotary_pos_emb, ) sequence_output = encoder_outputs[0] pooled_output = ( self.pooler(sequence_output) if self.pooler is not None else None ) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class RNABertForMaskedLM(RNABertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder"] def __init__(self, config): super().__init__(config) if config.is_decoder: print( "If you want to use `RNABertForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." 
) self.bert = RNABertModel(config, add_pooling_layer=False) self.cls = RNABertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) ) if not return_dict: output = (prediction_scores,) + outputs[2:] return ( ((masked_lm_loss,) + output) if masked_lm_loss is not None else output ) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, attention_mask=None, **model_kwargs ): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat( [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1, ) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device, ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} from torch import Tensor, nn class RotaryEmbedding(nn.Module): """Rotary Embedding for language model. Args: kv_channels (int): Projection weights dimension in multi-head attention. Obtained from transformer config rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings. seq_len_interpolation_factor (float, optional): scale of linearly interpolating RoPE for longer sequences. The value must be a float larger than 1.0. Defaults to None rotary_base (int, optional): Base period for rotary position embeddings. 
Defaults to 10000. """ def __init__( self, kv_channels: int, rotary_percent: float, seq_len_interpolation_factor: float = None, rotary_base: int = 10000, ) -> None: super().__init__() dim = kv_channels if rotary_percent < 1.0: dim = int(dim * rotary_percent) self.seq_len_interpolation_factor = seq_len_interpolation_factor device = ( torch.cuda.current_device() if torch.cuda.is_available() else torch.device("cpu") ) self.inv_freq = 1.0 / ( rotary_base ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim) ) def forward(self, max_seq_len: int, offset: int = 0) -> Tensor: """Forward pass of RoPE embedding. Args: max_seq_len (int): Maximum size of sequence offset (int, optional): _description_. Defaults to 0. Returns: Tensor: Embeddings after applying RoPE. """ seq = ( torch.arange( max_seq_len, device=self.inv_freq.device, dtype=self.inv_freq.dtype ) + offset ) if self.seq_len_interpolation_factor is not None: seq *= 1 / self.seq_len_interpolation_factor freqs = torch.outer(seq, self.inv_freq) # first part even vector components, second part odd vector components, # 2 * dim in dimension size emb = torch.cat((freqs, freqs), dim=-1) # emb [seq_length, .., dim] emb = emb[:, None, None, :] return emb def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): state_dict.pop(f"{prefix}inv_freq", None) return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) def _rotate_half(x: Tensor) -> Tensor: """Change sign so the last dimension becomes [-odd, +even] Args: x (Tensor): Input tensor Returns: Tensor: Tensor rotated half """ x1, x2 = torch.chunk(x, 2, dim=-1) return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(t: Tensor, freqs: Tensor) -> Tensor: """Apply rotary positional embedding to input tensor T. check https://kexue.fm/archives/8265 for detailed formulas Args: t (Tensor): Input tensor T is of shape [seq_length, ... , dim] freqs (Tensor): Rotary Positional embedding tensor freq is of shape [seq_length, ..., dim] Returns: Tensor: The input tensor after applying RoPE """ rot_dim = freqs.shape[-1] # ideally t_pass is empty so rotary pos embedding is applied to all tensor t t, t_pass = t[..., :rot_dim], t[..., rot_dim:] # first part is cosine component # second part is sine component, need to change signs with _rotate_half method cos_ = torch.cos(freqs).to(t.dtype).to(t.device) sin_ = torch.sin(freqs).to(t.dtype).to(t.device) t = (t * cos_) + (_rotate_half(t) * sin_) return torch.cat((t, t_pass), dim=-1) def bert_extended_attention_mask(attention_mask): # We create a 3D attention mask from a 2D tensor mask. # [b, 1, s] attention_mask_b1s = attention_mask.unsqueeze(1) # [b, s, 1] attention_mask_bs1 = attention_mask.unsqueeze(2) # [b, s, s] attention_mask_bss = attention_mask_b1s * attention_mask_bs1 # [b, 1, s, s] extended_attention_mask = attention_mask_bss.unsqueeze(1) # Convert attention mask to binary: extended_attention_mask = extended_attention_mask < 0.5 return extended_attention_mask
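

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the model definition. The shape check
# exercises only the RoPE helpers defined above; the commented-out loading line
# is an assumption that the Hub checkpoints listed in `VARIANTS` can be loaded
# directly with `from_pretrained` (they may instead require `trust_remote_code`
# or the repository's own modeling code).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # RoPE frequencies come out as [seq, 1, 1, dim]; queries/keys must be permuted
    # to [seq, batch, heads, head_dim] before applying them, as RNABertSelfAttention does.
    rope = RotaryEmbedding(kv_channels=64, rotary_percent=1.0)
    freqs = rope(max_seq_len=16)  # [16, 1, 1, 64]
    q = torch.randn(16, 2, 8, 64, device=rope.inv_freq.device)
    q_rot = apply_rotary_pos_emb(q, freqs)
    assert q_rot.shape == q.shape

    # Hypothetical checkpoint loading (requires network access and a compatible
    # published checkpoint):
    # model = RNABertForMaskedLM.from_pretrained(VARIANTS["aido_rna_650m"])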