| """ PyTorch ESM model.""" |
|
|
| import math |
| from typing import List, Optional, Tuple, Union |
|
|
| import torch |
| import torch.utils.checkpoint |
| from torch import nn |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, SiLU |
| from transformers.file_utils import ( |
| add_code_sample_docstrings, |
| add_start_docstrings, |
| add_start_docstrings_to_model_forward, |
| ) |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPastAndCrossAttentions, |
| BaseModelOutputWithPoolingAndCrossAttentions, |
| MaskedLMOutput, |
| SequenceClassifierOutput, |
| TokenClassifierOutput, |
| ) |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer |
| from transformers.utils import logging |
|
|
| from .esm_config import EsmConfig |
|
|
| logger = logging.get_logger(__name__) |
|
|
| _CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D" |
| _CONFIG_FOR_DOC = "EsmConfig" |
|
|
ESM_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/esm2_t6_8M_UR50D",
    "facebook/esm2_t12_35M_UR50D",
    # This is not a complete list of all ESM models!
    # See all ESM models at https://huggingface.co/models?filter=esm
]
|
|
|
|
def rotate_half(x):
    # Rotate the last dimension by splitting it in half and swapping the halves
    # (with a sign flip), as used by rotary position embeddings.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)
|
|
|
|
def apply_rotary_pos_emb(x, cos, sin):
    # Truncate the cached cos/sin tables to the current sequence length.
    cos = cos[:, :, : x.shape[-2], :]
    sin = sin[:, :, : x.shape[-2], :]

    return (x * cos) + (rotate_half(x) * sin)
|
|
|
|
| def gelu(x): |
| """ |
| This is the gelu implementation from the original ESM repo. Using F.gelu yields subtly wrong results. |
| """ |
| return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) |
|
|
|
|
| def symmetrize(x): |
| "Make layer symmetric in final two dimensions, used for contact prediction." |
| return x + x.transpose(-1, -2) |
|
|
|
|
def average_product_correct(x):
    """
    Perform average product correction (APC), used for contact prediction:
    subtracts (row_sum * col_sum) / total_sum from each entry of the final two dimensions.
    """
    a1 = x.sum(-1, keepdims=True)
    a2 = x.sum(-2, keepdims=True)
    a12 = x.sum((-1, -2), keepdims=True)

    avg = a1 * a2
    avg.div_(a12)  # in-place to reduce memory
    normalized = x - avg
    return normalized
|
|
|
|
| class RotaryEmbedding(torch.nn.Module): |
| """ |
| Rotary position embeddings based on those in |
| [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation |
| matrices which depend on their relative positions. |
| """ |
|
|
    def __init__(self, dim: int):
        super().__init__()
        # Generate and save the inverse frequency buffer (non-trainable).
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
|
|
| self._seq_len_cached = None |
| self._cos_cached = None |
| self._sin_cached = None |
|
|
| def _update_cos_sin_tables(self, x, seq_dimension=2): |
| seq_len = x.shape[seq_dimension] |
|
|
        # Reset the tables if the sequence length has changed, or if we're on a new
        # device (possibly due to tracing, for instance).
        if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
| self._seq_len_cached = seq_len |
| t = torch.arange(x.shape[seq_dimension], device=x.device).type_as( |
| self.inv_freq |
| ) |
| freqs = torch.outer(t, self.inv_freq) |
| emb = torch.cat((freqs, freqs), dim=-1).to(x.device) |
|
|
| self._cos_cached = emb.cos()[None, None, :, :] |
| self._sin_cached = emb.sin()[None, None, :, :] |
|
|
| return self._cos_cached, self._sin_cached |
|
|
| def forward( |
| self, q: torch.Tensor, k: torch.Tensor |
| ) -> Tuple[torch.Tensor, torch.Tensor]: |
| self._cos_cached, self._sin_cached = self._update_cos_sin_tables( |
| k, seq_dimension=-2 |
| ) |
|
|
| return ( |
| apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached), |
| apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached), |
| ) |
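# A minimal usage sketch (shapes are assumptions, not taken from a call site): with
# per-head dimension `d`, queries and keys of shape (batch, heads, seq_len, d) come
# back with the same shapes but with relative positions encoded multiplicatively:
#
#     rotary = RotaryEmbedding(dim=d)
#     q_rot, k_rot = rotary(q, k)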
|
|
|
|
| class EsmContactPredictionHead(nn.Module): |
| """Performs symmetrization, apc, and computes a logistic regression on the output features""" |
|
|
| def __init__( |
| self, |
| in_features: int, |
| bias=True, |
| eos_idx: int = 2, |
| ): |
| super().__init__() |
| self.in_features = in_features |
| self.eos_idx = eos_idx |
| self.regression = nn.Linear(in_features, 1, bias) |
| self.activation = nn.Sigmoid() |
|
|
| def forward(self, tokens, attentions): |
        # remove eos token attentions
| eos_mask = tokens.ne(self.eos_idx).to(attentions) |
| eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) |
| attentions = attentions * eos_mask[:, None, None, :, :] |
| attentions = attentions[..., :-1, :-1] |
        # remove cls token attentions
| attentions = attentions[..., 1:, 1:] |
| batch_size, layers, heads, seqlen, _ = attentions.size() |
| attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen) |
|
|
        # features: batch x channels x tokens x tokens (symmetric)
        attentions = attentions.to(
            self.regression.weight.device
        )  # attentions are always float32, may need to convert to float16
| attentions = average_product_correct(symmetrize(attentions)) |
| attentions = attentions.permute(0, 2, 3, 1) |
| return self.activation(self.regression(attentions).squeeze(3)) |
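# Shape sketch (assumed conventions): `tokens` is (batch, seqlen) and `attentions`
# is (batch, layers, heads, seqlen, seqlen); after the BOS/EOS rows and columns are
# stripped above, the returned contact probabilities are (batch, seqlen - 2, seqlen - 2).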
|
|
|
|
| class EsmEmbeddings(nn.Module): |
| """ |
| Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. |
| """ |
|
|
| def __init__(self, config): |
| super().__init__() |
| self.word_embeddings = nn.Embedding( |
| config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id |
| ) |
|
|
| if config.emb_layer_norm_before: |
| self.layer_norm = nn.LayerNorm( |
| config.hidden_size, eps=config.layer_norm_eps |
| ) |
| else: |
| self.layer_norm = None |
| self.dropout = nn.Dropout(config.hidden_dropout_prob) |
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute"
        )
| self.register_buffer( |
| "position_ids", |
| torch.arange(config.max_position_embeddings).expand((1, -1)), |
| persistent=False, |
| ) |
|
|
| self.padding_idx = config.pad_token_id |
| self.position_embeddings = nn.Embedding( |
| config.max_position_embeddings, |
| config.hidden_size, |
| padding_idx=self.padding_idx, |
| ) |
| self.token_dropout = config.token_dropout |
| self.mask_token_id = config.mask_token_id |
|
|
| def forward( |
| self, |
| input_ids=None, |
| attention_mask=None, |
| position_ids=None, |
| inputs_embeds=None, |
| past_key_values_length=0, |
| ): |
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(
                    input_ids, self.padding_idx, past_key_values_length
                )
| else: |
| position_ids = self.create_position_ids_from_inputs_embeds( |
| inputs_embeds |
| ) |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.word_embeddings(input_ids) |
|
|
        # Note that if we wanted to support ESM-1 (not 1b!) in future then we would
        # need to support an embedding_scale factor here.
        embeddings = inputs_embeds
|
|
        # ESM can optionally handle MLM masking in an unusual way: when token_dropout
        # is set, masked tokens are zeroed out as if selected for input dropout, and
        # the remaining embeddings are scaled up by
        # (fraction unmasked during training) / (fraction unmasked in this sample),
        # analogous to how dropout layers rescale un-dropped activations.
        if self.token_dropout:
            embeddings = embeddings.masked_fill(
                (input_ids == self.mask_token_id).unsqueeze(-1), 0.0
            )
            mask_ratio_train = (
                0.15 * 0.8
            )  # Hardcoded as the ratio used in all ESM model training runs
            src_lengths = attention_mask.sum(-1)
            mask_ratio_observed = (input_ids == self.mask_token_id).sum(
                -1
            ).float() / src_lengths
            embeddings = (
                embeddings
                * (1 - mask_ratio_train)
                / (1 - mask_ratio_observed)[:, None, None]
            ).to(embeddings.dtype)
|
|
| if self.position_embedding_type == "absolute": |
| position_embeddings = self.position_embeddings(position_ids) |
| embeddings += position_embeddings |
|
|
| if self.layer_norm is not None: |
| embeddings = self.layer_norm(embeddings) |
| if attention_mask is not None: |
| embeddings = (embeddings * attention_mask.unsqueeze(-1)).to( |
| embeddings.dtype |
| ) |
| |
| |
| return embeddings |
|
|
| def create_position_ids_from_inputs_embeds(self, inputs_embeds): |
| """ |
| We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. |
| |
| Args: |
| inputs_embeds: torch.Tensor |
| |
| Returns: torch.Tensor |
| """ |
| input_shape = inputs_embeds.size()[:-1] |
| sequence_length = input_shape[1] |
|
|
| position_ids = torch.arange( |
| self.padding_idx + 1, |
| sequence_length + self.padding_idx + 1, |
| dtype=torch.long, |
| device=inputs_embeds.device, |
| ) |
| return position_ids.unsqueeze(0).expand(input_shape) |
|
|
|
|
| class EsmSelfAttention(nn.Module): |
| def __init__(self, config, position_embedding_type=None): |
| super().__init__() |
| if config.hidden_size % config.num_attention_heads != 0 and not hasattr( |
| config, "embedding_size" |
| ): |
| raise ValueError( |
| f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " |
| f"heads ({config.num_attention_heads})" |
| ) |
|
|
| self.num_attention_heads = config.num_attention_heads |
| self.attention_head_size = int(config.hidden_size / config.num_attention_heads) |
| self.all_head_size = self.num_attention_heads * self.attention_head_size |
|
|
| self.query = nn.Linear(config.hidden_size, self.all_head_size) |
| self.key = nn.Linear(config.hidden_size, self.all_head_size) |
| self.value = nn.Linear(config.hidden_size, self.all_head_size) |
|
|
| self.dropout = nn.Dropout(config.attention_probs_dropout_prob) |
| self.position_embedding_type = position_embedding_type or getattr( |
| config, "position_embedding_type", "absolute" |
| ) |
| self.rotary_embeddings = None |
| if ( |
| self.position_embedding_type == "relative_key" |
| or self.position_embedding_type == "relative_key_query" |
| ): |
| self.max_position_embeddings = config.max_position_embeddings |
| self.distance_embedding = nn.Embedding( |
| 2 * config.max_position_embeddings - 1, self.attention_head_size |
| ) |
| elif self.position_embedding_type == "rotary": |
| self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size) |
|
|
| self.is_decoder = config.is_decoder |
|
|
| def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: |
| new_x_shape = x.size()[:-1] + ( |
| self.num_attention_heads, |
| self.attention_head_size, |
| ) |
| x = x.view(new_x_shape) |
| return x.permute(0, 2, 1, 3) |
|
|
| def forward( |
| self, |
| hidden_states: torch.Tensor, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.FloatTensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| output_attentions: Optional[bool] = False, |
| ) -> Tuple[torch.Tensor]: |
| mixed_query_layer = self.query(hidden_states) |
|
|
        # If this is instantiated as a cross-attention module, the keys and values
        # come from an encoder; the attention mask needs to be such that the
        # encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
|
|
        if is_cross_attention and past_key_value is not None:
            # reuse k, v from the cross-attention cache
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
| elif is_cross_attention: |
| key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) |
| value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) |
| attention_mask = encoder_attention_mask |
| elif past_key_value is not None: |
| key_layer = self.transpose_for_scores(self.key(hidden_states)) |
| value_layer = self.transpose_for_scores(self.value(hidden_states)) |
| key_layer = torch.cat([past_key_value[0], key_layer], dim=2) |
| value_layer = torch.cat([past_key_value[1], value_layer], dim=2) |
| else: |
| key_layer = self.transpose_for_scores(self.key(hidden_states)) |
| value_layer = self.transpose_for_scores(self.value(hidden_states)) |
|
|
| query_layer = self.transpose_for_scores(mixed_query_layer) |
|
|
        # The BERT-style code this is derived from scales the attention scores by
        # 1/sqrt(head_dim); ESM instead scales the query by the same factor. The two
        # are equivalent except when rotary embeddings are involved, so we scale the
        # query here to match the original ESM code.
        query_layer = query_layer * self.attention_head_size**-0.5
|
|
        if self.is_decoder:
            # If this is a cross-attention layer, cache all cross-attention key/value
            # states so further calls can reuse them (first "if" case above). If this
            # is uni-directional self-attention, cache all previous decoder key/value
            # states so further calls can concatenate them with the current
            # projections (third "elif" case). For encoder bi-directional
            # self-attention, `past_key_value` is always `None`.
            past_key_value = (key_layer, value_layer)
|
|
| if self.position_embedding_type == "rotary": |
| query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer) |
|
|
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
|
| if ( |
| self.position_embedding_type == "relative_key" |
| or self.position_embedding_type == "relative_key_query" |
| ): |
| seq_length = hidden_states.size()[1] |
| position_ids_l = torch.arange( |
| seq_length, dtype=torch.long, device=hidden_states.device |
| ).view(-1, 1) |
| position_ids_r = torch.arange( |
| seq_length, dtype=torch.long, device=hidden_states.device |
| ).view(1, -1) |
| distance = position_ids_l - position_ids_r |
| positional_embedding = self.distance_embedding( |
| distance + self.max_position_embeddings - 1 |
| ) |
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype
            )  # fp16 compatibility
|
|
| if self.position_embedding_type == "relative_key": |
| relative_position_scores = torch.einsum( |
| "bhld,lrd->bhlr", query_layer, positional_embedding |
| ) |
| attention_scores = attention_scores + relative_position_scores |
| elif self.position_embedding_type == "relative_key_query": |
| relative_position_scores_query = torch.einsum( |
| "bhld,lrd->bhlr", query_layer, positional_embedding |
| ) |
| relative_position_scores_key = torch.einsum( |
| "bhrd,lrd->bhlr", key_layer, positional_embedding |
| ) |
| attention_scores = ( |
| attention_scores |
| + relative_position_scores_query |
| + relative_position_scores_key |
| ) |
|
|
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the EsmModel forward() call).
            attention_scores = attention_scores + attention_mask
|
|
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
|
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
|
|
        # Mask heads if we want to.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
|
|
| context_layer = torch.matmul(attention_probs, value_layer) |
|
|
| context_layer = context_layer.permute(0, 2, 1, 3).contiguous() |
| new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) |
| context_layer = context_layer.view(new_context_layer_shape) |
|
|
| outputs = ( |
| (context_layer, attention_probs) if output_attentions else (context_layer,) |
| ) |
|
|
| if self.is_decoder: |
| outputs = outputs + (past_key_value,) |
| return outputs |
|
|
|
|
| class EsmSelfOutput(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
| self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
| def forward(self, hidden_states, input_tensor): |
| hidden_states = self.dense(hidden_states) |
| hidden_states = self.dropout(hidden_states) |
| hidden_states += input_tensor |
| return hidden_states |
|
|
|
|
| class EsmAttention(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.self = EsmSelfAttention(config) |
| self.output = EsmSelfOutput(config) |
| self.pruned_heads = set() |
| self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
| def prune_heads(self, heads): |
| if len(heads) == 0: |
| return |
| heads, index = find_pruneable_heads_and_indices( |
| heads, |
| self.self.num_attention_heads, |
| self.self.attention_head_size, |
| self.pruned_heads, |
| ) |
|
|
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
|
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
| self.self.all_head_size = ( |
| self.self.attention_head_size * self.self.num_attention_heads |
| ) |
| self.pruned_heads = self.pruned_heads.union(heads) |
|
|
| def forward( |
| self, |
| hidden_states, |
| attention_mask=None, |
| head_mask=None, |
| encoder_hidden_states=None, |
| encoder_attention_mask=None, |
| past_key_value=None, |
| output_attentions=False, |
| ): |
| hidden_states_ln = self.LayerNorm(hidden_states) |
| self_outputs = self.self( |
| hidden_states_ln, |
| attention_mask, |
| head_mask, |
| encoder_hidden_states, |
| encoder_attention_mask, |
| past_key_value, |
| output_attentions, |
| ) |
| attention_output = self.output(self_outputs[0], hidden_states) |
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
|
|
|
|
| class EsmIntermediate(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
|
|
        # Project to twice the intermediate size: the output is split in half and
        # gated with SiLU in forward() (a SwiGLU-style gated feed-forward).
        self.dense = nn.Linear(
            config.hidden_size,
            int(config.intermediate_size * 2),
            bias=config.add_bias_fnn,
        )
        self.activation_fn = SiLU()
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| hidden_states = self.dense(hidden_states) |
|
|
        # GLU-style gating: SiLU on one half of the projection gates the other half.
        x1, x2 = hidden_states.split(int(hidden_states.size(-1) / 2), -1)
        hidden_states = self.activation_fn(x1) * x2
|
|
| return hidden_states |
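# Shape sketch (dims assumed): hidden states (B, L, H) -> dense -> (B, L, 2*I)
# -> split and gate -> (B, L, I), which EsmOutput then projects back to (B, L, H).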
|
|
|
|
| class EsmOutput(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.dense = nn.Linear( |
| config.intermediate_size, config.hidden_size, bias=config.add_bias_fnn |
| ) |
| self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
| def forward(self, hidden_states, input_tensor): |
| hidden_states = self.dense(hidden_states) |
| hidden_states = self.dropout(hidden_states) |
| hidden_states += input_tensor |
| return hidden_states |
|
|
|
|
| class EsmLayer(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.chunk_size_feed_forward = config.chunk_size_feed_forward |
| self.seq_len_dim = 1 |
| self.attention = EsmAttention(config) |
| self.is_decoder = config.is_decoder |
| self.add_cross_attention = config.add_cross_attention |
| if self.add_cross_attention: |
| if not self.is_decoder: |
| raise RuntimeError( |
| f"{self} should be used as a decoder model if cross attention is added" |
| ) |
| self.crossattention = EsmAttention(config) |
| self.intermediate = EsmIntermediate(config) |
| self.output = EsmOutput(config) |
| self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
| def forward( |
| self, |
| hidden_states, |
| attention_mask=None, |
| head_mask=None, |
| encoder_hidden_states=None, |
| encoder_attention_mask=None, |
| past_key_value=None, |
| output_attentions=False, |
| ): |
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = (
            past_key_value[:2] if past_key_value is not None else None
        )
| self_attention_outputs = self.attention( |
| hidden_states, |
| attention_mask, |
| head_mask, |
| output_attentions=output_attentions, |
| past_key_value=self_attn_past_key_value, |
| ) |
| attention_output = self_attention_outputs[0] |
|
|
        # if decoder, the last output is a tuple of self-attention cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
|
|
| cross_attn_present_key_value = None |
| if self.is_decoder and encoder_hidden_states is not None: |
| if not hasattr(self, "crossattention"): |
| raise AttributeError( |
| f"If `encoder_hidden_states` are passed, {self} has to be instantiated" |
| " with cross-attention layers by setting `config.add_cross_attention=True`" |
| ) |
|
|
            # cross-attention cached key/values tuple is at positions 3,4 of the past_key_value tuple
            cross_attn_past_key_value = (
                past_key_value[-2:] if past_key_value is not None else None
            )
| cross_attention_outputs = self.crossattention( |
| attention_output, |
| attention_mask, |
| head_mask, |
| encoder_hidden_states, |
| encoder_attention_mask, |
| cross_attn_past_key_value, |
| output_attentions, |
| ) |
| attention_output = cross_attention_outputs[0] |
            outputs = (
                outputs + cross_attention_outputs[1:-1]
            )  # add cross attentions if we output attention weights
|
|
            # add cross-attention cache to positions 3,4 of the present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
|
|
| layer_output = self.feed_forward_chunk(attention_output) |
|
|
| outputs = (layer_output,) + outputs |
|
|
        # if decoder, return the attention key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
| return outputs |
|
|
| def feed_forward_chunk(self, attention_output): |
| attention_output_ln = self.LayerNorm(attention_output) |
| intermediate_output = self.intermediate(attention_output_ln) |
| layer_output = self.output(intermediate_output, attention_output) |
| return layer_output |
|
|
|
|
| class EsmEncoder(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.config = config |
| self.layer = nn.ModuleList( |
| [EsmLayer(config) for _ in range(config.num_hidden_layers)] |
| ) |
| self.emb_layer_norm_after = nn.LayerNorm( |
| config.hidden_size, eps=config.layer_norm_eps |
| ) |
| self.gradient_checkpointing = False |
|
|
| def forward( |
| self, |
| hidden_states, |
| attention_mask=None, |
| head_mask=None, |
| encoder_hidden_states=None, |
| encoder_attention_mask=None, |
| past_key_values=None, |
| use_cache=None, |
| output_attentions=False, |
| output_hidden_states=False, |
| return_dict=True, |
| ): |
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " |
| "`use_cache=False`..." |
| ) |
| use_cache = False |
| all_hidden_states = () if output_hidden_states else None |
| all_self_attentions = () if output_attentions else None |
| all_cross_attentions = ( |
| () if output_attentions and self.config.add_cross_attention else None |
| ) |
|
|
| next_decoder_cache = () if use_cache else None |
| for i, layer_module in enumerate(self.layer): |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| layer_head_mask = head_mask[i] if head_mask is not None else None |
| past_key_value = past_key_values[i] if past_key_values is not None else None |
|
|
| if self.gradient_checkpointing and self.training: |
|
|
| def create_custom_forward(module): |
| def custom_forward(*inputs): |
| return module(*inputs, past_key_value, output_attentions) |
|
|
| return custom_forward |
|
|
| layer_outputs = torch.utils.checkpoint.checkpoint( |
| create_custom_forward(layer_module), |
| hidden_states, |
| attention_mask, |
| layer_head_mask, |
| encoder_hidden_states, |
| encoder_attention_mask, |
| ) |
| else: |
| layer_outputs = layer_module( |
| hidden_states, |
| attention_mask, |
| layer_head_mask, |
| encoder_hidden_states, |
| encoder_attention_mask, |
| past_key_value, |
| output_attentions, |
| ) |
|
|
| hidden_states = layer_outputs[0] |
| if use_cache: |
| next_decoder_cache += (layer_outputs[-1],) |
| if output_attentions: |
| all_self_attentions = all_self_attentions + (layer_outputs[1],) |
| if self.config.add_cross_attention: |
| all_cross_attentions = all_cross_attentions + (layer_outputs[2],) |
|
|
| if self.emb_layer_norm_after: |
| hidden_states = self.emb_layer_norm_after(hidden_states) |
|
|
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| if not return_dict: |
| return tuple( |
| v |
| for v in [ |
| hidden_states, |
| next_decoder_cache, |
| all_hidden_states, |
| all_self_attentions, |
| all_cross_attentions, |
| ] |
| if v is not None |
| ) |
| return BaseModelOutputWithPastAndCrossAttentions( |
| last_hidden_state=hidden_states, |
| past_key_values=next_decoder_cache, |
| hidden_states=all_hidden_states, |
| attentions=all_self_attentions, |
| cross_attentions=all_cross_attentions, |
| ) |
|
|
|
|
# Copied from transformers.models.bert.modeling_bert.BertPooler
class EsmPooler(nn.Module):
| def __init__(self, config): |
| super().__init__() |
| self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
| self.activation = nn.Tanh() |
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
| pooled_output = self.dense(first_token_tensor) |
| pooled_output = self.activation(pooled_output) |
| return pooled_output |
|
|
|
|
| class EsmPreTrainedModel(PreTrainedModel): |
| """ |
| An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
| models. |
| """ |
|
|
| config_class = EsmConfig |
| base_model_prefix = "esm" |
| _no_split_modules = ["EsmLayer", "EsmFoldTriangularSelfAttentionBlock"] |
|
|
| |
| def _init_weights(self, module): |
| """Initialize the weights""" |
| if isinstance(module, nn.Linear): |
            # Slightly different from the TF version, which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
|
|
| ESM_START_DOCSTRING = r""" |
| |
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.
| |
| Parameters: |
| config ([`EsmConfig`]): Model configuration class with all the parameters of the |
| model. Initializing with a config file does not load the weights associated with the model, only the |
| configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
| ESM_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `({0})`): |
| Indices of input sequence tokens in the vocabulary. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| position_ids (`torch.LongTensor` of shape `({0})`, *optional*): |
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| config.max_position_embeddings - 1]`. |
| |
| [What are position IDs?](../glossary#position-ids) |
| head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): |
| Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| is useful if you want more control over how to convert `input_ids` indices into associated vectors than the |
| model's internal embedding lookup matrix. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. |
| """ |
|
|
|
|
| @add_start_docstrings( |
| "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.", |
| ESM_START_DOCSTRING, |
| ) |
| class EsmModel(EsmPreTrainedModel): |
| """ |
| |
| The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of |
| cross-attention is added between the self-attention layers, following the architecture described in [Attention is |
| all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, |
| Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. |
| |
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
| """ |
|
|
    supports_gradient_checkpointing = True  # EsmEncoder implements gradient checkpointing (see below)
|
|
| def __init__(self, config, add_pooling_layer=True): |
| super().__init__(config) |
| self.config = config |
|
|
| self.embeddings = EsmEmbeddings(config) |
| self.encoder = EsmEncoder(config) |
|
|
| self.pooler = EsmPooler(config) if add_pooling_layer else None |
|
|
| self.contact_head = EsmContactPredictionHead( |
| in_features=config.num_hidden_layers * config.num_attention_heads, bias=True |
| ) |
|
|
        # Initialize weights and apply final processing
        self.post_init()
|
|
| def _set_gradient_checkpointing(self, module, value=False): |
| if isinstance(module, EsmEncoder): |
| module.gradient_checkpointing = value |
|
|
| def get_input_embeddings(self): |
| return self.embeddings.word_embeddings |
|
|
| def set_input_embeddings(self, value): |
| self.embeddings.word_embeddings = value |
|
|
| def _prune_heads(self, heads_to_prune): |
| """ |
| Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base |
| class PreTrainedModel |
| """ |
| for layer, heads in heads_to_prune.items(): |
| self.encoder.layer[layer].attention.prune_heads(heads) |
|
|
| @add_start_docstrings_to_model_forward( |
| ESM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=BaseModelOutputWithPoolingAndCrossAttentions, |
| config_class=_CONFIG_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.Tensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.Tensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.Tensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.Tensor] = None, |
| past_key_values: Optional[List[torch.FloatTensor]] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: |
| r""" |
| encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if |
| the model is configured as a decoder. |
| encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in |
| the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): |
| Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. |
| |
| If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that |
| don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all |
| `decoder_input_ids` of shape `(batch_size, sequence_length)`. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| """ |
| output_attentions = ( |
| output_attentions |
| if output_attentions is not None |
| else self.config.output_attentions |
| ) |
| output_hidden_states = ( |
| output_hidden_states |
| if output_hidden_states is not None |
| else self.config.output_hidden_states |
| ) |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| if self.config.is_decoder: |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| else: |
| use_cache = False |
|
|
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError( |
| "You cannot specify both input_ids and inputs_embeds at the same time" |
| ) |
| elif input_ids is not None: |
| input_shape = input_ids.size() |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| batch_size, seq_length = input_shape |
| device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
        # Length of the cached past key/values, used to offset position ids when decoding.
        past_key_values_length = (
            past_key_values[0][0].shape[2] if past_key_values is not None else 0
        )
|
|
| if attention_mask is None: |
| attention_mask = torch.ones( |
| ((batch_size, seq_length + past_key_values_length)), device=device |
| ) |
|
|
        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length] ourselves, in which case we
        # just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape
        )
|
|
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if self.config.is_decoder and encoder_hidden_states is not None:
| ( |
| encoder_batch_size, |
| encoder_sequence_length, |
| _, |
| ) = encoder_hidden_states.size() |
| encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) |
| if encoder_attention_mask is None: |
| encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) |
| encoder_extended_attention_mask = self.invert_attention_mask( |
| encoder_attention_mask |
| ) |
| else: |
| encoder_extended_attention_mask = None |
|
|
        # Prepare head mask if needed.
        # 1.0 in head_mask indicates we keep the head; attention_probs has shape
        # bsz x n_heads x N x N. The input head_mask has shape [num_heads] or
        # [num_hidden_layers x num_heads] and is converted to shape
        # [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
|
| embedding_output = self.embeddings( |
| input_ids=input_ids, |
| position_ids=position_ids, |
| attention_mask=attention_mask, |
| inputs_embeds=inputs_embeds, |
| past_key_values_length=past_key_values_length, |
| ) |
| encoder_outputs = self.encoder( |
| embedding_output, |
| attention_mask=extended_attention_mask, |
| head_mask=head_mask, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_extended_attention_mask, |
| past_key_values=past_key_values, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| sequence_output = encoder_outputs[0] |
| pooled_output = ( |
| self.pooler(sequence_output) if self.pooler is not None else None |
| ) |
|
|
| if not return_dict: |
| return (sequence_output, pooled_output) + encoder_outputs[1:] |
|
|
| return BaseModelOutputWithPoolingAndCrossAttentions( |
| last_hidden_state=sequence_output, |
| pooler_output=pooled_output, |
| past_key_values=encoder_outputs.past_key_values, |
| hidden_states=encoder_outputs.hidden_states, |
| attentions=encoder_outputs.attentions, |
| cross_attentions=encoder_outputs.cross_attentions, |
| ) |
|
|
| def predict_contacts(self, tokens, attention_mask): |
| attns = self( |
| tokens, |
| attention_mask=attention_mask, |
| return_dict=True, |
| output_attentions=True, |
| ).attentions |
        attns = torch.stack(attns, dim=1)  # Matches the original model layout
        # In the original model, attentions for padding tokens are completely zeroed out.
        # This makes no difference most of the time because the other tokens won't attend
        # to them, but it does matter for contact prediction, which takes attentions as
        # input, so we have to mimic that here.
        attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3)
        attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4)
| return self.contact_head(tokens, attns) |
|
|
|
|
| @add_start_docstrings( |
| """ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING |
| ) |
| class EsmForMaskedLM(EsmPreTrainedModel): |
| _tied_weights_keys = ["lm_head.decoder.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
|
|
| if config.is_decoder: |
| logger.warning( |
| "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for " |
| "bi-directional self-attention." |
| ) |
|
|
| self.esm = EsmModel(config, add_pooling_layer=False) |
| self.lm_head = EsmLMHead(config) |
|
|
| self.init_weights() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head.decoder |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.lm_head.decoder = new_embeddings |
|
|
| @add_start_docstrings_to_model_forward( |
| ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=MaskedLMOutput, |
| config_class=_CONFIG_FOR_DOC, |
| mask="<mask>", |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.FloatTensor] = None, |
| encoder_attention_mask: Optional[torch.Tensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, MaskedLMOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., |
| config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the |
| loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| outputs = self.esm( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| sequence_output = outputs[0] |
| prediction_scores = self.lm_head(sequence_output) |
|
|
| masked_lm_loss = None |
| if labels is not None: |
| loss_fct = CrossEntropyLoss() |
|
|
| labels = labels.to(prediction_scores.device) |
| masked_lm_loss = loss_fct( |
| prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) |
| ) |
|
|
| if not return_dict: |
| output = (prediction_scores,) + outputs[2:] |
| return ( |
| ((masked_lm_loss,) + output) if masked_lm_loss is not None else output |
| ) |
|
|
| return MaskedLMOutput( |
| loss=masked_lm_loss, |
| logits=prediction_scores, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
| def predict_contacts(self, tokens, attention_mask): |
| return self.esm.predict_contacts(tokens, attention_mask=attention_mask) |
|
|
|
|
| class EsmLMHead(nn.Module): |
| """ESM Head for masked language modeling.""" |
|
|
| def __init__(self, config): |
| super().__init__() |
| self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
| self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
| self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) |
| self.bias = nn.Parameter(torch.zeros(config.vocab_size)) |
|
|
| def forward(self, features, **kwargs): |
| x = self.dense(features) |
| x = gelu(x) |
| x = self.layer_norm(x) |
|
|
        # project back to size of vocabulary with bias
        x = self.decoder(x) + self.bias
| return x |
|
|
|
|
| @add_start_docstrings( |
| """ |
| ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled |
| output) e.g. for GLUE tasks. |
| """, |
| ESM_START_DOCSTRING, |
| ) |
| class EsmForSequenceClassification(EsmPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.config = config |
|
|
| self.esm = EsmModel(config, add_pooling_layer=False) |
| self.classifier = EsmClassificationHead(config) |
|
|
| self.init_weights() |
|
|
| @add_start_docstrings_to_model_forward( |
| ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=SequenceClassifierOutput, |
| config_class=_CONFIG_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, SequenceClassifierOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| outputs = self.esm( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| sequence_output = outputs[0] |
| logits = self.classifier(sequence_output) |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(logits.device) |
|
|
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and ( |
| labels.dtype == torch.long or labels.dtype == torch.int |
| ): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(logits.squeeze(), labels.squeeze()) |
| else: |
| loss = loss_fct(logits, labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(logits, labels) |
|
|
| if not return_dict: |
| output = (logits,) + outputs[2:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
|
|
| @add_start_docstrings( |
| """ |
| ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for |
| Named-Entity-Recognition (NER) tasks. |
| """, |
| ESM_START_DOCSTRING, |
| ) |
| class EsmForTokenClassification(EsmPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
|
|
| self.esm = EsmModel(config, add_pooling_layer=False) |
| self.dropout = nn.Dropout(config.hidden_dropout_prob) |
| self.classifier = nn.Linear(config.hidden_size, config.num_labels) |
|
|
| self.init_weights() |
|
|
| @add_start_docstrings_to_model_forward( |
| ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=TokenClassifierOutput, |
| config_class=_CONFIG_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, TokenClassifierOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| outputs = self.esm( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| sequence_output = outputs[0] |
|
|
| sequence_output = self.dropout(sequence_output) |
| logits = self.classifier(sequence_output) |
|
|
| loss = None |
| if labels is not None: |
| loss_fct = CrossEntropyLoss() |
|
|
| labels = labels.to(logits.device) |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + outputs[2:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return TokenClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
|
|
| class EsmClassificationHead(nn.Module): |
| """Head for sentence-level classification tasks.""" |
|
|
| def __init__(self, config): |
| super().__init__() |
| self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
| self.dropout = nn.Dropout(config.hidden_dropout_prob) |
| self.out_proj = nn.Linear(config.hidden_size, config.num_labels) |
|
|
| def forward(self, features, **kwargs): |
| x = features[:, 0, :] |
| x = self.dropout(x) |
| x = self.dense(x) |
| x = torch.tanh(x) |
| x = self.dropout(x) |
| x = self.out_proj(x) |
| return x |
|
|
|
|
| def create_position_ids_from_input_ids( |
| input_ids, padding_idx, past_key_values_length=0 |
| ): |
| """ |
| Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols |
| are ignored. This is modified from fairseq's `utils.make_positions`. |
| |
| Args: |
| x: torch.Tensor x: |
| |
| Returns: torch.Tensor |
| """ |
    # The series of casts and type-conversions here are carefully balanced to
    # work with both ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
| incremental_indices = ( |
| torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length |
| ) * mask |
| return incremental_indices.long() + padding_idx |
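# Illustrative example (values assumed): with padding_idx=1,
#     input_ids    = [[5, 6, 7, 1, 1]]
#     position_ids = [[2, 3, 4, 1, 1]]
# i.e. real tokens count up from padding_idx + 1 while pad positions stay at padding_idx.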
|
|
|
|
| |
|
|
# The remainder of this file implements masked-diffusion generation on top of the
# ESM backbone defined above.
from dataclasses import dataclass
|
|
| import torch.distributions as dists |
| from torch.nn import functional as F |
| from transformers.generation.configuration_utils import GenerationConfig |
| from transformers.utils import ModelOutput |
|
|
| try: |
| from tqdm import trange |
except ImportError:
    # Minimal fallback when tqdm is not installed: behaves like range and
    # ignores progress-bar keyword arguments such as `desc`.
    def trange(n, **kwargs):
        return range(n)
|
|
|
|
def _top_p_logits(logits, top_p):
    """Mask logits outside the top-p (nucleus) cumulative-probability set."""
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_indices_to_remove = cumulative_probs > top_p
    # Shift right so the first token exceeding top_p is kept, and always keep the top-1.
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0
    mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
    mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
    logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
    return logits
|
|
|
|
def _top_k_logits(logits, top_k):
    """Mask logits outside the top-k set; a `top_k` of 0 or None disables the filter."""
    if top_k is None or top_k == 0:
        return logits
| top_k = min(top_k, logits.size(-1)) |
| indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] |
| logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min) |
| return logits |
|
|
|
|
| def _sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, alg="origin"): |
| if temperature > 0: |
| logits = logits / temperature |
| if top_p is not None and top_p < 1: |
| logits = _top_p_logits(logits, top_p) |
| if top_k is not None: |
| logits = _top_k_logits(logits, top_k) |
| probs = torch.softmax(logits.float(), dim=-1) |
| if temperature > 0: |
| x0 = dists.Categorical(probs=probs).sample() |
| else: |
| _, x0 = probs.max(dim=-1) |
| confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1) |
|
|
| if alg == "topk_margin": |
| sorted_probs, _ = torch.sort(probs, dim=-1, descending=True) |
| confidence = sorted_probs[..., 0] - sorted_probs[..., 1] |
| elif alg == "entropy": |
| log_probs = torch.log(probs.clamp(min=1e-10)) |
| confidence = (probs * log_probs).sum(dim=-1) |
|
|
| return confidence, x0 |
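# Shape note (assumed): flattened `logits` of shape (N, vocab_size) yield `x0`
# (sampled token ids) and `confidence` (ranking scores), both of shape (N,).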
|
|
|
|
| @dataclass |
| class D3LMOutput(ModelOutput): |
| """Output type for D3LM diffusion generation.""" |
| sequences: torch.LongTensor = None |
| history: Optional[Tuple[torch.LongTensor]] = None |
|
|
|
|
| class MDMGenerationConfig(GenerationConfig): |
| """ |
| Configuration for Masked Diffusion Model generation. |
| |
| Args: |
        steps (`int`, defaults to 50):
            Number of diffusion denoising steps. More steps generally yield higher quality.
| alg (`str`, defaults to `"random"`): |
| Token unmasking order algorithm. One of: |
| - `"random"`: randomly select positions to unmask at each step. |
| - `"maskgit_plus"`: unmask positions with highest prediction confidence. |
| - `"entropy"`: unmask positions with lowest prediction entropy. |
| - `"topk_margin"`: unmask positions with highest top-1 vs top-2 probability margin. |
| - `"origin"`: stochastically unmask each position with probability proportional to schedule. |
| - `"p2"`: progressive prediction with remasking based on confidence. |
| temperature (`float`, defaults to 1.0): |
| Sampling temperature applied to the model logits. Higher values produce more diverse outputs. |
| top_p (`float`, defaults to 0.9): |
| Top-p (nucleus) sampling cutoff. Set to 1.0 to disable. |
        top_k (`int`, defaults to 0):
            Top-k sampling cutoff. Set to 0 to disable.
        eps (`float`, defaults to 1e-3):
            Final diffusion time; the time grid runs from t=1 down to t=eps.
        alg_temp (`float`, defaults to 0.9):
            Temperature for the confidence-based unmasking order (Gumbel-TopK). Set to 0 for deterministic order.
| output_history (`bool`, defaults to `False`): |
| Whether to return the sequence state at each diffusion step. |
| """ |
| def __init__(self, **kwargs): |
| if "do_sample" not in kwargs: |
| kwargs["do_sample"] = True |
| super().__init__(**kwargs) |
| self.temperature: float = kwargs.pop("temperature", 1.0) |
| self.top_p: Optional[float] = kwargs.pop("top_p", 0.9) |
| self.top_k: Optional[int] = kwargs.pop("top_k", 0) |
| self.eps: float = kwargs.pop("eps", 1e-3) |
| self.steps: int = kwargs.pop("steps", 50) |
| self.alg: str = kwargs.pop("alg", "random") |
| self.alg_temp: Optional[float] = kwargs.pop("alg_temp", 0.9) |
| self.output_history: bool = kwargs.pop("output_history", False) |
| self.mask_token_id = kwargs.pop("mask_token_id", None) |
| self.num_return_sequences = kwargs.pop("num_return_sequences", 1) |
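# A hedged usage sketch (the `tokenizer` object here is an assumption for
# illustration; any tokenizer exposing `mask_token_id` would do):
#
#     gen_cfg = MDMGenerationConfig(
#         steps=128,                    # more denoising steps -> higher quality, slower
#         alg="entropy",                # unmask lowest-entropy positions first
#         temperature=0.8, top_p=0.95,  # sampling controls
#         mask_token_id=tokenizer.mask_token_id,
#     )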
|
|
|
|
| class MDMGenerationMixin: |
| """Mixin that adds masked diffusion generation to any MaskedLM model.""" |
|
|
| @staticmethod |
| def _expand_inputs_for_generation( |
| expand_size: int = 1, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.LongTensor] = None, |
| ): |
| if expand_size == 1: |
| return input_ids, attention_mask |
| if input_ids is not None: |
| input_ids = input_ids.repeat_interleave(expand_size, dim=0) |
| if attention_mask is not None: |
| attention_mask = attention_mask.repeat_interleave(expand_size, dim=0) |
| return input_ids, attention_mask |
|
|
| @torch.no_grad() |
| def diffusion_generate( |
| self, |
| inputs: Optional[torch.Tensor] = None, |
| generation_config: Optional[MDMGenerationConfig] = None, |
| **kwargs, |
| ) -> Union[D3LMOutput, torch.LongTensor]: |
| """ |
| Generate DNA sequences using masked diffusion. |
| |
| Args: |
| inputs (`torch.LongTensor`): |
| Input token IDs, typically all `<mask>` tokens representing the desired output length. |
| Shape: `(1, sequence_length)`. |
| generation_config (`MDMGenerationConfig`, *optional*): |
| Generation configuration. If not provided, defaults are used. |
| |
| Returns: |
| `D3LMOutput` with `sequences` tensor of shape `(num_return_sequences, sequence_length)`. |
| """ |
| if generation_config is None: |
| generation_config = MDMGenerationConfig() |
| generation_config.update(**kwargs) |
|
|
| input_ids = inputs |
| attention_mask = kwargs.get("attention_mask", None) |
|
|
| if input_ids is None: |
| raise ValueError("`inputs` must be provided for diffusion generation.") |
|
|
| if generation_config.max_new_tokens is not None: |
| generation_config.max_length = ( |
| input_ids.shape[-1] + generation_config.max_new_tokens |
| ) |
| elif not hasattr(generation_config, "max_length") or generation_config.max_length is None: |
| generation_config.max_length = input_ids.shape[-1] |
|
|
| input_ids, attention_mask = self._expand_inputs_for_generation( |
| expand_size=generation_config.num_return_sequences, |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| ) |
|
|
| mask_token_id = generation_config.mask_token_id |
| if mask_token_id is None: |
| raise ValueError("`mask_token_id` must be set in the generation config.") |
|
|
        # Pad (on the right) up to max_length with mask tokens; note that a max_length
        # smaller than the input length would truncate the input here.
        x = F.pad(
            input_ids,
            (0, generation_config.max_length - input_ids.shape[1]),
            value=mask_token_id,
        )
|
|
| steps = generation_config.steps |
| eps = generation_config.eps |
| alg = generation_config.alg |
| alg_temp = generation_config.alg_temp |
| temperature = generation_config.temperature |
| top_p = generation_config.top_p |
| top_k = generation_config.top_k |
|
|
        histories = [] if generation_config.output_history else None
        # Positions that start out unmasked are prompt tokens and are never resampled.
        fix_mask = (x != mask_token_id)
        gen_attention_mask = (
            (x != self.config.pad_token_id).long()
            if self.config.pad_token_id is not None
            else None
        )
        # Diffusion time runs from t=1 (fully masked) down to t=eps (fully denoised).
        timesteps = torch.linspace(1, eps, steps + 1, device=x.device)
|
|
| for i in trange(steps, desc="Diffusion"): |
| mask_index = (x == mask_token_id) |
| if not mask_index.any(): |
| break |
|
|
| outputs = self(input_ids=x, attention_mask=gen_attention_mask) |
| logits = outputs.logits |
| mask_logits = logits[mask_index] |
| t = timesteps[i] |
| s = timesteps[i + 1] |
|
|
| if alg == "origin": |
| p_transfer = 1 - s / t if i < steps - 1 else 1 |
| x0 = torch.full_like( |
| x[mask_index], fill_value=mask_token_id, device=x.device, dtype=torch.long |
| ) |
| transfer_index = torch.rand(*x0.shape, device=x.device) < p_transfer |
| _, sampled = _sample_tokens( |
| mask_logits[transfer_index], temperature=temperature, |
| top_p=top_p, top_k=top_k, alg=alg |
| ) |
| x0[transfer_index] = sampled |
| x[mask_index] = x0 |
|
|
| elif alg == "p2": |
| kappa_t = (i + 1) / steps |
| confidence, x0 = _sample_tokens( |
| mask_logits, temperature, top_p, top_k, alg=alg |
| ) |
| full_conf = torch.full_like(x, float("inf"), dtype=confidence.dtype) |
| full_conf[mask_index] = confidence |
| full_conf[fix_mask] = float("inf") |
| num_positions = (~fix_mask).sum(dim=1, keepdim=True) |
| num_to_mask = (num_positions.float() * (1 - kappa_t)).long() |
| sorted_idx = torch.argsort(full_conf, dim=-1, descending=False) |
| max_mask = num_to_mask.max() |
| arange_mask = torch.arange(max_mask, device=x.device).unsqueeze(0) < num_to_mask |
| to_mask_idx = sorted_idx[:, :max_mask][arange_mask] |
| to_mask = torch.zeros_like(x, dtype=torch.bool) |
| batch_idx = ( |
| torch.arange(x.size(0), device=x.device) |
| .unsqueeze(1).expand(-1, max_mask)[arange_mask] |
| ) |
| to_mask[batch_idx, to_mask_idx] = True |
| x[to_mask] = mask_token_id |
| mask_candidates = mask_index & ~to_mask |
| x_proposals = torch.full_like(x, fill_value=mask_token_id) |
| x_proposals[mask_index] = x0 |
| x[mask_candidates] = x_proposals[mask_candidates] |
|
|
| elif alg in ["maskgit_plus", "entropy", "topk_margin"]: |
| confidence, x0 = _sample_tokens( |
| mask_logits, temperature=temperature, top_p=top_p, top_k=top_k, alg=alg |
| ) |
| confidence = confidence.to(mask_logits.dtype) |
| num_mask_tokens = mask_index.sum(dim=1) |
| if i < steps - 1: |
| n_transfer = (num_mask_tokens.float() * (1 - s / t)).long() |
| else: |
| n_transfer = num_mask_tokens |
| full_confidence = torch.full_like(x, -torch.inf, dtype=logits.dtype) |
| full_confidence[mask_index] = confidence |
| max_transfer = n_transfer.max().item() |
| if max_transfer > 0: |
                    if alg_temp is None or alg_temp == 0:
                        # Deterministic order: strictly highest-confidence positions first.
                        _, all_indices = torch.topk(full_confidence, max_transfer, dim=1)
                    else:
                        # Gumbel-TopK: add Gumbel noise so the unmasking order is stochastic.
                        scaled = full_confidence / alg_temp
                        uniform = torch.rand_like(scaled).clamp_(1e-20, 1 - 1e-20)
                        scores = scaled + (-torch.log(-torch.log(uniform)))
                        _, all_indices = torch.topk(scores, max_transfer, dim=1)
| valid_mask = ( |
| torch.arange(max_transfer, device=x.device).unsqueeze(0) |
| < n_transfer.unsqueeze(1) |
| ) |
| valid_indices = all_indices[valid_mask] |
| valid_batch = ( |
| torch.arange(x.size(0), device=x.device) |
| .unsqueeze(1).expand_as(all_indices)[valid_mask] |
| ) |
| x_ = torch.full_like(x, fill_value=mask_token_id) |
| x_[mask_index] = x0.clone() |
| x[valid_batch, valid_indices] = x_[valid_batch, valid_indices] |
|
|
| elif alg == "random": |
| _, x0 = _sample_tokens( |
| mask_logits, temperature=temperature, top_p=top_p, top_k=top_k, alg=alg |
| ) |
| num_mask_tokens = mask_index.sum(dim=1) |
| if i < steps - 1: |
| n_transfer = (num_mask_tokens.float() * (1 - s / t)).long() |
| else: |
| n_transfer = num_mask_tokens |
| max_transfer = n_transfer.max().item() |
| if max_transfer > 0: |
| x_ = torch.full_like(x, fill_value=mask_token_id) |
| x_[mask_index] = x0.clone() |
| for b in range(x.size(0)): |
| positions = mask_index[b].nonzero(as_tuple=True)[0] |
| n = n_transfer[b].item() |
| if len(positions) > 0 and n > 0: |
| n = min(n, len(positions)) |
| sel = torch.randperm(len(positions), device=x.device)[:n] |
| x[b, positions[sel]] = x_[b, positions[sel]] |
| else: |
| raise NotImplementedError(f"Algorithm '{alg}' is not implemented.") |
|
|
| if histories is not None: |
| histories.append(x.clone()) |
|
|
| if generation_config.return_dict_in_generate: |
| return D3LMOutput(sequences=x, history=histories) |
| return x |
|
|
|
|
| class D3LMForMaskedLM(EsmForMaskedLM, MDMGenerationMixin): |
| """ |
| D3LM: DNA Discrete Diffusion Language Model. |
| |
| A masked diffusion language model built on the ESM architecture, trained for |
| unconditional generation of mammalian DNA sequences. |
| |
| This model inherits from `EsmForMaskedLM` and adds `diffusion_generate()` for |
| iterative masked diffusion decoding. |
| |
| Use `diffusion_generate()` for unconditional DNA sequence generation, and |
| `forward()` for standard masked language modeling inference. |
| """ |
|
|
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.Tensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.Tensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.FloatTensor] = None, |
| encoder_attention_mask: Optional[torch.Tensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, MaskedLMOutput]: |
| return super().forward( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| labels=labels, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
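

if __name__ == "__main__":
    # Smoke-test sketch, not part of the library API. The checkpoint path below is a
    # placeholder/assumption; substitute a real D3LM checkpoint compatible with this
    # architecture (and a tokenizer that defines a mask token).
    from transformers import AutoTokenizer

    checkpoint = "path/to/d3lm-checkpoint"  # placeholder, not a released model id
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = D3LMForMaskedLM.from_pretrained(checkpoint).eval()

    # Start from a fully masked sequence of the desired output length.
    seq_len = 64
    input_ids = torch.full((1, seq_len), tokenizer.mask_token_id, dtype=torch.long)

    output = model.diffusion_generate(
        input_ids,
        generation_config=MDMGenerationConfig(
            steps=50,
            alg="entropy",
            mask_token_id=tokenizer.mask_token_id,
            max_length=seq_len,  # must cover the input length (see the F.pad call above)
        ),
        return_dict_in_generate=True,
    )
    print(tokenizer.batch_decode(output.sequences, skip_special_tokens=True))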
|
|