| """PyTorch BioGPT model.""" |
|
|
| import math |
| from typing import Optional, Union |
|
|
| import torch |
| import torch.nn as nn |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
| from ...activations import ACT2FN |
| from ...cache_utils import Cache, DynamicCache |
| from ...generation import GenerationMixin |
| from ...modeling_attn_mask_utils import ( |
| AttentionMaskConverter, |
| ) |
| from ...modeling_outputs import ( |
| BaseModelOutputWithPastAndCrossAttentions, |
| CausalLMOutputWithCrossAttentions, |
| SequenceClassifierOutputWithPast, |
| TokenClassifierOutput, |
| ) |
| from ...modeling_utils import PreTrainedModel |
| from ...processing_utils import Unpack |
| from ...utils import ( |
| TransformersKwargs, |
| auto_docstring, |
| is_torch_flex_attn_available, |
| logger, |
| ) |
| from ...utils.deprecation import deprecate_kwarg |
| from ..bart.modeling_bart import ( |
| BartAttention, |
| BartDecoderLayer, |
| BartScaledWordEmbedding, |
| ) |
| from ..opt.modeling_opt import OPTLearnedPositionalEmbedding |
| from .configuration_biogpt import BioGptConfig |
|
|
|
|
| if is_torch_flex_attn_available(): |
| from ...integrations.flex_attention import BlockMask, make_flex_block_causal_mask |
|
|
|
|
class BioGptLearnedPositionalEmbedding(OPTLearnedPositionalEmbedding):
    def forward(
        self,
        attention_mask: torch.LongTensor,
        past_key_values_length: int = 0,
        position_ids: Optional[torch.LongTensor] = None,
    ):
        """`attention_mask` is expected to be [bsz x seqlen]."""
        return super().forward(attention_mask, past_key_values_length, position_ids)


class BioGptScaledWordEmbedding(BartScaledWordEmbedding):
    pass


class BioGptAttention(BartAttention):
    pass


class BioGptDecoderLayer(BartDecoderLayer):
    def __init__(self, config: BioGptConfig, layer_idx: Optional[int] = None):
        super().__init__(config)
        self.embed_dim = config.hidden_size

        self.self_attn = BioGptAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_probs_dropout_prob,
            is_decoder=True,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.hidden_dropout_prob
        self.activation_fn = ACT2FN[config.hidden_act]

        self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)

        # BioGPT is decoder-only, so the cross-attention modules inherited from Bart are removed
        del self.encoder_attn
        del self.encoder_attn_layer_norm

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        position_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(num_attention_heads,)`.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states

        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
            position_ids=position_ids,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class BioGptPreTrainedModel(PreTrainedModel):
    config: BioGptConfig
    base_model_prefix = "biogpt"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    _can_compile_fullgraph = True

    def _update_causal_mask(
        self,
        attention_mask: Optional[Union[torch.Tensor, "BlockMask"]],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
    ):
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            # without an explicit mask, build a causal block mask over the full input
            elif attention_mask is None:
                attention_mask = make_flex_block_causal_mask(
                    torch.ones(
                        size=(input_tensor.shape[0], input_tensor.shape[1]),
                        device=input_tensor.device,
                    )
                )
            return attention_mask

        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        # For SDPA, when possible, we rely on its `is_causal` argument instead of its `attn_mask` argument,
        # in order to dispatch on Flash Attention 2. This is not compatible with compilable caches, as SDPA
        # would then fail to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        if self.config._attn_implementation == "sdpa" and not using_compilable_cache:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # in all other cases, materialize a dense 4D causal mask
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
        ):
            # Attend to all tokens in fully masked rows, e.g. the relevant first rows when using left padding.
            # This is required by the memory-efficient path of F.scaled_dot_product_attention
            # (https://github.com/pytorch/pytorch/issues/110213).
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static
                cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
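
        Example (an illustrative sketch of the mask construction, not part of the public API; a 2-token query
        over a 4-slot cache, with no padding mask):

        ```python
        >>> import torch
        >>> mask = BioGptPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position(
        ...     attention_mask=None,
        ...     sequence_length=2,
        ...     target_length=4,
        ...     dtype=torch.float32,
        ...     cache_position=torch.arange(2),
        ...     batch_size=1,
        ... )
        >>> mask.shape  # each query position may only attend to keys at or before its own position
        torch.Size([1, 1, 2, 4])
        ```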
| """ |
| if attention_mask is not None and attention_mask.dim() == 4: |
| |
| causal_mask = attention_mask |
| else: |
| min_dtype = torch.finfo(dtype).min |
| causal_mask = torch.full( |
| (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device |
| ) |
| if sequence_length != 1: |
| causal_mask = torch.triu(causal_mask, diagonal=1) |
| causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) |
| causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) |
| if attention_mask is not None: |
| causal_mask = causal_mask.clone() |
| mask_length = attention_mask.shape[-1] |
| padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( |
| causal_mask.device |
| ) |
| padding_mask = padding_mask == 0 |
| causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( |
| padding_mask, min_dtype |
| ) |
|
|
| return causal_mask |
|
|
|
|
@auto_docstring
class BioGptModel(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.config = config
        self.layerdrop = config.layerdrop
        self.dropout = config.hidden_dropout_prob
        self.embed_dim = config.hidden_size
        self.padding_idx = config.pad_token_id
        embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0

        self.embed_tokens = BioGptScaledWordEmbedding(
            config.vocab_size, self.embed_dim, self.padding_idx, embed_scale=embed_scale
        )
        self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim)

        self.layers = nn.ModuleList([BioGptDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(self.embed_dim)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
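        r"""
        Example (a minimal usage sketch, assuming the `microsoft/biogpt` checkpoint is available on the Hub):

        ```python
        >>> from transformers import AutoTokenizer, BioGptModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
        >>> model = BioGptModel.from_pretrained("microsoft/biogpt")

        >>> inputs = tokenizer("Bicalutamide is an antiandrogen.", return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> outputs.last_hidden_state.shape[-1] == model.config.hidden_size
        True
        ```
        """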
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds; exactly one of the two must be provided
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        else:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1]

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # set up the cache, converting any legacy tuple format on the fly
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if use_cache and isinstance(past_key_values, tuple):
            logger.warning_once(
                "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
                "You should pass an instance of `DynamicCache` instead, e.g. "
                "`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`."
            )
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)

        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None:
            # the required mask length includes the tokens already stored in the cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        self_attn_cache = past_key_values

        causal_mask = self._update_causal_mask(
            attention_mask,
            inputs_embeds,
            cache_position,
            self_attn_cache,
        )

        if position_ids is None:
            # derive position_ids from the 2D attention mask so that padding tokens are not counted
            position_ids = torch.cumsum(attention_mask, dim=1)
            position_ids = (position_ids * attention_mask - 1).long()
            # cut positions if `past_key_values_length` is > 0
            position_ids = position_ids[:, past_key_values_length:]

        positions = self.embed_positions(attention_mask, past_key_values_length, position_ids=position_ids)
        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = None

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                position_ids=position_ids,
                cache_position=cache_position,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        hidden_states = self.layer_norm(hidden_states)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@auto_docstring(
    custom_intro="""
    BioGPT Model with a `language modeling` head on top for CLM fine-tuning.
    """
)
class BioGptForCausalLM(BioGptPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["output_projection.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.biogpt = BioGptModel(config)
        self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.output_projection

    def set_output_embeddings(self, new_embeddings):
        self.output_projection = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
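
        Example (a minimal sketch; `microsoft/biogpt` is the reference checkpoint for this architecture):

        ```python
        >>> from transformers import AutoTokenizer, BioGptForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
        >>> model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

        >>> inputs = tokenizer("COVID-19 is a disease caused by", return_tensors="pt")
        >>> outputs = model(**inputs, labels=inputs["input_ids"])  # labels are shifted inside the model
        >>> loss, logits = outputs.loss, outputs.logits
        ```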
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.biogpt( |
| input_ids, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| past_key_values=past_key_values, |
| use_cache=use_cache, |
| position_ids=position_ids, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| **kwargs, |
| ) |
|
|
| sequence_output = outputs[0] |
| prediction_scores = self.output_projection(sequence_output) |
|
|
| lm_loss = None |
| if labels is not None: |
| lm_loss = self.loss_function( |
| prediction_scores, |
| labels, |
| vocab_size=self.config.vocab_size, |
| **kwargs, |
| ) |
|
|
| if not return_dict: |
| output = (prediction_scores,) + outputs[1:] |
| return ((lm_loss,) + output) if lm_loss is not None else output |
|
|
| return CausalLMOutputWithCrossAttentions( |
| loss=lm_loss, |
| logits=prediction_scores, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| cross_attentions=outputs.cross_attentions, |
| ) |
|
|
|
|
@auto_docstring
class BioGptForTokenClassification(BioGptPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.biogpt = BioGptModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
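
        Example (an illustrative sketch; no official token-classification checkpoint exists for BioGPT, so the
        classification head below is randomly initialized and `num_labels=2` is an assumption):

        ```python
        >>> from transformers import AutoTokenizer, BioGptForTokenClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
        >>> model = BioGptForTokenClassification.from_pretrained("microsoft/biogpt", num_labels=2)

        >>> inputs = tokenizer("Aspirin inhibits platelet aggregation.", return_tensors="pt")
        >>> logits = model(**inputs).logits  # shape: (batch_size, sequence_length, num_labels)
        ```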
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.biogpt( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| position_ids=position_ids, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| ) |
|
|
| hidden_states = transformer_outputs[0] |
| hidden_states = self.dropout(hidden_states) |
| logits = self.classifier(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| loss_fct = CrossEntropyLoss() |
| |
| if attention_mask is not None: |
| active_loss = attention_mask.view(-1) == 1 |
| active_logits = logits.view(-1, self.num_labels) |
| active_labels = torch.where( |
| active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) |
| ) |
| loss = loss_fct(active_logits, active_labels) |
| else: |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + transformer_outputs[2:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return TokenClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
|
|
@auto_docstring(
    custom_intro="""
    The BioGpt Model transformer with a sequence classification head on top (linear layer).

    [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it is required to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
    If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess
    the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value
    in each row of the batch).
    """
)
class BioGptForSequenceClassification(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.biogpt = BioGptModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
    ) -> Union[tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
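
        Example (an illustrative sketch; the score head is randomly initialized when loading the base checkpoint,
        and `num_labels=2` is an assumption):

        ```python
        >>> from transformers import AutoTokenizer, BioGptForSequenceClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
        >>> model = BioGptForSequenceClassification.from_pretrained("microsoft/biogpt", num_labels=2)

        >>> inputs = tokenizer("The patient responded well to treatment.", return_tensors="pt")
        >>> logits = model(**inputs).logits  # pooled at the last non-padding token of each sequence
        ```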
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.biogpt( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| position_ids=position_ids, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| ) |
| hidden_states = transformer_outputs[0] |
| slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep |
| logits = self.score(hidden_states[:, slice_indices, :]) |
|
|
| if input_ids is not None: |
| batch_size, sequence_length = input_ids.shape[:2] |
| else: |
| batch_size, sequence_length = inputs_embeds.shape[:2] |
|
|
| if self.config.pad_token_id is None: |
| sequence_length = -1 |
| else: |
| if input_ids is not None: |
| sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) |
| else: |
| sequence_length = -1 |
| logger.warning_once( |
| f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " |
| "unexpected if using padding tokens in conjunction with `inputs_embeds.`" |
| ) |
|
|
| pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length] |
|
|
| loss = None |
| if labels is not None: |
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) |
| else: |
| loss = loss_fct(pooled_logits, labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(pooled_logits, labels) |
| if not return_dict: |
| output = (pooled_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutputWithPast( |
| loss=loss, |
| logits=pooled_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
| def get_input_embeddings(self): |
| return self.biogpt.embed_tokens |
|
|
| def set_input_embeddings(self, value): |
| self.biogpt.embed_tokens = value |
|
|
|
|
__all__ = [
    "BioGptForCausalLM",
    "BioGptForTokenClassification",
    "BioGptForSequenceClassification",
    "BioGptModel",
    "BioGptPreTrainedModel",
]