from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss

from transformers import AutoModelForSequenceClassification
from transformers.modeling_outputs import Seq2SeqSequenceClassifierOutput
from transformers.models.t5.configuration_t5 import T5Config
from transformers.models.t5.modeling_t5 import T5PreTrainedModel, T5Model


class T5ClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config: T5Config):
        super().__init__()
        self.dense = nn.Linear(config.d_model, config.d_model)
        self.dropout = nn.Dropout(p=config.classifier_dropout)
        self.out_proj = nn.Linear(config.d_model, config.num_labels)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states


class T5ForSequenceClassification(T5PreTrainedModel):
    """T5 encoder-decoder with a sequence classification head on top.

    The head is applied to the decoder hidden state at the position of the last
    <eos> token, mirroring BART's sequence classification setup.
    """

    _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]

    def __init__(self, config: T5Config):
        super().__init__(config)
        self.transformer = T5Model(config)
        self.classification_head = T5ClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

        self.model_parallel = False

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed
            (Mean-Square loss), if `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        if input_ids is None and inputs_embeds is not None:
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
            )

        # If no explicit decoder inputs are provided, feed the right-shifted
        # encoder input ids to the decoder.
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            if input_ids is None:
                raise ValueError(
                    "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
                    "passed, `input_ids` cannot be `None`. Please pass either "
                    "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
                )
            decoder_input_ids = self._shift_right(input_ids)

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # Pool the decoder hidden state at the position of the last <eos> token
        # as the sentence representation for classification.
        eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device)

        if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
            raise ValueError("All examples must have the same number of <eos> tokens.")
        batch_size, _, hidden_size = sequence_output.shape
        sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
        logits = self.classification_head(sentence_representation)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Infer the problem type from `num_labels` and the label dtype the
            # first time labels are seen, then pick the matching loss.
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )


# Let `AutoModelForSequenceClassification.from_pretrained(...)` resolve T5 checkpoints
# to this class. Recent transformers releases already map T5Config to their own
# T5ForSequenceClassification, in which case `register` raises a ValueError that is
# safe to ignore.
try:
    AutoModelForSequenceClassification.register(T5Config, T5ForSequenceClassification)
except ValueError:
    pass
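

# A minimal smoke test, included only as an illustrative sketch (not part of the module
# above). It assumes a transformers version recent enough that T5Config accepts
# `classifier_dropout`; the tiny hyperparameters are arbitrary and chosen just so the
# forward pass runs quickly on CPU.
if __name__ == "__main__":
    config = T5Config(
        vocab_size=128,
        d_model=32,
        d_ff=64,
        num_layers=2,
        num_heads=2,
        num_labels=3,
        classifier_dropout=0.1,
        decoder_start_token_id=0,
    )
    model = T5ForSequenceClassification(config)
    model.eval()

    # Random token ids; force an <eos> token at the end of every sequence, since the
    # classification head pools the hidden state at the last <eos> position.
    input_ids = torch.randint(2, config.vocab_size, (2, 8))
    input_ids[:, -1] = config.eos_token_id

    with torch.no_grad():
        out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))
    print(out.logits.shape)  # -> torch.Size([2, 3])
    print(out.loss)          # cross-entropy loss, since num_labels > 1 and labels are ints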