| | from dataclasses import dataclass |
| | from typing import Optional, Tuple |
| | import torch |
| |
|
| | from transformers.modeling_outputs import ( |
| | BaseModelOutputWithPast, |
| | CausalLMOutputWithPast, |
| | QuestionAnsweringModelOutput, |
| | SequenceClassifierOutputWithPast, |
| | ) |
| |
|
@dataclass
class ExtendedBaseModelOutputWithPast(BaseModelOutputWithPast):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Extends `BaseModelOutputWithPast` with optional per-layer intermediate
    activations (`router_inputs`, `mlp_activations`, `attn_outputs`) recorded
    by the producing model's forward pass.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        router_inputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer tensors captured as inputs to each layer's router.
            NOTE(review): exact shapes and capture point are defined by the
            model code that fills this field — confirm against the producing
            forward pass.
        mlp_activations (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer MLP activation tensors, when the producing model is
            configured to record them. NOTE(review): semantics defined by the
            producing model — confirm.
        attn_outputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer attention-block output tensors, when recorded.
            NOTE(review): semantics defined by the producing model — confirm.
    """

    # New per-layer diagnostic outputs; populated by the producing model's
    # forward pass, `None` when recording is disabled.
    router_inputs: Optional[Tuple[torch.FloatTensor, ...]] = None
    mlp_activations: Optional[Tuple[torch.FloatTensor, ...]] = None
    attn_outputs: Optional[Tuple[torch.FloatTensor, ...]] = None
| |
|
@dataclass
class ExtendedCausalLMOutputWithPast(CausalLMOutputWithPast):
    """
    Base class for causal language model (or autoregressive) outputs.

    Extends `CausalLMOutputWithPast` with optional per-layer intermediate
    activations (`router_inputs`, `mlp_activations`, `attn_outputs`) recorded
    by the producing model's forward pass.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        router_inputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer tensors captured as inputs to each layer's router.
            NOTE(review): exact shapes and capture point are defined by the
            model code that fills this field — confirm against the producing
            forward pass.
        mlp_activations (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer MLP activation tensors, when the producing model is
            configured to record them. NOTE(review): semantics defined by the
            producing model — confirm.
        attn_outputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer attention-block output tensors, when recorded.
            NOTE(review): semantics defined by the producing model — confirm.
    """

    # New per-layer diagnostic outputs; populated by the producing model's
    # forward pass, `None` when recording is disabled.
    router_inputs: Optional[Tuple[torch.FloatTensor, ...]] = None
    mlp_activations: Optional[Tuple[torch.FloatTensor, ...]] = None
    attn_outputs: Optional[Tuple[torch.FloatTensor, ...]] = None
| |
|
@dataclass
class ExtendedQuestionAnsweringModelOutput(QuestionAnsweringModelOutput):
    """
    Base class for outputs of question answering models.

    Extends `QuestionAnsweringModelOutput` with optional per-layer intermediate
    activations (`router_inputs`, `mlp_activations`, `attn_outputs`) recorded
    by the producing model's forward pass.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        router_inputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer tensors captured as inputs to each layer's router.
            NOTE(review): exact shapes and capture point are defined by the
            model code that fills this field — confirm against the producing
            forward pass.
        mlp_activations (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer MLP activation tensors, when the producing model is
            configured to record them. NOTE(review): semantics defined by the
            producing model — confirm.
        attn_outputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer attention-block output tensors, when recorded.
            NOTE(review): semantics defined by the producing model — confirm.
    """

    # New per-layer diagnostic outputs; populated by the producing model's
    # forward pass, `None` when recording is disabled.
    router_inputs: Optional[Tuple[torch.FloatTensor, ...]] = None
    mlp_activations: Optional[Tuple[torch.FloatTensor, ...]] = None
    attn_outputs: Optional[Tuple[torch.FloatTensor, ...]] = None
| |
|
@dataclass
class ExtendedSequenceClassifierOutputWithPast(SequenceClassifierOutputWithPast):
    """
    Base class for outputs of sentence classification models.

    Extends `SequenceClassifierOutputWithPast` with optional per-layer
    intermediate activations (`router_inputs`, `mlp_activations`,
    `attn_outputs`) recorded by the producing model's forward pass.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        router_inputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer tensors captured as inputs to each layer's router.
            NOTE(review): exact shapes and capture point are defined by the
            model code that fills this field — confirm against the producing
            forward pass.
        mlp_activations (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer MLP activation tensors, when the producing model is
            configured to record them. NOTE(review): semantics defined by the
            producing model — confirm.
        attn_outputs (`tuple(torch.FloatTensor)`, *optional*):
            Per-layer attention-block output tensors, when recorded.
            NOTE(review): semantics defined by the producing model — confirm.
    """

    # New per-layer diagnostic outputs; populated by the producing model's
    # forward pass, `None` when recording is disabled.
    router_inputs: Optional[Tuple[torch.FloatTensor, ...]] = None
    mlp_activations: Optional[Tuple[torch.FloatTensor, ...]] = None
    attn_outputs: Optional[Tuple[torch.FloatTensor, ...]] = None