| | """ PyTorch DeBERTa model.""" |
| |
|
| | from collections.abc import Sequence |
| | from typing import Optional, Tuple, Union |
| |
|
| | import torch |
| | import torch.utils.checkpoint |
| | from torch import nn |
| | from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
| |
|
| | from ...activations import ACT2FN |
| | from ...modeling_outputs import ( |
| | BaseModelOutput, |
| | MaskedLMOutput, |
| | QuestionAnsweringModelOutput, |
| | SequenceClassifierOutput, |
| | TokenClassifierOutput, |
| | ) |
| | from ...modeling_utils import PreTrainedModel |
| | from ...pytorch_utils import softmax_backward_data |
| | from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging |
| | from .configuration_deberta import DebertaConfig |
| |
|
| |
|
| | logger = logging.get_logger(__name__) |
| | _CONFIG_FOR_DOC = "DebertaConfig" |
| | _CHECKPOINT_FOR_DOC = "microsoft/deberta-base" |
| |
|
# Masked LM docstring
_CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback"
_MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
_MASKED_LM_EXPECTED_LOSS = "0.54"

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad"
_QA_EXPECTED_OUTPUT = "' a nice puppet'"
_QA_EXPECTED_LOSS = 0.14
_QA_TARGET_START_INDEX = 12
_QA_TARGET_END_INDEX = 14


DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/deberta-base",
    "microsoft/deberta-large",
    "microsoft/deberta-xlarge",
    "microsoft/deberta-base-mnli",
    "microsoft/deberta-large-mnli",
    "microsoft/deberta-xlarge-mnli",
]


class ContextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = StableDropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token)
        pooled_output = self.dense(context_token)
        pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
        return pooled_output

    @property
    def output_dim(self):
        return self.config.hidden_size


class XSoftmax(torch.autograd.Function):
    """
    Masked softmax that is optimized for saving memory.

    Args:
        input (`torch.tensor`): The input tensor on which softmax is applied.
        mask (`torch.IntTensor`):
            The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
        dim (int): The dimension along which softmax is applied.

    Example:

    ```python
    >>> import torch
    >>> from transformers.models.deberta.modeling_deberta import XSoftmax

    >>> # Make a tensor
    >>> x = torch.randn([4, 20, 100])

    >>> # Create a mask
    >>> mask = (x > 0).int()

    >>> # Specify the dimension to apply softmax
    >>> dim = -1

    >>> y = XSoftmax.apply(x, mask, dim)
    ```"""

    @staticmethod
    def forward(self, input, mask, dim):
        self.dim = dim
        rmask = ~(mask.to(torch.bool))

        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
        output = torch.softmax(output, self.dim)
        output.masked_fill_(rmask, 0)
        self.save_for_backward(output)
        return output

    @staticmethod
    def backward(self, grad_output):
        (output,) = self.saved_tensors
        inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
        return inputGrad, None, None

    @staticmethod
    def symbolic(g, self, mask, dim):
        import torch.onnx.symbolic_helper as sym_help
        from torch.onnx.symbolic_opset9 import masked_fill, softmax

        mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
        r_mask = g.op(
            "Cast",
            g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
            to_i=sym_help.cast_pytorch_to_onnx["Bool"],
        )
        output = masked_fill(
            g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
        )
        output = softmax(g, output, dim)
        return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))


class DropoutContext(object):
    def __init__(self):
        self.dropout = 0
        self.mask = None
        self.scale = 1
        self.reuse_mask = True


def get_mask(input, local_context):
    if not isinstance(local_context, DropoutContext):
        dropout = local_context
        mask = None
    else:
        dropout = local_context.dropout
        dropout *= local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None

    if dropout > 0 and mask is None:
        mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)

    if isinstance(local_context, DropoutContext):
        if local_context.mask is None:
            local_context.mask = mask

    return mask, dropout


class XDropout(torch.autograd.Function):
    """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""

    @staticmethod
    def forward(ctx, input, local_ctx):
        mask, dropout = get_mask(input, local_ctx)
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return grad_output.masked_fill(mask, 0) * ctx.scale, None
        else:
            return grad_output, None

    @staticmethod
    def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
        from torch.onnx import symbolic_opset12

        dropout_p = local_ctx
        if isinstance(local_ctx, DropoutContext):
            dropout_p = local_ctx.dropout
        # StableDropout only calls this function when training, so dropout is always enabled in the exported graph.
        train = True
        return symbolic_opset12.dropout(g, input, dropout_p, train)


class StableDropout(nn.Module):
    """
    Optimized dropout module for stabilizing training.

    Args:
        drop_prob (float): the dropout probability
    """

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0
        self.context_stack = None

    def forward(self, x):
        """
        Call the module

        Args:
            x (`torch.tensor`): The input tensor to which dropout is applied
        """
        if self.training and self.drop_prob > 0:
            return XDropout.apply(x, self.get_context())
        return x

    def clear_context(self):
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for c in self.context_stack:
            c.reuse_mask = reuse_mask
            c.scale = scale

    def get_context(self):
        if self.context_stack is not None:
            if self.count >= len(self.context_stack):
                self.context_stack.append(DropoutContext())
            ctx = self.context_stack[self.count]
            ctx.dropout = self.drop_prob
            self.count += 1
            return ctx
        else:
            return self.drop_prob
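

# A minimal sketch of how `StableDropout`/`XDropout` behave: outside training the module is the
# identity, and during training surviving activations are rescaled by 1 / (1 - drop_prob) through a
# single masked_fill instead of an elementwise multiplication. The tensor shape and drop probability
# below are illustrative assumptions, not values used by the model.
def _stable_dropout_sketch():
    torch.manual_seed(0)
    drop = StableDropout(0.5)
    x = torch.ones(4, 8)

    drop.eval()
    assert torch.equal(drop(x), x)  # no-op outside training

    drop.train()
    y = drop(x)
    # every entry is either dropped (0.0) or rescaled by 1 / (1 - 0.5) = 2.0
    assert set(y.unique().tolist()) <= {0.0, 2.0}
    return y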


class DebertaLayerNorm(nn.Module):
    """LayerNorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size))
        self.bias = nn.Parameter(torch.zeros(size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_type = hidden_states.dtype
        hidden_states = hidden_states.float()
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
        hidden_states = hidden_states.to(input_type)
        y = self.weight * hidden_states + self.bias
        return y
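

# A small sketch comparing `DebertaLayerNorm` with `torch.nn.LayerNorm`. With default affine
# parameters (weight=1, bias=0) and float32 inputs the two are expected to agree closely, since both
# place the epsilon inside the square root; the shapes, epsilon and tolerance below are assumptions
# made for illustration only.
def _deberta_layer_norm_sketch():
    torch.manual_seed(0)
    hidden = torch.randn(2, 5, 16)
    deberta_ln = DebertaLayerNorm(16, eps=1e-7)
    torch_ln = nn.LayerNorm(16, eps=1e-7)
    return torch.allclose(deberta_ln(hidden), torch_ln(hidden), atol=1e-5)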


class DebertaSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class DebertaAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = DisentangledSelfAttention(config)
        self.output = DebertaSelfOutput(config)
        self.config = config

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        self_output = self.self(
            hidden_states,
            attention_mask,
            output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        if output_attentions:
            self_output, att_matrix = self_output
        if query_states is None:
            query_states = hidden_states
        attention_output = self.output(self_output, query_states)

        if output_attentions:
            return (attention_output, att_matrix)
        else:
            return attention_output


class DebertaIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class DebertaOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class DebertaLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = DebertaAttention(config)
        self.intermediate = DebertaIntermediate(config)
        self.output = DebertaOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
        output_attentions=False,
    ):
        attention_output = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        if output_attentions:
            attention_output, att_matrix = attention_output
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        if output_attentions:
            return (layer_output, att_matrix)
        else:
            return layer_output


class DebertaEncoder(nn.Module):
    """Modified BertEncoder with relative position bias support"""

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, "relative_attention", False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if attention_mask.dim() <= 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)

        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
        return relative_pos

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_hidden_states=True,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        return_dict=True,
    ):
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    next_kv,
                    attention_mask,
                    query_states,
                    relative_pos,
                    rel_embeddings,
                )
            else:
                hidden_states = layer_module(
                    next_kv,
                    attention_mask,
                    query_states=query_states,
                    relative_pos=relative_pos,
                    rel_embeddings=rel_embeddings,
                    output_attentions=output_attentions,
                )

            if output_attentions:
                hidden_states, att_m = hidden_states

            if query_states is not None:
                query_states = hidden_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = hidden_states

            if output_attentions:
                all_attentions = all_attentions + (att_m,)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
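

# A short sketch of what `DebertaEncoder.get_attention_mask` does with an ordinary 2D padding mask:
# it is expanded to a [batch, 1, seq_len, seq_len] matrix in which entry (i, j) is 1 only when both
# position i and position j are real tokens. The three-token mask below is an illustrative
# assumption.
def _attention_mask_expansion_sketch():
    mask = torch.tensor([[1, 1, 0]])  # one sequence whose last position is padding
    extended = mask.unsqueeze(1).unsqueeze(2)
    pairwise = extended * extended.squeeze(-2).unsqueeze(-1)
    assert pairwise.shape == (1, 1, 3, 3)
    assert pairwise[0, 0].tolist() == [[1, 1, 0], [1, 1, 0], [0, 0, 0]]
    return pairwise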


def build_relative_position(query_size, key_size, device):
    """
    Build relative position according to the query and key

    We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of
    the key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is
    \\(R_{q \\rightarrow k} = P_q - P_k\\).

    Args:
        query_size (int): the length of the query
        key_size (int): the length of the key

    Return:
        `torch.LongTensor`: A tensor with shape [1, query_size, key_size]

    """

    q_ids = torch.arange(query_size, dtype=torch.long, device=device)
    k_ids = torch.arange(key_size, dtype=torch.long, device=device)
    rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = rel_pos_ids.unsqueeze(0)
    return rel_pos_ids
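

# A worked example for `build_relative_position`: entry [0, i, j] of the returned tensor is simply
# i - j, the signed distance from query position i to key position j. The query/key lengths below
# are arbitrary illustrative values.
def _relative_position_sketch():
    rel = build_relative_position(3, 4, torch.device("cpu"))
    assert rel.shape == (1, 3, 4)
    assert rel[0, 0].tolist() == [0, -1, -2, -3]  # query position 0 against key positions 0..3
    assert rel[0, 2].tolist() == [2, 1, 0, -1]
    return rel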


@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])


@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])


@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))


class DisentangledSelfAttention(nn.Module):
    """
    Disentangled self-attention module

    Parameters:
        config (`DebertaConfig`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*; for more details, please refer to [`DebertaConfig`]

    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
        self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
        self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []

        self.relative_attention = getattr(config, "relative_attention", False)
        self.talking_head = getattr(config, "talking_head", False)

        if self.talking_head:
            self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
            self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)

        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_dropout = StableDropout(config.hidden_dropout_prob)

            if "c2p" in self.pos_att_type:
                self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
            if "p2c" in self.pos_att_type:
                self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = StableDropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        """
        Call the module

        Args:
            hidden_states (`torch.FloatTensor`):
                Input states to the module, usually the output from the previous layer. They will be the Q, K and V
                in *Attention(Q,K,V)*

            attention_mask (`torch.BoolTensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size and *N* is the maximum
                sequence length. Element [i,j] = *1* means the *i*-th token in the input can attend to the *j*-th
                token.

            output_attentions (`bool`, optional):
                Whether to return the attention matrix.

            query_states (`torch.FloatTensor`, optional):
                The *Q* state in *Attention(Q,K,V)*.

            relative_pos (`torch.LongTensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].

            rel_embeddings (`torch.FloatTensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].


        """
        if query_states is None:
            qp = self.in_proj(hidden_states)
            query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
        else:

            def linear(w, b, x):
                if b is not None:
                    return torch.matmul(x, w.t()) + b.t()
                else:
                    return torch.matmul(x, w.t())

            ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
            qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
            qkvb = [None] * 3

            q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype))
            k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)]
            query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]

        query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
        value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])

        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        scale_factor = 1 + len(self.pos_att_type)
        scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
        query_layer = query_layer / scale.to(dtype=query_layer.dtype)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)

        if rel_att is not None:
            attention_scores = attention_scores + rel_att

        if self.talking_head:
            attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
        attention_probs = self.dropout(attention_probs)
        if self.talking_head:
            attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(new_context_layer_shape)
        if output_attentions:
            return (context_layer, attention_probs)
        else:
            return context_layer

    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        elif relative_pos.dim() != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")

        att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
        relative_pos = relative_pos.long().to(query_layer.device)
        rel_embeddings = rel_embeddings[
            self.max_relative_positions - att_span : self.max_relative_positions + att_span, :
        ].unsqueeze(0)

        score = 0

        # content->position
        if "c2p" in self.pos_att_type:
            pos_key_layer = self.pos_proj(rel_embeddings)
            pos_key_layer = self.transpose_for_scores(pos_key_layer)
            c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
            score += c2p_att

        # position->content
        if "p2c" in self.pos_att_type:
            pos_query_layer = self.pos_q_proj(rel_embeddings)
            pos_query_layer = self.transpose_for_scores(pos_query_layer)
            pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
            if query_layer.size(-2) != key_layer.size(-2):
                r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
            else:
                r_pos = relative_pos
            p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
            p2c_att = torch.gather(
                p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)
            ).transpose(-1, -2)

            if query_layer.size(-2) != key_layer.size(-2):
                pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
                p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
            score += p2c_att

        return score
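

# A small sketch of the index bucketing used by the content->position ("c2p") term above: relative
# distances are shifted by `att_span` and clamped into [0, 2 * att_span - 1], so distances beyond
# +/- att_span all reuse the boundary rows of the relative-position embedding table. The span and
# sequence length below are illustrative assumptions.
def _c2p_index_sketch():
    att_span = 4
    rel = build_relative_position(6, 6, torch.device("cpu"))
    c2p_pos = torch.clamp(rel + att_span, 0, att_span * 2 - 1)
    assert int(c2p_pos.min()) == 0 and int(c2p_pos.max()) == att_span * 2 - 1
    return c2p_pos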


class DebertaEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, "pad_token_id", 0)
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)

        self.position_biased_input = getattr(config, "position_biased_input", True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)

        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)

        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

        # position_ids (1, len position emb) is contiguous in memory
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)

        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings

        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)

        embeddings = self.LayerNorm(embeddings)

        if mask is not None:
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)

            embeddings = embeddings * mask

        embeddings = self.dropout(embeddings)
        return embeddings


class DebertaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaConfig
    base_model_prefix = "deberta"
    _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, DebertaEncoder):
            module.gradient_checkpointing = value


DEBERTA_START_DOCSTRING = r"""
    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With
    those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
    usage and behavior.


    Parameters:
        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

DEBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
class DebertaModel(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = DebertaEmbeddings(config)
        self.encoder = DebertaEncoder(config)
        self.z_steps = 0
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError("The prune function is not implemented in DeBERTa model.")

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=attention_mask,
            inputs_embeds=inputs_embeds,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        encoded_layers = encoder_outputs[1]

        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)

        sequence_output = encoded_layers[-1]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )
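

# A minimal usage sketch for `DebertaModel` (it assumes the `microsoft/deberta-base` checkpoint can
# be downloaded or is cached locally; the input sentence is an arbitrary example).
def _deberta_model_usage_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
    model = DebertaModel.from_pretrained("microsoft/deberta-base")
    inputs = tokenizer("DeBERTa improves BERT with disentangled attention.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state  # [1, sequence_length, hidden_size]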


@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
class DebertaForMaskedLM(DebertaPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.deberta = DebertaModel(config)
        self.cls = DebertaOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_MASKED_LM,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="[MASK]",
        expected_output=_MASKED_LM_EXPECTED_OUTPUT,
        expected_loss=_MASKED_LM_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
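

# A minimal fill-mask sketch for `DebertaForMaskedLM`, using the checkpoint referenced by the
# docstring constants above (`lsanochkin/deberta-large-feedback`); downloading it is an assumption
# of this sketch and the prompt sentence is arbitrary.
def _deberta_masked_lm_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("lsanochkin/deberta-large-feedback")
    model = DebertaForMaskedLM.from_pretrained("lsanochkin/deberta-large-feedback")
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    predicted_ids = logits[0, mask_positions].argmax(dim=-1)
    return tokenizer.decode(predicted_ids)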


class DebertaPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.dense = nn.Linear(config.hidden_size, self.embedding_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class DebertaLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = DebertaPredictionHeadTransform(config)

        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class DebertaOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaForSequenceClassification(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaModel(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        self.classifier = nn.Linear(output_dim, num_labels)
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    # regression task
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        labeled_logits = torch.gather(
                            logits, 0, label_index.expand(label_index.size(0), logits.size(1))
                        )
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        loss = torch.tensor(0).to(logits)
                else:
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -((log_softmax(logits) * labels).sum(-1)).mean()
            elif self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
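

# A minimal sketch for `DebertaForSequenceClassification`, using one of the MNLI checkpoints listed
# in `DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST`; network access and the premise/hypothesis pair are
# assumptions of this sketch.
def _deberta_sequence_classification_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base-mnli")
    model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base-mnli")
    inputs = tokenizer("A man is playing a guitar.", "A person plays an instrument.", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(dim=-1))]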


@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaForTokenClassification(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels -
            1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
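

# A shape-only sketch for `DebertaForTokenClassification` built from a small randomly initialized
# config (the config values below are illustrative assumptions, not a trained checkpoint): each
# token in the batch receives one logit per label.
def _deberta_token_classification_sketch():
    config = DebertaConfig(
        vocab_size=128,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        num_labels=5,
    )
    model = DebertaForTokenClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 7))
    with torch.no_grad():
        logits = model(input_ids=input_ids).logits
    assert logits.shape == (2, 7, 5)  # [batch_size, sequence_length, num_labels]
    return logits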


@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaForQuestionAnswering(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_QA,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_QA_EXPECTED_OUTPUT,
        expected_loss=_QA_EXPECTED_LOSS,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification
            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
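

# A minimal extractive-QA sketch for `DebertaForQuestionAnswering`, using the SQuAD checkpoint
# referenced by the docstring constants above (`Palak/microsoft_deberta-large_squad`); downloading
# it and the toy question/context pair are assumptions of this sketch.
def _deberta_question_answering_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Palak/microsoft_deberta-large_squad")
    model = DebertaForQuestionAnswering.from_pretrained("Palak/microsoft_deberta-large_squad")
    question, context = "Who wrote the play?", "The play was written by William Shakespeare."
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    start = int(outputs.start_logits.argmax())
    end = int(outputs.end_logits.argmax())
    return tokenizer.decode(inputs.input_ids[0, start : end + 1])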