# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """PyTorch BERT model. """ |
|
|
| from __future__ import absolute_import, division, print_function, unicode_literals |
|
|
| import logging |
| import math |
| import sys |
|
|
| import torch |
| import torch.utils.checkpoint |
| from torch import nn |
|
|
| from .configuration_bert import BertConfig |
|
|
| logger = logging.getLogger(__name__) |
|
|
|
|
def gelu(x):
    """Original implementation of the GELU activation function from the Google BERT repo
    when it was initially created. For reference, OpenAI GPT's GELU is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    See also https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def gelu_new(x):
    """Implementation of the GELU activation function currently in the Google BERT repo
    (identical to OpenAI GPT). See also https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
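
# The two GELU variants above differ only in implementation: `gelu` uses the exact
# erf form, `gelu_new` the tanh approximation. A quick illustrative check, kept as
# a comment so nothing runs at import time (the 1e-3 bound is a rough estimate,
# not a guarantee):
#
#     x = torch.linspace(-3.0, 3.0, steps=101)
#     assert (gelu(x) - gelu_new(x)).abs().max() < 1e-3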


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new}
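
# ACT2FN maps the string names accepted in `config.hidden_act` to callables,
# as BertIntermediate and BertPredictionHeadTransform do below. Illustrative use:
#
#     act = ACT2FN["gelu"]
#     y = act(torch.randn(2, 8))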

BertLayerNorm = torch.nn.LayerNorm


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with the TensorFlow model
        # variable name and be able to load any TensorFlow checkpoint file.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        # Sum the three embeddings, then normalize and apply dropout.
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
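
# Shape sketch (illustrative): for `input_ids` of shape (batch_size, seq_length),
# BertEmbeddings returns a float tensor of shape (batch_size, seq_length, hidden_size).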


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # Reshape from (batch, seq_length, all_head_size) to
        # (batch, num_heads, seq_length, head_size).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel's forward()).
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but it is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
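
# Scaled dot-product attention in the class above, as a shape walkthrough
# (illustrative; the example sizes are hypothetical, not taken from any config):
#
#     hidden_states:                    (B, S, H)                      e.g. (2, 128, 768)
#     query/key/value after transpose:  (B, num_heads, S, head_size) = (2, 12, 128, 64)
#     attention_scores:                 (B, num_heads, S, S)         = (2, 12, 128, 128)
#     context_layer after merging heads back:                          (2, 128, 768)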


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # Residual connection followed by layer normalization.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def forward(self, input_tensor, attention_mask=None, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
        return outputs


class BertEncoder(nn.Module):
    def __init__(self, config, checkpointing=False):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        # If enabled, each layer's activations are recomputed during the backward
        # pass instead of being stored (trades compute for memory).
        self.checkpointing = checkpointing

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # head_mask may be None when the encoder is called directly.
            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.checkpointing:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    layer_module,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    use_reentrant=False,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask)

            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add the hidden states from the last layer.
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
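
# Enabling activation checkpointing is a constructor-time choice. A minimal sketch
# (assumes a BertConfig instance named `config`):
#
#     encoder = BertEncoder(config, checkpointing=True)
#
# During training, each BertLayer's forward is then rerun inside backward(), so
# activation memory drops at the cost of roughly one extra forward pass of compute.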


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token ([CLS]).
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states
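
# Weight tying is not performed in this module; the comment above describes the
# original BERT setup, where the decoder shares its weight matrix with the input
# embeddings. A minimal sketch of how such tying is typically wired up (assumes
# hypothetical variables `model`, a BertModel, and `head`, a BertLMPredictionHead):
#
#     head.decoder.weight = model.embeddings.word_embeddings.weight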


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(nn.Module):
    """Base class that stores the model configuration and implements weight initialization."""

    config_class = BertConfig
    base_model_prefix = "bert"

    def __init__(self, config):
        super(BertPreTrainedModel, self).__init__()
        self.config = config

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses truncated_normal
            # for initialization; cf. https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class BertModel(BertPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during BERT pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or
            pooling the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted
            average in the self-attention heads.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """

    def __init__(self, config, checkpointing=False):
        super(BertModel, self).__init__(config)

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config, checkpointing=checkpointing)
        self.pooler = BertPooler(config)

        self.apply(self._init_weights)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length],
        # so we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length].
        # This attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast
        # dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation creates a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Because we add it to the raw scores before the softmax, this is
        # effectively the same as removing the masked positions entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
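
        # Worked example (illustrative): a padding mask [1, 1, 0] for one sequence
        # becomes additive scores of roughly [0., 0., -10000.], broadcast over all
        # heads and query positions:
        #
        #     mask = torch.tensor([[1.0, 1.0, 0.0]])  # (batch=1, seq=3)
        #     ext = mask.unsqueeze(1).unsqueeze(2)    # (1, 1, 1, 3)
        #     (1.0 - ext) * -10000.0                  # ~ [0., 0., -10000.]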

        # Prepare head mask if needed:
        # 1.0 in head_mask indicates we keep the head.
        # attention_probs has shape batch_size x num_heads x seq_length x seq_length.
        # Input head_mask has shape [num_heads] or [num_hidden_layers x num_heads],
        # and is converted here to shape
        # [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        # Add hidden_states and attentions if they are here.
        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
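
# Minimal end-to-end sketch (kept as a comment because of the relative import
# above; assumes this module's BertConfig exposes the usual defaults such as
# vocab_size, hidden_size and num_hidden_layers):
#
#     config = BertConfig()
#     model = BertModel(config, checkpointing=False)
#     input_ids = torch.randint(0, config.vocab_size, (1, 8))
#     sequence_output, pooled_output = model(input_ids)[:2]
#     # sequence_output: (1, 8, hidden_size); pooled_output: (1, hidden_size)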