from transformers import BertPreTrainedModel, BertModel
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
import torch
import torch.nn as nn
from typing import Optional, Tuple, Union

class BertForCausalLM(BertPreTrainedModel):
    """
    BERT model with a language modeling head for instruction following and text generation.
    Supports 100+ languages, with a primary focus on English.
    """

    # Output embedding weights (and bias) that are tied to the input embeddings.
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)
        # Encoder backbone without the pooling head; the LM head operates on token-level outputs.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        # Initialize weights and apply final processing (including weight tying).
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # Causal LM does next-token prediction: shift the scores and labels so that
            # tokens < n are used to predict token n.
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            shifted_labels = labels[:, 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            lm_loss = loss_fct(
                shifted_prediction_scores.view(-1, self.config.vocab_size),
                shifted_labels.view(-1),
            )

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs
    ):
        input_shape = input_ids.shape
        # If no attention mask is provided, attend to every token in the prompt.
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # With cached key/values, only the most recent token needs to be fed to the model.
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
        }

class BertOnlyMLMHead(nn.Module):
    """Wraps the prediction head so the parameter names match the standard BERT checkpoints."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The decoder weight is tied to the input embeddings, but each token gets an
        # output-only bias.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so the bias is resized together with the decoder when the
        # vocabulary changes (e.g. `resize_token_embeddings`).
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = nn.GELU()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states