| """PyTorch OpenAI GPT model.""" |
|
|
|
|
| import json |
| import logging |
| import math |
| import os |
|
|
| import torch |
| import torch.nn as nn |
| from torch.nn import CrossEntropyLoss |
|
|
| from .activations import gelu_new, swish |
| from .configuration_openai import OpenAIGPTConfig |
| from .file_utils import add_start_docstrings, add_start_docstrings_to_callable |
| from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer |
|
|
|
|
| logger = logging.getLogger(__name__) |
|
|
| OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = { |
| "openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin" |
| } |
|
|
|
|
| def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path): |
| """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) |
| """ |
| import re |
| import numpy as np |
|
|
| if ".ckpt" in openai_checkpoint_folder_path: |
| openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path) |
|
|
| logger.info("Loading weights from {}".format(openai_checkpoint_folder_path)) |
|
|
| with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle: |
| names = json.load(names_handle) |
| with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle: |
| shapes = json.load(shapes_handle) |
| offsets = np.cumsum([np.prod(shape) for shape in shapes]) |
| init_params = [np.load(openai_checkpoint_folder_path + "/params_{}.npy".format(n)) for n in range(10)] |
| init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] |
| init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] |
|
|
| # Squeeze out any singleton dimensions left over from the checkpoint arrays. |
| init_params = [arr.squeeze() for arr in init_params] |
|
|
| try: |
| assert model.tokens_embed.weight.shape == init_params[1].shape |
| assert model.positions_embed.weight.shape == init_params[0].shape |
| except AssertionError as e: |
| e.args += (model.tokens_embed.weight.shape, init_params[1].shape) |
| e.args += (model.positions_embed.weight.shape, init_params[0].shape) |
| raise |
|
|
| model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) |
| model.positions_embed.weight.data = torch.from_numpy(init_params[0]) |
| names.pop(0) |
| # Pop the position and token embedding arrays, which were assigned above. |
| init_params.pop(0) |
| init_params.pop(0) |
|
|
| for name, array in zip(names, init_params): |
| name = name[6:]  # skip the leading "model/" scope prefix |
| assert name[-2:] == ":0" |
| name = name[:-2] |
| name = name.split("/") |
| pointer = model |
| for m_name in name: |
| if re.fullmatch(r"[A-Za-z]+\d+", m_name): |
| scope_names = re.split(r"(\d+)", m_name) |
| else: |
| scope_names = [m_name] |
| if scope_names[0] == "g": |
| pointer = getattr(pointer, "weight") |
| elif scope_names[0] == "b": |
| pointer = getattr(pointer, "bias") |
| elif scope_names[0] == "w": |
| pointer = getattr(pointer, "weight") |
| else: |
| pointer = getattr(pointer, scope_names[0]) |
| if len(scope_names) >= 2: |
| num = int(scope_names[1]) |
| pointer = pointer[num] |
| # Check that the PyTorch parameter and the checkpoint array have matching shapes. |
| try: |
| assert pointer.shape == array.shape |
| except AssertionError as e: |
| e.args += (pointer.shape, array.shape) |
| raise |
| logger.info("Initialize PyTorch weight {}".format(name)) |
| pointer.data = torch.from_numpy(array) |
| return model |
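|
| # A minimal usage sketch for the loader above (the checkpoint path is hypothetical): the folder is |
| # expected to contain parameters_names.json, params_shapes.json and the params_0.npy ... params_9.npy |
| # shards produced by the original OpenAI TF code. |
| # |
| #     config = OpenAIGPTConfig() |
| #     model = OpenAIGPTModel(config) |
| #     model = load_tf_weights_in_openai_gpt(model, config, "/path/to/openai/checkpoint/folder") |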
|
|
|
|
| ACT_FNS = {"relu": nn.ReLU(), "swish": swish, "gelu": gelu_new}  # every entry is a callable mapping a tensor to a tensor |
|
|
|
|
| class Attention(nn.Module): |
| def __init__(self, nx, n_ctx, config, scale=False): |
| super().__init__() |
| n_state = nx |
| # the hidden size must be evenly divisible by the number of attention heads |
| assert n_state % config.n_head == 0 |
| self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)) |
| self.n_head = config.n_head |
| self.split_size = n_state |
| self.scale = scale |
|
|
| self.output_attentions = config.output_attentions |
|
|
| self.c_attn = Conv1D(n_state * 3, nx) |
| self.c_proj = Conv1D(n_state, nx) |
| self.attn_dropout = nn.Dropout(config.attn_pdrop) |
| self.resid_dropout = nn.Dropout(config.resid_pdrop) |
| self.pruned_heads = set() |
|
|
| def prune_heads(self, heads): |
| if len(heads) == 0: |
| return |
| mask = torch.ones(self.n_head, self.split_size // self.n_head) |
| heads = set(heads) - self.pruned_heads |
| for head in heads: |
| head -= sum(1 if h < head else 0 for h in self.pruned_heads) |
| mask[head] = 0 |
| mask = mask.view(-1).contiguous().eq(1) |
| index = torch.arange(len(mask))[mask].long() |
| index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) |
| # Prune the Conv1D layers; c_attn packs query/key/value, so the kept indices are repeated for each of the three |
| self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) |
| self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) |
| # Update the hyper-parameters and record which heads have been pruned |
| self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads)) |
| self.n_head = self.n_head - len(heads) |
| self.pruned_heads = self.pruned_heads.union(heads) |
|
|
| def _attn(self, q, k, v, attention_mask=None, head_mask=None): |
| w = torch.matmul(q, k) |
| if self.scale: |
| w = w / math.sqrt(v.size(-1)) |
| # Apply the causal mask: each position may only attend to itself and earlier positions. |
| # self.bias may be larger than w, so crop it to the current query/key lengths. |
| b = self.bias[:, :, : w.size(-2), : w.size(-1)] |
| w = w * b + -1e4 * (1 - b) |
|
|
| if attention_mask is not None: |
| # Apply the (additive) attention mask |
| w = w + attention_mask |
|
|
| w = nn.Softmax(dim=-1)(w) |
| w = self.attn_dropout(w) |
|
|
| # Mask attention heads if a head mask is provided |
| if head_mask is not None: |
| w = w * head_mask |
|
|
| outputs = [torch.matmul(w, v)] |
| if self.output_attentions: |
| outputs.append(w) |
| return outputs |
|
|
| def merge_heads(self, x): |
| x = x.permute(0, 2, 1, 3).contiguous() |
| new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),) |
| return x.view(*new_x_shape) |
|
|
| def split_heads(self, x, k=False): |
| new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head) |
| x = x.view(*new_x_shape) |
| if k: |
| return x.permute(0, 2, 3, 1) |
| else: |
| return x.permute(0, 2, 1, 3) |
|
|
| def forward(self, x, attention_mask=None, head_mask=None): |
| x = self.c_attn(x) |
| query, key, value = x.split(self.split_size, dim=2) |
| query = self.split_heads(query) |
| key = self.split_heads(key, k=True) |
| value = self.split_heads(value) |
|
|
| attn_outputs = self._attn(query, key, value, attention_mask, head_mask) |
| a = attn_outputs[0] |
|
|
| a = self.merge_heads(a) |
| a = self.c_proj(a) |
| a = self.resid_dropout(a) |
|
|
| outputs = [a] + attn_outputs[1:] |
| return outputs |
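|
| # Shape walk-through for Attention.forward (illustrative, assuming an un-pruned layer where |
| # n_state == nx == config.n_embd and head_dim == n_state // n_head): |
| #   x:            [batch, seq, nx]               input hidden states |
| #   c_attn(x):    [batch, seq, 3 * n_state]      packed query / key / value projections |
| #   query, value: [batch, n_head, seq, head_dim] after split_heads |
| #   key:          [batch, n_head, head_dim, seq] after split_heads(k=True) |
| #   w = q @ k:    [batch, n_head, seq, seq]      attention scores, causally masked |
| #   w @ value:    [batch, n_head, seq, head_dim], merged back to [batch, seq, n_state] by merge_heads |
| #   c_proj(...):  [batch, seq, nx]               output hidden states |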
|
|
|
|
| class MLP(nn.Module): |
| def __init__(self, n_state, config): |
| super().__init__() |
| nx = config.n_embd |
| self.c_fc = Conv1D(n_state, nx) |
| self.c_proj = Conv1D(nx, n_state) |
| self.act = ACT_FNS[config.afn] |
| self.dropout = nn.Dropout(config.resid_pdrop) |
|
|
| def forward(self, x): |
| h = self.act(self.c_fc(x)) |
| h2 = self.c_proj(h) |
| return self.dropout(h2) |
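|
| # In a Block the MLP is constructed as MLP(4 * n_embd, config), so c_fc expands |
| # n_embd -> 4 * n_embd and c_proj projects back 4 * n_embd -> n_embd. |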
|
|
|
|
| class Block(nn.Module): |
| def __init__(self, n_ctx, config, scale=False): |
| super().__init__() |
| nx = config.n_embd |
| self.attn = Attention(nx, n_ctx, config, scale) |
| self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon) |
| self.mlp = MLP(4 * nx, config) |
| self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon) |
|
|
| def forward(self, x, attention_mask=None, head_mask=None): |
| attn_outputs = self.attn(x, attention_mask=attention_mask, head_mask=head_mask) |
| a = attn_outputs[0] |
|
|
| n = self.ln_1(x + a) |
| m = self.mlp(n) |
| h = self.ln_2(n + m) |
|
|
| outputs = [h] + attn_outputs[1:] |
| return outputs |
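|
| # Dataflow of one Block (post-layer-norm residual structure, as implemented above): |
| #   a = Attention(x) |
| #   n = LayerNorm(x + a) |
| #   m = MLP(n) |
| #   h = LayerNorm(n + m) |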
|
|
|
|
| class OpenAIGPTPreTrainedModel(PreTrainedModel): |
| """ An abstract class to handle weights initialization and |
| a simple interface for downloading and loading pretrained models. |
| """ |
|
|
| config_class = OpenAIGPTConfig |
| pretrained_model_archive_map = OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP |
| load_tf_weights = load_tf_weights_in_openai_gpt |
| base_model_prefix = "transformer" |
|
|
| def _init_weights(self, module): |
| """ Initialize the weights. |
| """ |
| if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)): |
| # Initialize weights from a normal distribution with std = config.initializer_range |
| # (slightly different from the TF version, which uses truncated_normal); biases are zeroed. |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
|
|
| OPENAI_GPT_START_DOCSTRING = r""" |
| |
| This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. |
| Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general |
| usage and behavior. |
| |
| Parameters: |
| config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model. |
| Initializing with a config file does not load the weights associated with the model, only the configuration. |
| Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. |
| """ |
|
|
| OPENAI_GPT_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): |
| Indices of input sequence tokens in the vocabulary. |
| |
| Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`. |
| See :func:`transformers.PreTrainedTokenizer.encode` and |
| :func:`transformers.PreTrainedTokenizer.encode_plus` for details. |
| |
| `What are input IDs? <../glossary.html#input-ids>`__ |
| attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): |
| Mask to avoid performing attention on padding token indices. |
| Mask values selected in ``[0, 1]``: |
| ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. |
| |
| `What are attention masks? <../glossary.html#attention-mask>`__ |
| token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): |
| Segment token indices to indicate first and second portions of the inputs. |
| Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` |
| corresponds to a `sentence B` token |
| |
| `What are token type IDs? <../glossary.html#token-type-ids>`_ |
| position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): |
| Indices of positions of each input sequence tokens in the position embeddings. |
| Selected in the range ``[0, config.max_position_embeddings - 1]``. |
| |
| `What are position IDs? <../glossary.html#position-ids>`_ |
| head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): |
| Mask to nullify selected heads of the self-attention modules. |
| Mask values selected in ``[0, 1]``: |
| :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. |
| inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): |
| Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. |
| This is useful if you want more control over how to convert `input_ids` indices into associated vectors |
| than the model's internal embedding lookup matrix. |
| """ |
|
|
|
|
| @add_start_docstrings( |
| "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.", |
| OPENAI_GPT_START_DOCSTRING, |
| ) |
| class OpenAIGPTModel(OpenAIGPTPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.output_attentions = config.output_attentions |
| self.output_hidden_states = config.output_hidden_states |
|
|
| self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd) |
| self.positions_embed = nn.Embedding(config.n_positions, config.n_embd) |
| self.drop = nn.Dropout(config.embd_pdrop) |
| self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]) |
|
|
| self.init_weights() |
|
|
| def get_input_embeddings(self): |
| return self.tokens_embed |
|
|
| def set_input_embeddings(self, new_embeddings): |
| self.tokens_embed = new_embeddings |
|
|
| def _prune_heads(self, heads_to_prune): |
| """ Prunes heads of the model. |
| heads_to_prune: dict of {layer_num: list of heads to prune in this layer} |
| """ |
| for layer, heads in heads_to_prune.items(): |
| self.h[layer].attn.prune_heads(heads) |
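|
| # Usage sketch: head pruning is normally requested through PreTrainedModel.prune_heads, which |
| # dispatches to the method above. For example, the call below would remove heads 0 and 2 of the |
| # first layer and head 1 of the second layer: |
| # |
| #     model.prune_heads({0: [0, 2], 1: [1]}) |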
|
|
| @add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING) |
| def forward( |
| self, |
| input_ids=None, |
| attention_mask=None, |
| token_type_ids=None, |
| position_ids=None, |
| head_mask=None, |
| inputs_embeds=None, |
| ): |
| r""" |
| Return: |
| :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs: |
| last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): |
| Sequence of hidden-states at the last layer of the model. |
| hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): |
| Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) |
| of shape :obj:`(batch_size, sequence_length, hidden_size)`. |
| |
| Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
| attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): |
| Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape |
| :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. |
| |
| Attention weights after the attention softmax, used to compute the weighted average in the self-attention |
| heads. |
| |
| Examples:: |
| |
| from transformers import OpenAIGPTTokenizer, OpenAIGPTModel |
| import torch |
| |
| tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') |
| model = OpenAIGPTModel.from_pretrained('openai-gpt') |
| input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 |
| outputs = model(input_ids) |
| last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple |
| |
| """ |
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
| elif input_ids is not None: |
| input_shape = input_ids.size() |
| input_ids = input_ids.view(-1, input_shape[-1]) |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| if position_ids is None: |
| # Build default position ids [0, ..., seq_len - 1] on the same device as the inputs |
| device = input_ids.device if input_ids is not None else inputs_embeds.device |
| position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=device) |
| position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) |
|
|
| # Prepare the attention mask. |
| if attention_mask is not None: |
| # We create a broadcastable attention mask from the 2D tensor mask: after the two |
| # unsqueeze calls its shape is [batch_size, 1, 1, to_seq_length], which broadcasts |
| # against attention scores of shape [batch_size, num_heads, from_seq_length, to_seq_length]. |
| attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) |
|
|
| # attention_mask is 1.0 for positions to attend to and 0.0 for masked positions. |
| # The two lines below turn it into an additive mask that is 0.0 for attended positions |
| # and -10000.0 for masked ones; since it is added to the raw scores before the softmax, |
| # this is effectively the same as removing the masked positions entirely. |
| attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) |
| attention_mask = (1.0 - attention_mask) * -10000.0 |
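|
| # For example, a padding mask of [1, 1, 1, 0] becomes the additive mask [0.0, 0.0, 0.0, -10000.0], |
| # so the padded position contributes (almost) nothing after the softmax. |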
|
|
| # Prepare the head mask if needed: 1.0 in head_mask indicates that a head is kept. |
| # The mask is expanded to a per-layer mask that broadcasts against attention probabilities |
| # of shape [batch_size, n_heads, seq_length, seq_length]. |
| if head_mask is not None: |
| if head_mask.dim() == 1: |
| head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) |
| head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1) |
| elif head_mask.dim() == 2: |
| # a per-layer head mask of shape [n_layer, n_heads] was provided |
| head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) |
| # switch to the parameter dtype (fp16 compatibility) |
| head_mask = head_mask.to(dtype=next(self.parameters()).dtype) |
| else: |
| head_mask = [None] * self.config.n_layer |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.tokens_embed(input_ids) |
| position_embeds = self.positions_embed(position_ids) |
| if token_type_ids is not None: |
| token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) |
| token_type_embeds = self.tokens_embed(token_type_ids) |
| else: |
| token_type_embeds = 0 |
| hidden_states = inputs_embeds + position_embeds + token_type_embeds |
| hidden_states = self.drop(hidden_states) |
|
|
| output_shape = input_shape + (hidden_states.size(-1),) |
|
|
| all_attentions = () |
| all_hidden_states = () |
| for i, block in enumerate(self.h): |
| if self.output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),) |
|
|
| outputs = block(hidden_states, attention_mask, head_mask[i]) |
| hidden_states = outputs[0] |
| if self.output_attentions: |
| all_attentions = all_attentions + (outputs[1],) |
|
|
| # Add the hidden states of the last layer |
| if self.output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),) |
|
|
| outputs = (hidden_states.view(*output_shape),) |
| if self.output_hidden_states: |
| outputs = outputs + (all_hidden_states,) |
| if self.output_attentions: |
| outputs = outputs + (all_attentions,) |
| return outputs |
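|
| # A small sketch of retrieving attention weights; input_ids is assumed to be built as in the |
| # docstring example above. The tuple ordering follows the forward method: last hidden state, |
| # then (optionally) all hidden states, then (optionally) attentions. |
| # |
| #     config = OpenAIGPTConfig.from_pretrained("openai-gpt", output_attentions=True) |
| #     model = OpenAIGPTModel.from_pretrained("openai-gpt", config=config) |
| #     outputs = model(input_ids) |
| #     attentions = outputs[-1]  # tuple with one [batch, n_head, seq, seq] tensor per layer |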
|
|
|
|
| @add_start_docstrings( |
| """OpenAI GPT Model transformer with a language modeling head on top |
| (linear layer with weights tied to the input embeddings). """, |
| OPENAI_GPT_START_DOCSTRING, |
| ) |
| class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.transformer = OpenAIGPTModel(config) |
| self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
|
|
| self.init_weights() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| @add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING) |
| def forward( |
| self, |
| input_ids=None, |
| attention_mask=None, |
| token_type_ids=None, |
| position_ids=None, |
| head_mask=None, |
| inputs_embeds=None, |
| labels=None, |
| ): |
| r""" |
| labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): |
| Labels for language modeling. |
| Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids`` |
| Indices are selected in ``[-100, 0, ..., config.vocab_size]`` |
| All labels set to ``-100`` are ignored (masked), the loss is only |
| computed for labels in ``[0, ..., config.vocab_size]`` |
| |
| Return: |
| :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs: |
| loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided): |
| Language modeling loss. |
| prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): |
| Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
| hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): |
| Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) |
| of shape :obj:`(batch_size, sequence_length, hidden_size)`. |
| |
| Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
| attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): |
| Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape |
| :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. |
| |
| Attention weights after the attention softmax, used to compute the weighted average in the self-attention |
| heads. |
| |
| Examples:: |
| |
| from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel |
| import torch |
| |
| tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') |
| model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt') |
| input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 |
| outputs = model(input_ids, labels=input_ids) |
| loss, logits = outputs[:2] |
| |
| """ |
| transformer_outputs = self.transformer( |
| input_ids, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| ) |
| hidden_states = transformer_outputs[0] |
| lm_logits = self.lm_head(hidden_states) |
|
|
| outputs = (lm_logits,) + transformer_outputs[1:] |
| if labels is not None: |
| # Shift so that tokens < n predict token n |
| shift_logits = lm_logits[..., :-1, :].contiguous() |
| shift_labels = labels[..., 1:].contiguous() |
| # Flatten the tokens for the cross-entropy loss |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) |
| outputs = (loss,) + outputs |
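|
| # Illustrative shift: with labels = input_ids = [t0, t1, t2, t3], the logits at positions 0..2 |
| # are scored against t1..t3, so every position is trained to predict the following token. |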
|
|
| return outputs |
|
|
|
|
| @add_start_docstrings( |
| """OpenAI GPT Model transformer with a language modeling and a multiple-choice classification |
| head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. |
| The language modeling head has its weights tied to the input embeddings; |
| the classification head takes as input the hidden state at a specified classification token index in the input sequence. |
| """, |
| OPENAI_GPT_START_DOCSTRING, |
| ) |
| class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
|
|
| config.num_labels = 1 |
| self.transformer = OpenAIGPTModel(config) |
| self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
| self.multiple_choice_head = SequenceSummary(config) |
|
|
| self.init_weights() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| @add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING) |
| def forward( |
| self, |
| input_ids=None, |
| attention_mask=None, |
| token_type_ids=None, |
| position_ids=None, |
| head_mask=None, |
| inputs_embeds=None, |
| mc_token_ids=None, |
| lm_labels=None, |
| mc_labels=None, |
| ): |
| r""" |
| mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to the index of the last token of the input): |
| Index of the classification token in each input sequence. |
| Selected in the range ``[0, input_ids.size(-1) - 1]``. |
| lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): |
| Labels for language modeling. |
| Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` |
| Indices are selected in ``[-100, 0, ..., config.vocab_size]`` |
| All labels set to ``-100`` are ignored (masked), the loss is only |
| computed for labels in ``[0, ..., config.vocab_size]`` |
| mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): |
| Labels for computing the multiple choice classification loss. |
| Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension |
| of the input tensors. (see `input_ids` above) |
| |
| Return: |
| :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs: |
| lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``lm_labels`` is provided): |
| Language modeling loss. |
| mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`multiple_choice_labels` is provided): |
| Multiple choice classification loss. |
| lm_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`): |
| Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
| mc_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): |
| Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). |
| hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): |
| Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) |
| of shape :obj:`(batch_size, sequence_length, hidden_size)`. |
| |
| Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
| attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): |
| Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape |
| :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. |
| |
| Attention weights after the attention softmax, used to compute the weighted average in the self-attention |
| heads. |
| |
| Examples:: |
| |
| from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel |
| import torch |
| |
| tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') |
| model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt') |
| tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!) |
| model.resize_token_embeddings(len(tokenizer)) |
| |
| choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] |
| input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices |
| mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1 |
| |
| outputs = model(input_ids, mc_token_ids=mc_token_ids) |
| lm_prediction_scores, mc_prediction_scores = outputs[:2] |
| |
| """ |
| transformer_outputs = self.transformer( |
| input_ids, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| ) |
| hidden_states = transformer_outputs[0] |
|
|
| lm_logits = self.lm_head(hidden_states) |
| mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) |
|
|
| outputs = (lm_logits, mc_logits) + transformer_outputs[1:] |
| if mc_labels is not None: |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) |
| outputs = (loss,) + outputs |
| if lm_labels is not None: |
| shift_logits = lm_logits[..., :-1, :].contiguous() |
| shift_labels = lm_labels[..., 1:].contiguous() |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) |
| outputs = (loss,) + outputs |
|
|
| return outputs |
|
|