# coding=utf-8
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """ PyTorch GPT-J model.""" |
|
|
| import warnings |
| from typing import Optional, Tuple, Union |
|
|
| import torch |
| import torch.fx |
| import torch.utils.checkpoint |
| from torch import nn |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
| from transformers.activations import ACT2FN |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPast, |
| CausalLMOutputWithPast, |
| QuestionAnsweringModelOutput, |
| SequenceClassifierOutputWithPast, |
| ) |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.utils import ( |
| add_code_sample_docstrings, |
| add_start_docstrings, |
| add_start_docstrings_to_model_forward, |
| is_torch_fx_proxy, |
| logging, |
| ) |
| from .configuration_gptj import GPTJConfig |
|
|
| logger = logging.get_logger(__name__) |
|
|
| _CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj" |
| _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B" |
| _CONFIG_FOR_DOC = "GPTJConfig" |
|
|
|
|
| GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [ |
| "EleutherAI/gpt-j-6B", |
    # See all GPT-J models at https://huggingface.co/models?filter=gptj
| ] |
|
|
| def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: |
| inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim)) |
| sinusoid_inp = torch.einsum( |
| "i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq |
| ).float() |
| return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1) |
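# Layout note plus a tiny illustration (added for clarity, not part of the computation): for an
# even `dim`, the first `dim // 2` columns hold sines and the last `dim // 2` columns hold
# cosines, so position 0 maps to all zeros followed by all ones.
#
#     >>> pos = create_sinusoidal_positions(num_pos=4, dim=8)
#     >>> pos.shape
#     torch.Size([4, 8])
#     >>> bool(torch.all(pos[0, :4] == 0)) and bool(torch.all(pos[0, 4:] == 1))
#     True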
|
|
|
|
| @torch.fx.wrap |
| def get_embed_positions(embed_positions, position_ids): |
| return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1) |
|
|
|
|
| def rotate_every_two(x: torch.Tensor) -> torch.Tensor: |
| x1 = x[:, :, :, ::2] |
| x2 = x[:, :, :, 1::2] |
| x = torch.stack((-x2, x1), dim=-1) |
| return x.flatten(-2) |
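# Worked example (illustrative): `rotate_every_two` treats consecutive pairs along the last axis
# as 2-D coordinates and rotates each pair by 90 degrees, which is the interleaved ("GPT-J style")
# rotary layout.
#
#     >>> x = torch.tensor([[[[1.0, 2.0, 3.0, 4.0]]]])   # (batch=1, seq=1, heads=1, dim=4)
#     >>> rotate_every_two(x)
#     tensor([[[[-2.,  1., -4.,  3.]]]])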
|
|
|
|
| def apply_rotary_pos_emb( |
| tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor |
| ) -> torch.Tensor: |
| sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3).to(tensor.device) |
| cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3).to(tensor.device) |
| return (tensor * cos) + (rotate_every_two(tensor) * sin) |
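# Shape/identity sketch (illustrative): `sin` and `cos` arrive as (batch, seq_len, rotary_dim // 2),
# are duplicated along the last axis, and broadcast over the head dimension, so every attention
# head shares the same rotation. At position 0 the rotation is the identity (sin = 0, cos = 1).
#
#     >>> t = torch.randn(1, 1, 2, 8)                        # (batch, seq, heads, rotary_dim)
#     >>> sin, cos = torch.zeros(1, 1, 4), torch.ones(1, 1, 4)
#     >>> bool(torch.allclose(apply_rotary_pos_emb(t, sin, cos), t))
#     True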
|
|
|
|
| class GPTJAttention(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
|
|
| max_positions = config.max_position_embeddings |
| self.register_buffer( |
| "bias", |
| torch.tril( |
| torch.ones((max_positions, max_positions), dtype=torch.bool) |
| ).view(1, 1, max_positions, max_positions), |
| persistent=False, |
| ) |
| self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False) |
|
|
| self.attn_dropout = nn.Dropout(config.attn_pdrop) |
| self.resid_dropout = nn.Dropout(config.resid_pdrop) |
|
|
| self.embed_dim = config.hidden_size |
| self.num_attention_heads = config.num_attention_heads |
| self.head_dim = self.embed_dim // self.num_attention_heads |
| if self.head_dim * self.num_attention_heads != self.embed_dim: |
| raise ValueError( |
| f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and" |
| f" `num_attention_heads`: {self.num_attention_heads})." |
| ) |
| self.scale_attn = torch.sqrt( |
| torch.tensor(self.head_dim, dtype=torch.float32) |
| ).to(torch.get_default_dtype()) |
|
|
| self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) |
| self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) |
| self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) |
| self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) |
| self.rotary_dim = config.rotary_dim |
| pos_embd_dim = self.rotary_dim or self.embed_dim |
| self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim) |
|
|
| def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary): |
| """ |
| Splits hidden dim into attn_head_size and num_attention_heads |
| """ |
| new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) |
| tensor = tensor.view(new_shape) |
| if rotary: |
| return tensor |
| if len(tensor.shape) == 5: |
| return tensor.permute( |
| 0, 1, 3, 2, 4 |
| ) |
| elif len(tensor.shape) == 4: |
| return tensor.permute( |
| 0, 2, 1, 3 |
| ) |
| else: |
| raise ValueError( |
| f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}" |
| ) |
|
|
| def _merge_heads(self, tensor, num_attention_heads, attn_head_size): |
| """ |
| Merges attn_head_size dim and num_attn_heads dim into hidden dim |
| """ |
| if len(tensor.shape) == 5: |
| tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() |
| elif len(tensor.shape) == 4: |
| tensor = tensor.permute(0, 2, 1, 3).contiguous() |
| else: |
| raise ValueError( |
| f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}" |
| ) |
| new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,) |
| return tensor.view(new_shape) |
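    # Round-trip sketch (illustrative; the tiny config values below are hypothetical):
    # `_split_heads(..., rotary=False)` followed by `_merge_heads` is the identity on a
    # (batch, seq, hidden) tensor.
    #
    #     >>> attn = GPTJAttention(GPTJConfig(n_embd=32, n_head=4))
    #     >>> x = torch.randn(2, 5, 32)
    #     >>> bool(torch.equal(attn._merge_heads(attn._split_heads(x, 4, 8, rotary=False), 4, 8), x))
    #     True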
|
|
| def _attn( |
| self, |
| query, |
| key, |
| value, |
| attention_mask=None, |
| head_mask=None, |
| ): |
        # compute causal mask from causal mask buffer
| query_length, key_length = query.size(-2), key.size(-2) |
| causal_mask = self.bias[ |
| :, :, key_length - query_length : key_length, :key_length |
| ] |
|
|
        # Keep the attention weights computation in fp32 to avoid overflow issues
| query = query.to(torch.float32) |
| key = key.to(torch.float32) |
|
|
| attn_weights = torch.matmul(query, key.transpose(-1, -2)) |
|
|
| mask_value = torch.finfo(attn_weights.dtype).min |
        # mask_value must be a tensor with the same dtype and device as attn_weights,
        # otherwise the `torch.where` below raises a dtype/device mismatch error.
| mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to( |
| attn_weights.device |
| ) |
| attn_weights = torch.where( |
| causal_mask.to(attn_weights.device), attn_weights, mask_value |
| ) |
|
|
| attn_weights = attn_weights / self.scale_attn |
|
|
| if attention_mask is not None: |
            # Apply the attention mask
| attn_weights = attn_weights + attention_mask |
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1) |
| attn_weights = attn_weights.to(value.dtype) |
| attn_weights = self.attn_dropout(attn_weights) |
|
|
        # Mask heads if we want to
| if head_mask is not None: |
| attn_weights = attn_weights * head_mask |
|
|
| attn_output = torch.matmul(attn_weights, value) |
|
|
| return attn_output, attn_weights |
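    # In equation form (added for clarity), for a single head the method above computes
    #
    #     A = softmax( causal_mask(Q K^T) / sqrt(head_dim) + attention_mask )
    #     attn_output = A V
    #
    # Note that, unlike GPT-2, GPT-J applies the causal mask *before* dividing by sqrt(head_dim),
    # and the Q K^T product is carried out in float32 to avoid overflow in fp16.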
|
|
| def _get_embed_positions(self, position_ids): |
| embed_positions = self.embed_positions |
| if embed_positions.device != position_ids.device: |
| embed_positions = embed_positions.to(position_ids.device) |
| self.embed_positions = embed_positions |
| return embed_positions.repeat(position_ids.shape[0], 1, 1) |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| layer_past: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| ) -> Union[ |
| Tuple[torch.Tensor, Tuple[torch.Tensor]], |
| Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]], |
| ]: |
| query = self.q_proj(hidden_states) |
| key = self.k_proj(hidden_states) |
| value = self.v_proj(hidden_states) |
|
|
| query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) |
| key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) |
| value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) |
|
|
| if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): |
            # The logic to conditionally copy to GPU could not be traced, so we do this
            # every time in the torch.fx case
| embed_positions = get_embed_positions(self.embed_positions, position_ids) |
| else: |
| embed_positions = self._get_embed_positions(position_ids) |
|
|
| repeated_position_ids = position_ids.unsqueeze(-1).repeat( |
| 1, 1, embed_positions.shape[-1] |
| ) |
| sincos = torch.gather(embed_positions, 1, repeated_position_ids) |
| sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) |
|
|
| if self.rotary_dim is not None: |
| k_rot = key[:, :, :, : self.rotary_dim] |
| k_pass = key[:, :, :, self.rotary_dim :] |
|
|
| q_rot = query[:, :, :, : self.rotary_dim] |
| q_pass = query[:, :, :, self.rotary_dim :] |
|
|
| k_rot = apply_rotary_pos_emb(k_rot, sin, cos) |
| q_rot = apply_rotary_pos_emb(q_rot, sin, cos) |
|
|
| key = torch.cat([k_rot, k_pass], dim=-1) |
| query = torch.cat([q_rot, q_pass], dim=-1) |
| else: |
| key = apply_rotary_pos_emb(key, sin, cos) |
| query = apply_rotary_pos_emb(query, sin, cos) |
|
|
| key = key.permute(0, 2, 1, 3) |
| query = query.permute(0, 2, 1, 3) |
|
|
| if layer_past is not None: |
| past_key = layer_past[0] |
| past_value = layer_past[1] |
| key = torch.cat((past_key, key), dim=-2) |
| value = torch.cat((past_value, value), dim=-2) |
|
|
| if use_cache is True: |
            # Note that this cast is quite ugly, but is not implemented before ROPE as the original
            # codebase keeps the key in float32 all along the computation.
| present = (key.to(hidden_states.dtype), value) |
| else: |
| present = None |
|
|
        # compute self-attention: V x Softmax(QK^T)
| attn_output, attn_weights = self._attn( |
| query, key, value, attention_mask, head_mask |
| ) |
|
|
| attn_output = self._merge_heads( |
| attn_output, self.num_attention_heads, self.head_dim |
| ) |
| attn_output = self.out_proj(attn_output) |
| attn_output = self.resid_dropout(attn_output) |
|
|
| outputs = (attn_output, present) |
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
|
|
|
|
| class GPTJMLP(nn.Module): |
| def __init__( |
| self, intermediate_size, config |
| ): |
| super().__init__() |
| embed_dim = config.n_embd |
|
|
| self.fc_in = nn.Linear(embed_dim, intermediate_size) |
| self.fc_out = nn.Linear(intermediate_size, embed_dim) |
|
|
| self.act = ACT2FN[config.activation_function] |
| self.dropout = nn.Dropout(config.resid_pdrop) |
|
|
| def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor: |
| hidden_states = self.fc_in(hidden_states) |
| hidden_states = self.act(hidden_states) |
| hidden_states = self.fc_out(hidden_states) |
| hidden_states = self.dropout(hidden_states) |
| return hidden_states |
|
|
|
|
| class GPTJBlock(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd |
| self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) |
| self.attn = GPTJAttention(config) |
| self.mlp = GPTJMLP(inner_dim, config) |
|
|
| def forward( |
| self, |
| hidden_states: Optional[torch.FloatTensor], |
| layer_past: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| ) -> Union[ |
| Tuple[torch.Tensor], |
| Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]], |
| ]: |
| residual = hidden_states |
| hidden_states = self.ln_1(hidden_states) |
| attn_outputs = self.attn( |
| hidden_states=hidden_states, |
| layer_past=layer_past, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| ) |
| attn_output = attn_outputs[0] |
| outputs = attn_outputs[1:] |
|
|
| feed_forward_hidden_states = self.mlp(hidden_states) |
| hidden_states = ( |
| attn_output.to(feed_forward_hidden_states.device) |
| + feed_forward_hidden_states |
| + residual.to(feed_forward_hidden_states.device) |
| ) |
|
|
| if use_cache: |
| outputs = (hidden_states,) + outputs |
| else: |
| outputs = (hidden_states,) + outputs[1:] |
|
|
| return outputs |
|
|
|
|
| class GPTJPreTrainedModel(PreTrainedModel): |
| """ |
| An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
| models. |
| """ |
|
|
| config_class = GPTJConfig |
| base_model_prefix = "transformer" |
| is_parallelizable = True |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["GPTJBlock"] |
| _skip_keys_device_placement = "past_key_values" |
|
|
| def __init__(self, *inputs, **kwargs): |
| super().__init__(*inputs, **kwargs) |
|
|
| def _init_weights(self, module): |
| """Initialize the weights.""" |
| if isinstance(module, (nn.Linear,)): |
            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
|
|
| GPTJ_START_DOCSTRING = r""" |
| This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use |
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
| behavior. |
| |
| Parameters: |
| config ([`GPTJConfig`]): Model configuration class with all the parameters of the model. |
| Initializing with a config file does not load the weights associated with the model, only the |
| configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
| GPTJ_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `({0})`): |
| Indices of input sequence tokens in the vocabulary. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): |
| Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, |
| 1]`: |
| |
| - 0 corresponds to a *sentence A* token, |
| - 1 corresponds to a *sentence B* token. |
| |
| [What are token type IDs?](../glossary#token-type-ids) |
| position_ids (`torch.LongTensor` of shape `({0})`, *optional*): |
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| config.n_positions - 1]`. |
| |
| [What are position IDs?](../glossary#position-ids) |
| head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*): |
| Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| is useful if you want more control over how to convert *input_ids* indices into associated vectors than the |
| model's internal embedding lookup matrix. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
|
|
|
|
| @add_start_docstrings( |
| "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.", |
| GPTJ_START_DOCSTRING, |
| ) |
| class GPTJModel(GPTJPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
|
|
| self.embed_dim = config.n_embd |
| self.vocab_size = config.vocab_size |
| self.wte = nn.Embedding(config.vocab_size, self.embed_dim) |
| self.drop = nn.Dropout(config.embd_pdrop) |
| self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)]) |
| self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) |
|
|
| self.gradient_checkpointing = False |
|
|
        # Initialize weights and apply final processing
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.wte |
|
|
| def set_input_embeddings(self, new_embeddings): |
| self.wte = new_embeddings |
|
|
| @add_start_docstrings_to_model_forward( |
| GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=BaseModelOutputWithPast, |
| config_class=_CONFIG_FOR_DOC, |
| real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, BaseModelOutputWithPast]: |
| output_attentions = ( |
| output_attentions |
| if output_attentions is not None |
| else self.config.output_attentions |
| ) |
| output_hidden_states = ( |
| output_hidden_states |
| if output_hidden_states is not None |
| else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError( |
| "You cannot specify both input_ids and inputs_embeds at the same time" |
| ) |
| elif input_ids is not None: |
| self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
| input_shape = input_ids.size() |
| input_ids = input_ids.view(-1, input_shape[-1]) |
| batch_size = input_ids.shape[0] |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| batch_size = inputs_embeds.shape[0] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
| if token_type_ids is not None: |
| token_type_ids = token_type_ids.view(-1, input_shape[-1]) |
|
|
| if past_key_values is None: |
| past_length = 0 |
| past_key_values = tuple([None] * len(self.h)) |
| else: |
| past_length = past_key_values[0][0].size(-2) |
|
|
| if position_ids is None: |
| position_ids = torch.arange( |
| past_length, |
| input_shape[-1] + past_length, |
| dtype=torch.long, |
| device=device, |
| ) |
| position_ids = position_ids.unsqueeze(0) |
|
|
| |
| if attention_mask is not None: |
| if batch_size <= 0: |
| raise ValueError("batch_size has to be defined and > 0") |
| attention_mask = attention_mask.view(batch_size, -1) |
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
| attention_mask = attention_mask[:, None, None, :] |
|
|
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
| attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min |
|
|
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
| head_mask = self.get_head_mask(head_mask, self.config.n_layer) |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.wte(input_ids) |
|
|
| hidden_states = inputs_embeds |
|
|
| if token_type_ids is not None: |
| token_type_embeds = self.wte(token_type_ids) |
| hidden_states = hidden_states + token_type_embeds |
|
|
| hidden_states = self.drop(hidden_states) |
|
|
| output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) |
|
|
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| ) |
| use_cache = False |
|
|
| presents = () if use_cache else None |
| all_self_attentions = () if output_attentions else None |
| all_hidden_states = () if output_hidden_states else None |
| for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): |
            # Ensure layer_past is on same device as hidden_states (might not be correct)
| if layer_past is not None: |
| layer_past = tuple( |
| past_state.to(hidden_states.device) for past_state in layer_past |
| ) |
            # Ensure that attention_mask is always on the same device as hidden_states
| if attention_mask is not None: |
| attention_mask = attention_mask.to(hidden_states.device) |
| if isinstance(head_mask, torch.Tensor): |
| head_mask = head_mask.to(hidden_states.device) |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| if self.gradient_checkpointing and self.training: |
| outputs = self._gradient_checkpointing_func( |
| block.__call__, |
| hidden_states, |
| None, |
| attention_mask, |
| position_ids, |
| head_mask[i], |
| use_cache, |
| output_attentions, |
| ) |
| else: |
| outputs = block( |
| hidden_states=hidden_states, |
| layer_past=layer_past, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask[i], |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| ) |
|
|
| hidden_states = outputs[0] |
| if use_cache is True: |
| presents = presents + (outputs[1],) |
|
|
| if output_attentions: |
| all_self_attentions = all_self_attentions + ( |
| outputs[2 if use_cache else 1], |
| ) |
|
|
| hidden_states = self.ln_f(hidden_states) |
|
|
| hidden_states = hidden_states.view(output_shape) |
        # Add last hidden state
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| if not return_dict: |
| return tuple( |
| v |
| for v in [ |
| hidden_states, |
| presents, |
| all_hidden_states, |
| all_self_attentions, |
| ] |
| if v is not None |
| ) |
|
|
| return BaseModelOutputWithPast( |
| last_hidden_state=hidden_states, |
| past_key_values=presents, |
| hidden_states=all_hidden_states, |
| attentions=all_self_attentions, |
| ) |
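    # Minimal usage sketch for the bare model (illustrative; uses the tiny random checkpoint from
    # `_CHECKPOINT_FOR_DOC`, so the values themselves are meaningless):
    #
    #     >>> model = GPTJModel.from_pretrained("hf-internal-testing/tiny-random-gptj")
    #     >>> out = model(input_ids=torch.tensor([[1, 2, 3]]))
    #     >>> out.last_hidden_state.shape[:2]
    #     torch.Size([1, 3])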
|
|
|
|
| @add_start_docstrings( |
| """ |
| The GPT-J Model transformer with a language modeling head on top. |
| """, |
| GPTJ_START_DOCSTRING, |
| ) |
| class GPTJForCausalLM(GPTJPreTrainedModel): |
| _tied_weights_keys = ["lm_head.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.transformer = GPTJModel(config) |
| self.lm_head = nn.Linear(config.n_embd, config.vocab_size) |
|
|
        # Initialize weights and apply final processing
| self.post_init() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.lm_head = new_embeddings |
|
|
| def prepare_inputs_for_generation( |
| self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs |
| ): |
| token_type_ids = kwargs.get("token_type_ids", None) |
        # Omit tokens covered by past_key_values
| if past_key_values: |
| past_length = past_key_values[0][0].shape[2] |
|
|
            # Some generation methods already pass only the last input ID
| if input_ids.shape[1] > past_length: |
| remove_prefix_length = past_length |
| else: |
                # Default to old behavior: keep only final ID
| remove_prefix_length = input_ids.shape[1] - 1 |
|
|
| input_ids = input_ids[:, remove_prefix_length:] |
| if token_type_ids is not None: |
| token_type_ids = token_type_ids[:, -input_ids.shape[1] :] |
|
|
| attention_mask = kwargs.get("attention_mask", None) |
| position_ids = kwargs.get("position_ids", None) |
|
|
| if attention_mask is not None and position_ids is None: |
            # create position_ids on the fly for batch generation
| position_ids = attention_mask.long().cumsum(-1) - 1 |
| position_ids.masked_fill_(attention_mask == 0, 1) |
| if past_key_values: |
| position_ids = position_ids[:, -input_ids.shape[1] :] |
|
|
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
| if inputs_embeds is not None and past_key_values is None: |
| model_inputs = {"inputs_embeds": inputs_embeds} |
| else: |
| model_inputs = {"input_ids": input_ids} |
|
|
| model_inputs.update( |
| { |
| "past_key_values": past_key_values, |
| "use_cache": kwargs.get("use_cache"), |
| "position_ids": position_ids, |
| "attention_mask": attention_mask, |
| "token_type_ids": token_type_ids, |
| } |
| ) |
|
|
| return model_inputs |
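    # Worked example (illustrative) of the position_ids derivation above for a left-padded row:
    #
    #     >>> attention_mask = torch.tensor([[0, 0, 1, 1, 1]])
    #     >>> position_ids = attention_mask.long().cumsum(-1) - 1
    #     >>> position_ids.masked_fill_(attention_mask == 0, 1)
    #     tensor([[1, 1, 0, 1, 2]])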
|
|
| @add_start_docstrings_to_model_forward( |
| GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=CausalLMOutputWithPast, |
| config_class=_CONFIG_FOR_DOC, |
| real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, CausalLMOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = transformer_outputs[0] |
|
|
        # Make sure hidden_states are on the same device as the lm_head weights (relevant when the
        # model is sharded across devices)
| hidden_states = hidden_states.to(self.lm_head.weight.device) |
|
|
        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
| lm_logits = self.lm_head(hidden_states).to(torch.float32) |
|
|
| loss = None |
| if labels is not None: |
            # move labels to correct device to enable model parallelism
| labels = labels.to(lm_logits.device) |
            # Shift so that tokens < n predict n
| shift_logits = lm_logits[..., :-1, :].contiguous() |
| shift_labels = labels[..., 1:].contiguous() |
            # Flatten the tokens
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct( |
| shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) |
| ) |
|
|
| loss = loss.to(hidden_states.dtype) |
|
|
| if not return_dict: |
| output = (lm_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return CausalLMOutputWithPast( |
| loss=loss, |
| logits=lm_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
| @staticmethod |
| def _reorder_cache( |
| past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor |
| ) -> Tuple[Tuple[torch.Tensor]]: |
| """ |
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
| beam_idx at every generation step. |
| """ |
| return tuple( |
| tuple( |
| past_state.index_select(0, beam_idx.to(past_state.device)) |
| for past_state in layer_past |
| ) |
| for layer_past in past_key_values |
| ) |
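    # End-to-end generation sketch (illustrative; the fp32 6B checkpoint needs roughly 24 GB of
    # memory, so `torch_dtype=torch.float16` or a smaller checkpoint is often preferable):
    #
    #     >>> from transformers import AutoTokenizer
    #     >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
    #     >>> model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    #     >>> inputs = tokenizer("The GPT-J model was trained on", return_tensors="pt")
    #     >>> output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
    #     >>> text = tokenizer.decode(output_ids[0], skip_special_tokens=True)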
|
|
|
|
| @add_start_docstrings( |
| """ |
| The GPT-J Model transformer with a sequence classification head on top (linear layer). |
| |
| [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models |
| (e.g. GPT, GPT-2, GPT-Neo) do. |
| |
    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
    each row of the batch).
| """, |
| GPTJ_START_DOCSTRING, |
| ) |
| class GPTJForSequenceClassification(GPTJPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.transformer = GPTJModel(config) |
| self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) |
|
|
        # Initialize weights and apply final processing
| self.post_init() |
|
|
| @add_start_docstrings_to_model_forward( |
| GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification", |
| output_type=SequenceClassifierOutputWithPast, |
| config_class=_CONFIG_FOR_DOC, |
| real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, SequenceClassifierOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = transformer_outputs[0] |
| logits = self.score(hidden_states) |
|
|
| if input_ids is not None: |
| batch_size = input_ids.shape[0] |
| else: |
| batch_size = inputs_embeds.shape[0] |
|
|
| if self.config.pad_token_id is None and batch_size != 1: |
| raise ValueError( |
| "Cannot handle batch sizes > 1 if no padding token is defined." |
| ) |
| if self.config.pad_token_id is None: |
| sequence_lengths = -1 |
| else: |
| if input_ids is not None: |
                # if no pad token is found, use modulo instead of reverse indexing for ONNX compatibility
| sequence_lengths = ( |
| torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 |
| ) |
| sequence_lengths = sequence_lengths % input_ids.shape[-1] |
| sequence_lengths = sequence_lengths.to(logits.device) |
| else: |
| sequence_lengths = -1 |
| logger.warning( |
| f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " |
| "unexpected if using padding tokens in conjunction with `inputs_embeds.`" |
| ) |
|
|
| pooled_logits = logits[ |
| torch.arange(batch_size, device=logits.device), sequence_lengths |
| ] |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(pooled_logits.device) |
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and ( |
| labels.dtype == torch.long or labels.dtype == torch.int |
| ): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) |
| else: |
| loss = loss_fct(pooled_logits, labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct( |
| pooled_logits.view(-1, self.num_labels), labels.view(-1) |
| ) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(pooled_logits, labels) |
| if not return_dict: |
| output = (pooled_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutputWithPast( |
| loss=loss, |
| logits=pooled_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
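    # Sketch of the last-token pooling used above (illustrative): with right padding and
    # pad_token_id = 0, classification logits are taken from the last non-pad position of each row.
    #
    #     >>> input_ids = torch.tensor([[5, 6, 7, 0, 0], [8, 9, 0, 0, 0]])
    #     >>> torch.eq(input_ids, 0).int().argmax(-1) - 1
    #     tensor([2, 1])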
|
|
|
|
| @add_start_docstrings( |
| """ |
| The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like |
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
| """, |
| GPTJ_START_DOCSTRING, |
| ) |
| class GPTJForQuestionAnswering(GPTJPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.transformer = GPTJModel(config) |
| self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) |
|
|
        # Initialize weights and apply final processing
| self.post_init() |
|
|
| @add_start_docstrings_to_model_forward( |
| GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| ) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=QuestionAnsweringModelOutput, |
| config_class=_CONFIG_FOR_DOC, |
| real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| start_positions: Optional[torch.LongTensor] = None, |
| end_positions: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, QuestionAnsweringModelOutput]: |
| r""" |
| start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for position (index) of the start of the labelled span for computing the token classification loss. |
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
| are not taken into account for computing the loss. |
| end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for position (index) of the end of the labelled span for computing the token classification loss. |
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
| are not taken into account for computing the loss. |
| """ |
| return_dict = ( |
| return_dict if return_dict is not None else self.config.use_return_dict |
| ) |
|
|
| outputs = self.transformer( |
| input_ids, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| sequence_output = outputs[0] |
|
|
| logits = self.qa_outputs(sequence_output) |
| start_logits, end_logits = logits.split(1, dim=-1) |
| start_logits = start_logits.squeeze(-1).contiguous() |
| end_logits = end_logits.squeeze(-1).contiguous() |
|
|
| total_loss = None |
| if start_positions is not None and end_positions is not None: |
            # If we are on multi-GPU, split adds a dimension
| if len(start_positions.size()) > 1: |
| start_positions = start_positions.squeeze(-1).to(start_logits.device) |
| if len(end_positions.size()) > 1: |
| end_positions = end_positions.squeeze(-1).to(end_logits.device) |
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
| ignored_index = start_logits.size(1) |
| start_positions = start_positions.clamp(0, ignored_index) |
| end_positions = end_positions.clamp(0, ignored_index) |
|
|
| loss_fct = CrossEntropyLoss(ignore_index=ignored_index) |
| start_loss = loss_fct(start_logits, start_positions) |
| end_loss = loss_fct(end_logits, end_positions) |
| total_loss = (start_loss + end_loss) / 2 |
|
|
| if not return_dict: |
| output = (start_logits, end_logits) + outputs[2:] |
| return ((total_loss,) + output) if total_loss is not None else output |
|
|
| return QuestionAnsweringModelOutput( |
| loss=total_loss, |
| start_logits=start_logits, |
| end_logits=end_logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
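    # Sketch of turning start/end logits into an answer span (illustrative; a full pipeline also
    # restricts candidates to the context and caps the span length):
    #
    #     >>> start_logits = torch.tensor([[0.1, 2.0, 0.3, 0.1]])
    #     >>> end_logits = torch.tensor([[0.1, 0.2, 3.0, 0.1]])
    #     >>> int(start_logits.argmax(-1)), int(end_logits.argmax(-1))
    #     (1, 2)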
|
|