| """PyTorch GPTNeoX model.""" |
|
|
| from typing import Optional, Tuple, Union |
|
|
| import torch |
| import torch.utils.checkpoint |
| from packaging import version |
| from torch import nn |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
| from transformers.activations import ACT2FN |
| from transformers.cache_utils import Cache, DynamicCache, StaticCache |
| from transformers.file_utils import ( |
| add_code_sample_docstrings, |
| add_start_docstrings, |
| add_start_docstrings_to_model_forward, |
| replace_return_docstrings, |
| ) |
| from transformers.generation import GenerationMixin |
| from transformers.modeling_attn_mask_utils import AttentionMaskConverter |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPast, |
| CausalLMOutputWithPast, |
| QuestionAnsweringModelOutput, |
| SequenceClassifierOutputWithPast, |
| TokenClassifierOutput, |
| ) |
| from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.utils import ( |
| get_torch_version, |
| is_flash_attn_2_available, |
| is_flash_attn_greater_or_equal_2_10, |
| logging, |
| ) |
| from .config_custom import GPTNeoXConfig |
|
|
|
|
| if is_flash_attn_2_available(): |
| from transformers.modeling_flash_attention_utils import _flash_attention_forward |
|
|
| logger = logging.get_logger(__name__) |
|
|
| _CHECKPOINT_FOR_DOC = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM" |
| _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neox-20b" |
| _CONFIG_FOR_DOC = "GPTNeoXConfig" |
|
|
|
|
| |
| def _prepare_4d_causal_attention_mask_with_cache_position( |
| attention_mask: torch.Tensor, |
| sequence_length: int, |
| target_length: int, |
| dtype: torch.dtype, |
| device: torch.device, |
| min_dtype: float, |
| cache_position: torch.Tensor, |
| batch_size: int, |
| ): |
| """ |
| Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape |
| `(batch_size, key_value_length)`; if the input `attention_mask` is already 4D, it is returned unchanged. |
| |
| Args: |
| attention_mask (`torch.Tensor`): |
| A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. |
| sequence_length (`int`): |
| The sequence length being processed. |
| target_length (`int`): |
| The target length: when generating with a static cache, the mask should be as long as the static cache to account for the zero padding (the part of the cache that is not yet filled). |
| dtype (`torch.dtype`): |
| The dtype to use for the 4D attention mask. |
| device (`torch.device`): |
| The device to place the 4D attention mask on. |
| min_dtype (`float`): |
| The minimum value representable with the dtype `dtype`. |
| cache_position (`torch.Tensor`): |
| Indices depicting the position of the input sequence tokens in the sequence. |
| batch_size (`int`): |
| Batch size. |
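| |
| Example (illustrative shapes, not from the original docstring): given a 2D |
| padding mask of shape `(2, 7)`, `sequence_length=3`, and `target_length=7`, |
| the helper returns a `(2, 1, 3, 7)` additive mask holding `0.0` at attendable |
| positions and `min_dtype` at masked ones. |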
| """ |
| if attention_mask is not None and attention_mask.dim() == 4: |
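| # If a 4D mask was provided, assume it is already in inverted (additive) form and pass it through unchanged. |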
| |
| causal_mask = attention_mask |
| else: |
| causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) |
| if sequence_length != 1: |
| causal_mask = torch.triu(causal_mask, diagonal=1) |
| causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) |
| causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) |
| if attention_mask is not None: |
| causal_mask = causal_mask.clone() |
| mask_length = attention_mask.shape[-1] |
| padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] |
| padding_mask = padding_mask == 0 |
| causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( |
| padding_mask, min_dtype |
| ) |
|
|
| return causal_mask |
|
|
|
|
| class GPTNeoXPreTrainedModel(PreTrainedModel): |
| """ |
| An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
| models. |
| """ |
|
|
| config_class = GPTNeoXConfig |
| base_model_prefix = "gpt_neox" |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["GPTNeoXLayer"] |
| _skip_keys_device_placement = "past_key_values" |
| _supports_flash_attn_2 = True |
| _supports_cache_class = True |
| _supports_quantized_cache = True |
| _supports_static_cache = True |
| _supports_sdpa = True |
|
|
| def _init_weights(self, module): |
| """Initialize the weights""" |
| if isinstance(module, nn.Linear): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
|
|
| class GPTNeoXAttention(nn.Module): |
| def __init__(self, config, layer_idx=None): |
| super().__init__() |
| self.config = config |
| self.num_attention_heads = config.num_attention_heads |
| self.hidden_size = config.hidden_size |
| if self.hidden_size % self.num_attention_heads != 0: |
| raise ValueError( |
| "The hidden size is not divisble by the number of attention heads! Make sure to update them" |
| ) |
| self.head_size = self.hidden_size // self.num_attention_heads |
| self.rotary_ndims = int(self.head_size * config.rotary_pct) |
| self.rope_theta = config.rotary_emb_base |
| self._init_bias(config.max_position_embeddings) |
|
|
| self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False) |
| self.rotary_emb = GPTNeoXRotaryEmbedding(config=self.config) |
|
|
| if layer_idx is None: |
| logger.warning_once( |
| f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " |
| "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " |
| "when creating this class." |
| ) |
| self.norm_factor = self.head_size**-0.5 |
| self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias) |
| self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) |
| self.attention_dropout = nn.Dropout(config.attention_dropout) |
| self.is_causal = True |
| self.layer_idx = layer_idx |
|
|
| def _init_bias(self, max_positions, device=None): |
| self.register_buffer( |
| "bias", |
| torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( |
| 1, 1, max_positions, max_positions |
| ), |
| persistent=False, |
| ) |
| if device is not None: |
| self.bias = self.bias.to(device) |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| attention_mask: torch.FloatTensor, |
| position_ids: torch.LongTensor, |
| head_mask: Optional[torch.FloatTensor] = None, |
| layer_past: Optional[Cache] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| padding_mask: Optional[torch.Tensor] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
| ): |
| |
| query, key, value, present = self._attn_projections_and_rope( |
| hidden_states=hidden_states, |
| position_ids=position_ids, |
| layer_past=layer_past, |
| use_cache=use_cache, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| ) |
|
|
| |
| attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) |
|
|
| |
| attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) |
| attn_output = self.dense(attn_output) |
|
|
| outputs = (attn_output, present) |
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
|
|
| @classmethod |
| def _split_heads(cls, tensor, num_attention_heads, attn_head_size): |
| """ |
| Splits hidden dim into attn_head_size and num_attention_heads |
| """ |
| |
| new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) |
| |
| tensor = tensor.view(new_shape) |
| |
| tensor = tensor.permute(0, 2, 1, 3) |
| return tensor |
|
|
| @classmethod |
| def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): |
| """ |
| Merges attn_head_size dim and num_attn_heads dim into hidden dim |
| """ |
| |
| tensor = tensor.permute(0, 2, 1, 3).contiguous() |
| |
| tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size) |
| |
| return tensor |
|
|
| def _attn_projections_and_rope( |
| self, |
| hidden_states: torch.FloatTensor, |
| position_ids: torch.LongTensor, |
| layer_past: Optional[Tuple[torch.Tensor]] = None, |
| use_cache: Optional[bool] = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
| ): |
| |
| |
| |
| qkv = self.query_key_value(hidden_states) |
|
|
| |
| |
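| # QKV layout: [batch, seq_len, 3 * hidden_size] is reshaped below to |
| # [batch, seq_len, num_attention_heads, 3 * head_size] and then split into |
| # query/key/value of shape [batch, num_attention_heads, seq_len, head_size]. |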
| new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) |
| qkv = qkv.view(*new_qkv_shape) |
|
|
| |
| query = qkv[..., : self.head_size].permute(0, 2, 1, 3) |
| key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3) |
| value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3) |
|
|
| |
| query_rot = query[..., : self.rotary_ndims] |
| query_pass = query[..., self.rotary_ndims :] |
| key_rot = key[..., : self.rotary_ndims] |
| key_pass = key[..., self.rotary_ndims :] |
|
|
| if position_embeddings is None: |
| logger.warning_once( |
| "The attention layers in this model are transitioning from computing the RoPE embeddings internally " |
| "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " |
| "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " |
| "removed and `position_embeddings` will be mandatory." |
| ) |
| cos, sin = self.rotary_emb(value, position_ids) |
| else: |
| cos, sin = position_embeddings |
| query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) |
| query = torch.cat((query, query_pass), dim=-1) |
| key = torch.cat((key, key_pass), dim=-1) |
|
|
| |
| if layer_past is not None: |
| cache_kwargs = { |
| "sin": sin, |
| "cos": cos, |
| "partial_rotation_size": self.rotary_ndims, |
| "cache_position": cache_position, |
| } |
| key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs) |
|
|
| return query, key, value, layer_past |
|
|
| def _attn(self, query, key, value, attention_mask=None, head_mask=None): |
| |
| |
| batch_size, num_attention_heads, query_length, attn_head_size = query.size() |
| key_length = key.size(-2) |
|
|
| |
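| # Dynamically grow the cached causal-mask buffer if the key length exceeds it. |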
| if key_length > self.bias.shape[-1]: |
| self._init_bias(key_length, device=key.device) |
| causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] |
|
|
| query = query.view(batch_size * num_attention_heads, query_length, attn_head_size) |
| key = key.view(batch_size * num_attention_heads, key_length, attn_head_size) |
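| # Compute alpha * (Q @ K^T) via torch.baddbmm into a zero-initialized buffer; |
| # with beta=1.0 and a zero buffer, only the scaled dot products remain. |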
| attn_scores = torch.zeros( |
| batch_size * num_attention_heads, |
| query_length, |
| key_length, |
| dtype=query.dtype, |
| device=key.device, |
| ) |
| attn_scores = torch.baddbmm( |
| attn_scores, |
| query, |
| key.transpose(1, 2), |
| beta=1.0, |
| alpha=self.norm_factor, |
| ) |
| attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length) |
|
|
| mask_value = torch.finfo(attn_scores.dtype).min |
| |
| |
| mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device) |
| attn_scores = torch.where(causal_mask, attn_scores, mask_value) |
|
|
| if attention_mask is not None: |
| causal_mask = attention_mask[:, :, :, : key.shape[-2]] |
| attn_scores = attn_scores + causal_mask |
|
|
| attn_weights = nn.functional.softmax(attn_scores, dim=-1) |
| attn_weights = attn_weights.to(value.dtype) |
|
|
| |
| if head_mask is not None: |
| attn_weights = attn_weights * head_mask |
|
|
| attn_weights = self.attention_dropout(attn_weights) |
|
|
| attn_output = torch.matmul(attn_weights, value) |
| return attn_output, attn_weights |
|
|
|
|
| class GPTNeoXFlashAttention2(GPTNeoXAttention): |
| """ |
| GPTNeoX flash attention module. This module inherits from `GPTNeoXAttention`, as the weights of the module stay |
| untouched. The only required change is in the forward pass, where it needs to correctly call the public API of |
| flash attention and deal with padding tokens in case the input contains any. |
| """ |
|
|
| |
| def __init__(self, *args, **kwargs): |
| super().__init__(*args, **kwargs) |
|
|
| |
| |
| |
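| # flash_attn<2.1 generates a top-left aligned causal mask, while bottom-right |
| # alignment is needed here; this flag records which convention applies. |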
| self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| attention_mask: torch.FloatTensor, |
| position_ids: torch.LongTensor, |
| head_mask: Optional[torch.FloatTensor] = None, |
| layer_past: Optional[Cache] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
| ): |
| |
| query, key, value, present = self._attn_projections_and_rope( |
| hidden_states=hidden_states, |
| position_ids=position_ids, |
| layer_past=layer_past, |
| use_cache=use_cache, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| ) |
|
|
| query_length = query.shape[-2] |
|
|
| |
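| # GPT-NeoX casts query and key to fp32 to apply the rotary embeddings in full |
| # precision; cast them back to the value dtype before calling flash attention. |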
| target_dtype = value.dtype |
| if query.dtype != target_dtype: |
| query = query.to(target_dtype) |
| if key.dtype != target_dtype: |
| key = key.to(target_dtype) |
|
|
| |
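| # Permute to [batch, seq_len, num_heads, head_size], the layout flash attention expects. |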
| query = query.permute(0, 2, 1, 3) |
| key = key.permute(0, 2, 1, 3) |
| value = value.permute(0, 2, 1, 3) |
|
|
| attention_dropout = self.config.attention_dropout if self.training else 0.0 |
|
|
| |
| attn_weights = None |
|
|
| |
| attn_output = _flash_attention_forward( |
| query, |
| key, |
| value, |
| attention_mask, |
| query_length, |
| dropout=attention_dropout, |
| softmax_scale=self.norm_factor, |
| is_causal=self.is_causal, |
| use_top_left_mask=self._flash_attn_uses_top_left_mask, |
| ) |
| |
| attn_output = attn_output.reshape(attn_output.size(0), attn_output.size(1), self.hidden_size) |
| |
| attn_output = self.dense(attn_output) |
|
|
| outputs = (attn_output, layer_past) |
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
|
|
|
|
| class GPTNeoXSdpaAttention(GPTNeoXAttention): |
| """ |
| GPTNeoX attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from |
| `GPTNeoXAttention`, as the weights of the module stay untouched. The only changes are in the forward pass, |
| to adapt to the SDPA API. |
| """ |
|
|
| def __init__(self, config, layer_idx=None): |
| super().__init__(config, layer_idx=layer_idx) |
|
|
| |
| |
| |
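| # SDPA's memory-efficient backend is broken in torch==2.1.2 for non-contiguous |
| # inputs with a custom attn_mask, so `.contiguous()` is required on torch < 2.2.0 |
| # (see https://github.com/pytorch/pytorch/issues/112577). |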
| self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0") |
|
|
| def forward( |
| self, |
| hidden_states: torch.FloatTensor, |
| attention_mask: torch.FloatTensor, |
| position_ids: torch.LongTensor, |
| head_mask: Optional[torch.FloatTensor] = None, |
| layer_past: Optional[Tuple[torch.Tensor]] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
| ): |
| if output_attentions or head_mask is not None: |
| logger.warning_once( |
| "`GPTNeoXSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support " |
| "`output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but " |
| "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. " |
| 'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
| ) |
| return super().forward( |
| hidden_states=hidden_states, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| layer_past=layer_past, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| ) |
|
|
| bsz, q_len, _ = hidden_states.size() |
|
|
| |
| query, key, value, present = self._attn_projections_and_rope( |
| hidden_states=hidden_states, |
| position_ids=position_ids, |
| layer_past=layer_past, |
| use_cache=use_cache, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| ) |
|
|
| causal_mask = attention_mask |
| if attention_mask is not None: |
| causal_mask = causal_mask[:, :, :, : key.shape[-2]] |
|
|
| |
| target_dtype = value.dtype |
| if query.dtype != target_dtype: |
| query = query.to(target_dtype) |
| if key.dtype != target_dtype: |
| key = key.to(target_dtype) |
|
|
| |
| if self.require_contiguous_qkv and query.device.type == "cuda" and attention_mask is not None: |
| query = query.contiguous() |
| key = key.contiguous() |
| value = value.contiguous() |
|
|
| |
| |
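| # `is_causal` is computed outside the SDPA call rather than inline so that |
| # torch.compile's dynamic shapes keep working. |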
| is_causal = True if causal_mask is None and q_len > 1 else False |
|
|
| attn_output = torch.nn.functional.scaled_dot_product_attention( |
| query=query, |
| key=key, |
| value=value, |
| attn_mask=causal_mask, |
| dropout_p=self.attention_dropout.p if self.training else 0.0, |
| is_causal=is_causal, |
| ) |
|
|
| |
| attn_output = attn_output.transpose(1, 2).contiguous() |
| attn_output = attn_output.view(bsz, q_len, self.hidden_size) |
|
|
| attn_output = self.dense(attn_output) |
|
|
| return attn_output, present, None |
|
|
|
|
| def attention_mask_func(attention_scores, ltor_mask): |
| attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min) |
| return attention_scores |
|
|
|
|
| |
| class GPTNeoXRotaryEmbedding(nn.Module): |
| def __init__( |
| self, |
| dim=None, |
| max_position_embeddings=2048, |
| base=10000, |
| device=None, |
| scaling_factor=1.0, |
| rope_type="default", |
| config: Optional[GPTNeoXConfig] = None, |
| ): |
| super().__init__() |
| |
| self.rope_kwargs = {} |
| if config is None: |
| logger.warning_once( |
| "`GPTNeoXRotaryEmbedding` can now be fully parameterized by passing the model config through the " |
| "`config` argument. All other arguments will be removed in v4.46" |
| ) |
| self.rope_kwargs = { |
| "rope_type": rope_type, |
| "factor": scaling_factor, |
| "dim": dim, |
| "base": base, |
| "max_position_embeddings": max_position_embeddings, |
| } |
| self.rope_type = rope_type |
| self.max_seq_len_cached = max_position_embeddings |
| self.original_max_seq_len = max_position_embeddings |
| else: |
| |
| if config.rope_scaling is not None: |
| self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) |
| else: |
| self.rope_type = "default" |
| self.max_seq_len_cached = config.max_position_embeddings |
| self.original_max_seq_len = config.max_position_embeddings |
|
|
| self.config = config |
| self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] |
|
|
| inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs) |
| self.register_buffer("inv_freq", inv_freq, persistent=False) |
| self.original_inv_freq = self.inv_freq |
|
|
| def _dynamic_frequency_update(self, position_ids, device): |
| """ |
| dynamic RoPE layers should recompute `inv_freq` in the following situations: |
| 1 - growing beyond the cached sequence length (allow scaling) |
| 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) |
| """ |
| seq_len = torch.max(position_ids) + 1 |
| if seq_len > self.max_seq_len_cached: |
| inv_freq, self.attention_scaling = self.rope_init_fn( |
| self.config, device, seq_len=seq_len, **self.rope_kwargs |
| ) |
| self.register_buffer("inv_freq", inv_freq, persistent=False) |
| self.max_seq_len_cached = seq_len |
|
|
| if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: |
| self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) |
| self.max_seq_len_cached = self.original_max_seq_len |
|
|
| @torch.no_grad() |
| def forward(self, x, position_ids): |
| if "dynamic" in self.rope_type: |
| self._dynamic_frequency_update(position_ids, device=x.device) |
|
|
| |
| inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) |
| position_ids_expanded = position_ids[:, None, :].float() |
| |
| device_type = x.device.type |
| device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" |
| with torch.autocast(device_type=device_type, enabled=False): |
| freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) |
| emb = torch.cat((freqs, freqs), dim=-1) |
| cos = emb.cos() |
| sin = emb.sin() |
|
|
| |
| cos = cos * self.attention_scaling |
| sin = sin * self.attention_scaling |
|
|
| return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) |
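| # Note on shapes (illustrative, not from the original code): for position_ids |
| # of shape (batch, seq_len), cos and sin come out as (batch, seq_len, rotary_dim) |
| # and are broadcast against query/key inside `apply_rotary_pos_emb`. |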
|
|
|
|
| |
| class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): |
| """GPTNeoXRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" |
|
|
| def __init__(self, *args, **kwargs): |
| logger.warning_once( |
| "`GPTNeoXLinearScalingRotaryEmbedding` is deprecated an will be removed in v4.46. Please use " |
| "`GPTNeoXRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)." |
| ) |
| kwargs["rope_type"] = "linear" |
| super().__init__(*args, **kwargs) |
|
|
|
|
| |
| class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): |
| """GPTNeoXRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla""" |
|
|
| def __init__(self, *args, **kwargs): |
| logger.warning_once( |
| "`GPTNeoXDynamicNTKScalingRotaryEmbedding` is deprecated an will be removed in v4.46. Please use " |
| "`GPTNeoXRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to " |
| "__init__)." |
| ) |
| kwargs["rope_type"] = "dynamic" |
| super().__init__(*args, **kwargs) |
|
|
|
|
| def rotate_half(x): |
| """Rotates half the hidden dims of the input.""" |
| x1 = x[..., : x.shape[-1] // 2] |
| x2 = x[..., x.shape[-1] // 2 :] |
| return torch.cat((-x2, x1), dim=-1) |
|
|
|
|
| |
| def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): |
| """Applies Rotary Position Embedding to the query and key tensors. |
| |
| Args: |
| q (`torch.Tensor`): The query tensor. |
| k (`torch.Tensor`): The key tensor. |
| cos (`torch.Tensor`): The cosine part of the rotary embedding. |
| sin (`torch.Tensor`): The sine part of the rotary embedding. |
| position_ids (`torch.Tensor`, *optional*): |
| Deprecated and unused. |
| unsqueeze_dim (`int`, *optional*, defaults to 1): |
| The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and |
| sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note |
| that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and |
| k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes |
| cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have |
| the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. |
| Returns: |
| `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. |
| """ |
| cos = cos.unsqueeze(unsqueeze_dim) |
| sin = sin.unsqueeze(unsqueeze_dim) |
| q_embed = (q * cos) + (rotate_half(q) * sin) |
| k_embed = (k * cos) + (rotate_half(k) * sin) |
| return q_embed, k_embed |
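| # Minimal usage sketch (illustrative tensors, not part of the original code): |
| #     q = torch.randn(1, 8, 16, 32)  # [batch, heads, seq_len, head_dim] |
| #     k = torch.randn(1, 8, 16, 32) |
| #     cos = torch.ones(1, 16, 32)    # [batch, seq_len, head_dim] |
| #     sin = torch.zeros(1, 16, 32) |
| #     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # identity for cos=1, sin=0 |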
|
|
|
|
| class GPTNeoXMLP(nn.Module): |
| def __init__(self, config): |
| super().__init__() |
| self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size) |
| self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size) |
| self.act = ACT2FN[config.hidden_act] |
|
|
| def forward(self, hidden_states): |
| hidden_states = self.dense_h_to_4h(hidden_states) |
| hidden_states = self.act(hidden_states) |
| hidden_states = self.dense_4h_to_h(hidden_states) |
| return hidden_states |
|
|
|
|
| GPT_NEOX_ATTENTION_CLASSES = { |
| "eager": GPTNeoXAttention, |
| "flash_attention_2": GPTNeoXFlashAttention2, |
| "sdpa": GPTNeoXSdpaAttention, |
| } |
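| # Hedged usage sketch: the backend is selected via `config._attn_implementation`, |
| # which `from_pretrained` sets from its `attn_implementation` argument, e.g. |
| #     model = GPTNeoXForCausalLM.from_pretrained( |
| #         "EleutherAI/gpt-neox-20b", attn_implementation="sdpa" |
| #     ) |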
|
|
|
|
| class GPTNeoXLayer(nn.Module): |
| def __init__(self, config, layer_idx): |
| super().__init__() |
| self.use_parallel_residual = config.use_parallel_residual |
| self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
| self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
| self.post_attention_dropout = nn.Dropout(config.hidden_dropout) |
| self.post_mlp_dropout = nn.Dropout(config.hidden_dropout) |
| self.attention = GPT_NEOX_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) |
| self.mlp = GPTNeoXMLP(config) |
|
|
| def forward( |
| self, |
| hidden_states: Optional[torch.FloatTensor], |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = False, |
| layer_past: Optional[Cache] = None, |
| output_attentions: Optional[bool] = False, |
| cache_position: Optional[torch.LongTensor] = None, |
| position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
| ): |
| attention_layer_outputs = self.attention( |
| self.input_layernorm(hidden_states), |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| layer_past=layer_past, |
| head_mask=head_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| ) |
| attn_output = attention_layer_outputs[0] |
| attn_output = self.post_attention_dropout(attn_output) |
| outputs = attention_layer_outputs[1:] |
|
|
| if self.use_parallel_residual: |
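| # pseudocode: x = x + attn(ln1(x)) + mlp(ln2(x)) |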
| |
| |
| mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) |
| mlp_output = self.post_mlp_dropout(mlp_output) |
| hidden_states = mlp_output + attn_output + hidden_states |
| else: |
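| # pseudocode: |
| # x = x + attn(ln1(x)) |
| # x = x + mlp(ln2(x)) |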
| |
| |
| |
| attn_output = attn_output + hidden_states |
| mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) |
| mlp_output = self.post_mlp_dropout(mlp_output) |
| hidden_states = mlp_output + attn_output |
|
|
| if use_cache: |
| outputs = (hidden_states,) + outputs |
| else: |
| outputs = (hidden_states,) + outputs[1:] |
|
|
| return outputs |
|
|
|
|
| GPT_NEOX_START_DOCSTRING = r""" |
| This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use |
| it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and |
| behavior. |
| |
| Parameters: |
| config ([`~GPTNeoXConfig`]): Model configuration class with all the parameters of the model. |
| Initializing with a config file does not load the weights associated with the model, only the |
| configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
| GPT_NEOX_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `({0})`): |
| Indices of input sequence tokens in the vocabulary. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| [What are attention masks?](../glossary#attention-mask) |
| position_ids (`torch.LongTensor` of shape `({0})`, *optional*): |
| Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, |
| config.n_positions - 1]`. |
| |
| [What are position IDs?](../glossary#position-ids) |
| head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): |
| Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| is useful if you want more control over how to convert *input_ids* indices into associated vectors than the |
| model's internal embedding lookup matrix. |
| past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): |
| Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention |
| blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values` |
| returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. |
| |
| Two formats are allowed: |
| - a [`~cache_utils.Cache`] instance, see our |
| [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache); |
| - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of |
| shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy |
| cache format. |
| |
| The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the |
| legacy cache format will be returned. |
| |
| If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't |
| have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` |
| of shape `(batch_size, sequence_length)`. |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. |
| cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): |
| Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`, |
| this tensor is not affected by padding. It is used to update the cache in the correct position and to infer |
| the complete sequence length. |
| """ |
|
|
|
|
| @add_start_docstrings( |
| "The bare GPTNeoX Model transformer outputting raw hidden-states without any specific head on top.", |
| GPT_NEOX_START_DOCSTRING, |
| ) |
| class GPTNeoXModel(GPTNeoXPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.config = config |
|
|
| self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size) |
| self.emb_dropout = nn.Dropout(config.hidden_dropout) |
| self.layers = nn.ModuleList([GPTNeoXLayer(config, i) for i in range(config.num_hidden_layers)]) |
| self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
| self.rotary_emb = GPTNeoXRotaryEmbedding(config=config) |
|
|
| self._attn_implementation = config._attn_implementation |
|
|
| self.gradient_checkpointing = False |
|
|
| |
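| # Initialize weights and apply final processing. |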
| self.post_init() |
|
|
| def get_input_embeddings(self): |
| return self.embed_in |
|
|
| def set_input_embeddings(self, value): |
| self.embed_in = value |
|
|
| @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length")) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, |
| output_type=BaseModelOutputWithPast, |
| config_class=_CONFIG_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| ) -> Union[Tuple, BaseModelOutputWithPast]: |
| r""" |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| """ |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
| if (input_ids is None) ^ (inputs_embeds is not None): |
| raise ValueError( |
| "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" |
| ) |
|
|
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| ) |
| use_cache = False |
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.embed_in(input_ids) |
|
|
| |
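| # Convert a legacy tuple-of-tuples cache to a `Cache` instance; remember to convert back before returning. |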
| return_legacy_cache = False |
| if use_cache and not isinstance(past_key_values, Cache): |
| return_legacy_cache = True |
| if past_key_values is None: |
| past_key_values = DynamicCache() |
| else: |
| past_key_values = DynamicCache.from_legacy_cache(past_key_values) |
| logger.warning_once( |
| "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and " |
| "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class " |
| "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)" |
| ) |
|
|
| seq_length = inputs_embeds.shape[1] |
| if cache_position is None: |
| past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 |
| cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device) |
|
|
| if position_ids is None: |
| position_ids = cache_position.unsqueeze(0) |
|
|
| causal_mask = self._update_causal_mask( |
| attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions |
| ) |
|
|
| |
| |
| |
| |
| |
| head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) |
| hidden_states = self.emb_dropout(inputs_embeds) |
|
|
| |
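| # Compute the rotary position embeddings once and share them across all decoder layers. |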
| position_embeddings = self.rotary_emb(hidden_states, position_ids) |
|
|
| next_decoder_cache = None |
| all_attentions = () if output_attentions else None |
| all_hidden_states = () if output_hidden_states else None |
| for i, layer in enumerate( |
| self.layers, |
| ): |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| if self.gradient_checkpointing and self.training: |
| outputs = self._gradient_checkpointing_func( |
| layer.__call__, |
| hidden_states, |
| causal_mask, |
| position_ids, |
| head_mask[i], |
| use_cache, |
| None, |
| output_attentions, |
| cache_position, |
| position_embeddings, |
| ) |
| else: |
| outputs = layer( |
| hidden_states, |
| attention_mask=causal_mask, |
| position_ids=position_ids, |
| head_mask=head_mask[i], |
| layer_past=past_key_values, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| cache_position=cache_position, |
| position_embeddings=position_embeddings, |
| ) |
| hidden_states = outputs[0] |
| if use_cache is True: |
| next_decoder_cache = outputs[1] |
| if output_attentions: |
| all_attentions = all_attentions + (outputs[2 if use_cache else 1],) |
|
|
| hidden_states = self.final_layer_norm(hidden_states) |
| |
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
| next_cache = next_decoder_cache if use_cache else None |
| if return_legacy_cache: |
| next_cache = next_cache.to_legacy_cache() |
|
|
| if not return_dict: |
| return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None) |
|
|
| return BaseModelOutputWithPast( |
| last_hidden_state=hidden_states, |
| past_key_values=next_cache, |
| hidden_states=all_hidden_states, |
| attentions=all_attentions, |
| ) |
|
|
| |
| def _update_causal_mask( |
| self, |
| attention_mask: torch.Tensor, |
| input_tensor: torch.Tensor, |
| cache_position: torch.Tensor, |
| past_key_values: Cache, |
| output_attentions: bool, |
| ): |
| if self.config._attn_implementation == "flash_attention_2": |
| if attention_mask is not None and 0.0 in attention_mask: |
| return attention_mask |
| return None |
|
|
| |
| |
| |
| past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 |
| using_static_cache = isinstance(past_key_values, StaticCache) |
|
|
| |
| if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: |
| if AttentionMaskConverter._ignore_causal_mask_sdpa( |
| attention_mask, |
| inputs_embeds=input_tensor, |
| past_key_values_length=past_seen_tokens, |
| is_training=self.training, |
| ): |
| return None |
|
|
| dtype, device = input_tensor.dtype, input_tensor.device |
| min_dtype = torch.finfo(dtype).min |
| sequence_length = input_tensor.shape[1] |
| if using_static_cache: |
| target_length = past_key_values.get_max_length() |
| else: |
| target_length = ( |
| attention_mask.shape[-1] |
| if isinstance(attention_mask, torch.Tensor) |
| else past_seen_tokens + sequence_length + 1 |
| ) |
|
|
| |
| causal_mask = _prepare_4d_causal_attention_mask_with_cache_position( |
| attention_mask, |
| sequence_length=sequence_length, |
| target_length=target_length, |
| dtype=dtype, |
| device=device, |
| min_dtype=min_dtype, |
| cache_position=cache_position, |
| batch_size=input_tensor.shape[0], |
| ) |
|
|
| if ( |
| self.config._attn_implementation == "sdpa" |
| and attention_mask is not None |
| and attention_mask.device.type == "cuda" |
| and not output_attentions |
| ): |
| |
| |
| |
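| # Unmask fully masked rows (e.g. from left padding) so SDPA's memory-efficient |
| # path does not produce NaNs. See https://github.com/pytorch/pytorch/issues/110213. |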
| causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) |
|
|
| return causal_mask |
|
|
|
|
| @add_start_docstrings( |
| """GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.""", GPT_NEOX_START_DOCSTRING |
| ) |
| class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel, GenerationMixin): |
| _tied_weights_keys = ["embed_out.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
|
|
| self.gpt_neox = GPTNeoXModel(config) |
| self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) |
|
|
| |
| self.post_init() |
|
|
| def get_output_embeddings(self): |
| return self.embed_out |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.embed_out = new_embeddings |
|
|
| @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length")) |
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| cache_position: Optional[torch.LongTensor] = None, |
| ) -> Union[Tuple, CausalLMOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in |
| `[-100, 0, ..., config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are |
| ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| |
| Returns: |
| |
| Example: |
| |
| ```python |
| >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig |
| >>> import torch |
| |
| >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") |
| >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b") |
| >>> config.is_decoder = True |
| >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config) |
| |
| >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") |
| >>> outputs = model(**inputs) |
| |
| >>> prediction_logits = outputs.logits |
| ```""" |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.gpt_neox( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| past_key_values=past_key_values, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| cache_position=cache_position, |
| ) |
|
|
| hidden_states = outputs[0] |
| lm_logits = self.embed_out(hidden_states) |
|
|
| lm_loss = None |
| if labels is not None: |
| |
| labels = labels.to(lm_logits.device) |
| |
| shift_logits = lm_logits[:, :-1, :].contiguous() |
| labels = labels[:, 1:].contiguous() |
| loss_fct = CrossEntropyLoss() |
| lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (lm_logits,) + outputs[1:] |
| return ((lm_loss,) + output) if lm_loss is not None else output |
|
|
| return CausalLMOutputWithPast( |
| loss=lm_loss, |
| logits=lm_logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
| |
| def prepare_inputs_for_generation( |
| self, |
| input_ids, |
| past_key_values=None, |
| attention_mask=None, |
| inputs_embeds=None, |
| cache_position=None, |
| position_ids=None, |
| use_cache=True, |
| **kwargs, |
| ): |
| |
| |
| |
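| # When a cache is present, keep only the tokens that have not been processed |
| # yet by slicing `input_ids` with `cache_position`; generation may pass the |
| # full sequence even though only the tail is needed. |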
| if past_key_values is not None: |
| if inputs_embeds is not None: |
| input_ids = input_ids[:, -cache_position.shape[0] :] |
| elif input_ids.shape[1] != cache_position.shape[0]: |
| input_ids = input_ids[:, cache_position] |
|
|
| if attention_mask is not None and position_ids is None: |
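| # Create position_ids on the fly for batched generation. |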
| |
| position_ids = attention_mask.long().cumsum(-1) - 1 |
| position_ids.masked_fill_(attention_mask == 0, 1) |
| if past_key_values: |
| position_ids = position_ids[:, -input_ids.shape[1] :] |
|
|
| |
| position_ids = position_ids.clone(memory_format=torch.contiguous_format) |
|
|
| |
| if inputs_embeds is not None and cache_position[0] == 0: |
| model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None} |
| else: |
| |
| model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None} |
|
|
| if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: |
| if model_inputs["inputs_embeds"] is not None: |
| batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape |
| device = model_inputs["inputs_embeds"].device |
| else: |
| batch_size, sequence_length = model_inputs["input_ids"].shape |
| device = model_inputs["input_ids"].device |
|
|
| dtype = self.embed_out.weight.dtype |
| min_dtype = torch.finfo(dtype).min |
|
|
| attention_mask = _prepare_4d_causal_attention_mask_with_cache_position( |
| attention_mask, |
| sequence_length=sequence_length, |
| target_length=past_key_values.get_max_length(), |
| dtype=dtype, |
| device=device, |
| min_dtype=min_dtype, |
| cache_position=cache_position, |
| batch_size=batch_size, |
| ) |
|
|
| model_inputs.update( |
| { |
| "position_ids": position_ids, |
| "cache_position": cache_position, |
| "past_key_values": past_key_values, |
| "use_cache": use_cache, |
| "attention_mask": attention_mask, |
| } |
| ) |
| return model_inputs |
|
|
| def _reorder_cache(self, past_key_values, beam_idx): |
| reordered_past = () |
| for layer_past in past_key_values: |
| reordered_past += ( |
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) |
| + layer_past[2:], |
| ) |
| return reordered_past |
|
|
|
|
| @add_start_docstrings( |
| """ |
| The GPTNeoX Model transformer with a sequence classification head on top (linear layer). |
| |
| [`GPTNeoXForSequenceClassification`] uses the last token in order to do the classification, as other causal models |
| (e.g. GPT-1) do. |
| |
| Since it does classification on the last token, it needs to know the position of the last token. If a |
| `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If |
| no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the |
| padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in |
| each row of the batch). |
| """, |
| GPT_NEOX_START_DOCSTRING, |
| ) |
| class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.gpt_neox = GPTNeoXModel(config) |
| self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) |
|
|
| |
| self.post_init() |
|
|
| @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=SequenceClassifierOutputWithPast, |
| config_class=_CONFIG_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.gpt_neox( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| past_key_values=past_key_values, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = outputs[0] |
| logits = self.score(hidden_states) |
|
|
| if input_ids is not None: |
| batch_size, sequence_length = input_ids.shape[:2] |
| else: |
| batch_size, sequence_length = inputs_embeds.shape[:2] |
|
|
| if self.config.pad_token_id is None and batch_size != 1: |
| raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") |
| if self.config.pad_token_id is None: |
| sequence_lengths = -1 |
| else: |
| if input_ids is not None: |
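| # argmax over the pad mask finds the first pad token; subtracting 1 and taking |
| # the modulo below maps "no pad found" (argmax == 0) to the last position while |
| # staying ONNX-exportable. |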
| |
| sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 |
| sequence_lengths = sequence_lengths % input_ids.shape[-1] |
| sequence_lengths = sequence_lengths.to(logits.device) |
| else: |
| sequence_lengths = -1 |
| logger.warning_once( |
| f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " |
| "unexpected if using padding tokens in conjunction with `inputs_embeds.`" |
| ) |
|
|
| pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(logits.device) |
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) |
| else: |
| loss = loss_fct(pooled_logits, labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(pooled_logits, labels) |
| if not return_dict: |
| output = (pooled_logits,) + outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutputWithPast( |
| loss=loss, |
| logits=pooled_logits, |
| past_key_values=outputs.past_key_values, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
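| # Hedged usage sketch (illustrative, not part of the original code): |
| #     model = GPTNeoXForSequenceClassification.from_pretrained( |
| #         "EleutherAI/gpt-neox-20b", num_labels=2 |
| #     ) |
| # Pooling then takes the logits at each sequence's last non-padding token. |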
|
|
|
|
| class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
|
|
| self.gpt_neox = GPTNeoXModel(config) |
| self.dropout = nn.Dropout(config.classifier_dropout) |
| self.classifier = nn.Linear(config.hidden_size, config.num_labels) |
|
|
| |
| self.post_init() |
|
|
| @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING) |
| @add_code_sample_docstrings( |
| checkpoint="LarsJonasson/pythia-410m-deduped-sft-swedish", |
| output_type=TokenClassifierOutput, |
| config_class=_CONFIG_FOR_DOC, |
| expected_loss=0.25, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, TokenClassifierOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the token classification loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.gpt_neox( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = outputs[0] |
| hidden_states = self.dropout(hidden_states) |
| logits = self.classifier(hidden_states) |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(logits.device) |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + outputs[2:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return TokenClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|
|
|
| @add_start_docstrings( |
| """ |
| The GPT-NeoX Model transformer with a span classification head on top for extractive question-answering tasks like |
| SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). |
| """, |
| GPT_NEOX_START_DOCSTRING, |
| ) |
| class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.gpt_neox = GPTNeoXModel(config) |
| self.qa_outputs = nn.Linear(config.hidden_size, 2) |
|
|
| |
| self.post_init() |
|
|
| @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length")) |
| @add_code_sample_docstrings( |
| checkpoint=_CHECKPOINT_FOR_DOC, |
| output_type=QuestionAnsweringModelOutput, |
| config_class=_CONFIG_FOR_DOC, |
| real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| start_positions: Optional[torch.LongTensor] = None, |
| end_positions: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, QuestionAnsweringModelOutput]: |
| r""" |
| start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for position (index) of the start of the labelled span for computing the token classification loss. |
| Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence |
| are not taken into account for computing the loss. |
| end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for position (index) of the end of the labelled span for computing the token classification loss. |
| Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence |
| are not taken into account for computing the loss. |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.gpt_neox( |
| input_ids, |
| attention_mask=attention_mask, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| sequence_output = outputs[0] |
|
|
| logits = self.qa_outputs(sequence_output) |
| start_logits, end_logits = logits.split(1, dim=-1) |
| start_logits = start_logits.squeeze(-1).contiguous() |
| end_logits = end_logits.squeeze(-1).contiguous() |
|
|
| total_loss = None |
| if start_positions is not None and end_positions is not None: |
| |
| if len(start_positions.size()) > 1: |
| start_positions = start_positions.squeeze(-1).to(start_logits.device) |
| if len(end_positions.size()) > 1: |
| end_positions = end_positions.squeeze(-1).to(end_logits.device) |
| |
| ignored_index = start_logits.size(1) |
| start_positions = start_positions.clamp(0, ignored_index) |
| end_positions = end_positions.clamp(0, ignored_index) |
|
|
| loss_fct = CrossEntropyLoss(ignore_index=ignored_index) |
| start_loss = loss_fct(start_logits, start_positions) |
| end_loss = loss_fct(end_logits, end_positions) |
| total_loss = (start_loss + end_loss) / 2 |
|
|
| if not return_dict: |
| output = (start_logits, end_logits) + outputs[2:] |
| return ((total_loss,) + output) if total_loss is not None else output |
|
|
| return QuestionAnsweringModelOutput( |
| loss=total_loss, |
| start_logits=start_logits, |
| end_logits=end_logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|