zeng123 committed · verified
Commit 5bd111c · Parent(s): f3106b9

Upload modeling_gpt_neox.py

Files changed (1): modeling_gpt_neox.py (new file, +1587 lines)
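
A minimal usage sketch (an assumption for illustration, not part of the commit): a custom `modeling_gpt_neox.py` uploaded to a model repo is normally loaded through Transformers' remote-code path. The repo id below is a placeholder.

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # "zeng123/pondering-gpt-neox" is a hypothetical repo id; substitute the repository this file lives in.
    model = AutoModelForCausalLM.from_pretrained("zeng123/pondering-gpt-neox", trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained("zeng123/pondering-gpt-neox")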
# coding=utf-8
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch GPTNeoX model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from transformers.generation import GenerationMixin
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    get_torch_version,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
)
from transformers.models.gpt_neox.configuration_gpt_neox import GPTNeoXConfig

if is_flash_attn_2_available():
    from transformers.modeling_flash_attention_utils import _flash_attention_forward

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM"
_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neox-20b"
_CONFIG_FOR_DOC = "GPTNeoXConfig"


class GPTNeoXPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTNeoXConfig
    base_model_prefix = "gpt_neox"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPTNeoXLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Initialize weights with a truncated normal distribution (cut at 3 sigma),
            # variance 2 / (5 * hidden_size)
            std = math.sqrt(2.0 / (5 * self.config.hidden_size))

            # For the output projections of attention and MLP layers, apply additional scaling based on
            # network depth (and the number of pondering passes)
            if hasattr(module, "_is_attention_output") or hasattr(module, "_is_mlp_output"):
                std = std / math.sqrt(2.0 * self.config.num_hidden_layers * (self.config.pondering_steps + 1))
            nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std, a=-3 * std, b=3 * std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            # Initialize embedding layer weights with a truncated normal distribution
            std = math.sqrt(2.0 / (5 * self.config.hidden_size))
            nn.init.trunc_normal_(module.weight.data, mean=0.0, std=std, a=-3 * std, b=3 * std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
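
    # Numerical sketch of the scheme above (illustrative assumption: hidden_size=768, num_hidden_layers=12,
    # pondering_steps=1):
    #     base std   = sqrt(2 / (5 * 768))              ~ 0.0228
    #     output std = 0.0228 / sqrt(2 * 12 * (1 + 1))  ~ 0.0033
    # so residual-branch output projections start much smaller than other linears, keeping the residual
    # stream variance bounded as depth (and the number of pondering passes) grows.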


class GPTNeoXAttention(nn.Module):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them"
            )
        self.head_size = self.hidden_size // self.num_attention_heads
        self.rotary_ndims = int(self.head_size * config.rotary_pct)
        self.rope_theta = config.rotary_emb_base
        self._init_bias(config.max_position_embeddings)

        self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
        self.rotary_emb = GPTNeoXRotaryEmbedding(config=self.config)

        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        self.norm_factor = self.head_size**-0.5
        self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
        self.dense._is_attention_output = True  # Mark as attention output layer for depth-scaled init
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.is_causal = True
        self.layer_idx = layer_idx

    def _init_bias(self, max_positions, device=None):
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )
        if device is not None:
            self.bias = self.bias.to(device)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        position_ids: torch.LongTensor,
        head_mask: Optional[torch.FloatTensor] = None,
        layer_past: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        padding_mask: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ):
        # Apply attention-specific projections and RoPE
        # (`cache_position` is forwarded so the KV cache is updated at the right offset)
        query, key, value, present = self._attn_projections_and_rope(
            hidden_states=hidden_states,
            position_ids=position_ids,
            layer_past=layer_past,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )

        # Compute attention
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        # Reshape outputs
        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)
        attn_output = self.dense(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs

    @classmethod
    def _split_heads(cls, tensor, num_attention_heads, attn_head_size):
        """
        Splits the hidden dim into attn_head_size and num_attention_heads.
        """
        # tensor: [bs, seq_len, hidden_size]
        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(new_shape)
        # -> [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3)
        return tensor

    @classmethod
    def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):
        """
        Merges the attn_head_size dim and num_attn_heads dim into the hidden dim.
        """
        # tensor: [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)
        # -> [bs, seq_len, hidden_size]
        return tensor
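
    # Illustrative sketch (not executed): `_split_heads` and `_merge_heads` are inverses of each other.
    # Assuming, e.g., bs=2, seq_len=5, and 4 heads of size 8 (hidden_size=32):
    #     x = torch.randn(2, 5, 32)
    #     h = GPTNeoXAttention._split_heads(x, 4, 8)   # -> [2, 4, 5, 8]
    #     y = GPTNeoXAttention._merge_heads(h, 4, 8)   # -> [2, 5, 32]
    #     assert torch.equal(x, y)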

    def _attn_projections_and_rope(
        self,
        hidden_states: torch.FloatTensor,
        position_ids: torch.LongTensor,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ):
        # Compute QKV
        # Attention heads [batch, seq_len, hidden_size]
        #   --> [batch, seq_len, (np * 3 * head_size)]
        qkv = self.query_key_value(hidden_states)

        # [batch, seq_len, (num_heads * 3 * head_size)]
        #   --> [batch, seq_len, num_heads, 3 * head_size]
        new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
        qkv = qkv.view(*new_qkv_shape)

        # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 x [batch, num_attention_heads, seq_len, head_size]
        query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
        key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
        value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)

        # Compute rotary embeddings on rotary_ndims
        query_rot = query[..., : self.rotary_ndims]
        query_pass = query[..., self.rotary_ndims :]
        key_rot = key[..., : self.rotary_ndims]
        key_pass = key[..., self.rotary_ndims :]

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value, position_ids)
        else:
            cos, sin = position_embeddings
        query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)
        query = torch.cat((query, query_pass), dim=-1)
        key = torch.cat((key, key_pass), dim=-1)

        # Cache QKV values
        if layer_past is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs)

        return query, key, value, layer_past

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]
        # compute the causal mask from the causal mask buffer
        batch_size, num_attention_heads, query_length, attn_head_size = query.size()
        key_length = key.size(-2)

        # Dynamically grow the causal mask with the key length, if needed.
        if key_length > self.bias.shape[-1]:
            self._init_bias(key_length, device=key.device)
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]

        query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)
        key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)
        attn_scores = torch.zeros(
            batch_size * num_attention_heads,
            query_length,
            key_length,
            dtype=query.dtype,
            device=key.device,
        )
        attn_scores = torch.baddbmm(
            attn_scores,
            query,
            key.transpose(1, 2),
            beta=1.0,
            alpha=self.norm_factor,
        )
        attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)

        mask_value = torch.finfo(attn_scores.dtype).min
        # Needs to be a tensor, otherwise we get the error: `RuntimeError: expected scalar type float but found double`.
        # Needs to be on the same device, otherwise: `RuntimeError: ..., x and y to be on the same device`.
        mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)
        attn_scores = torch.where(causal_mask, attn_scores, mask_value)

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key.shape[-2]]
            attn_scores = attn_scores + causal_mask

        attn_weights = nn.functional.softmax(attn_scores, dim=-1)
        attn_weights = attn_weights.to(value.dtype)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_weights = self.attention_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights


class GPTNeoXFlashAttention2(GPTNeoXAttention):
    """
    GPTNeoX flash attention module. This module inherits from `GPTNeoXAttention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is a bottom-right
        # alignment, which became the default for flash_attn>=2.1. This attribute is used to handle the difference.
        # Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces
        # a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        position_ids: torch.LongTensor,
        head_mask: Optional[torch.FloatTensor] = None,
        layer_past: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ):
        # Apply attention-specific projections and RoPE
        query, key, value, present = self._attn_projections_and_rope(
            hidden_states=hidden_states,
            position_ids=position_ids,
            layer_past=layer_past,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )

        query_length = query.shape[-2]

        # GPT-NeoX casts query and key to fp32 to apply the rotary embedding in full precision
        target_dtype = value.dtype
        if query.dtype != target_dtype:
            query = query.to(target_dtype)
        if key.dtype != target_dtype:
            key = key.to(target_dtype)

        # Permute to get the expected shape for Flash Attention
        query = query.permute(0, 2, 1, 3)
        key = key.permute(0, 2, 1, 3)
        value = value.permute(0, 2, 1, 3)

        # In PEFT, we usually cast the layer norms to float32 for training stability, so the input
        # hidden states get silently cast to float32. Hence, we cast them back to float16 / bfloat16
        # just to be sure everything works as expected. This might slow down training & inference,
        # so it is recommended not to cast the LayerNorms.
        input_dtype = query.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.query_key_value.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to have been silently cast to float32; this might be related to "
                f"the fact that you have upcast embedding or layer norm layers to float32. We will cast the input "
                f"back to {target_dtype}."
            )

            query = query.to(target_dtype)
            key = key.to(target_dtype)
            value = value.to(target_dtype)

        attention_dropout = self.config.attention_dropout if self.training else 0.0

        # Compute attention
        attn_weights = _flash_attention_forward(
            query,
            key,
            value,
            attention_mask,
            query_length,
            dropout=attention_dropout,
            softmax_scale=self.norm_factor,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        # Reshape outputs
        attn_output = attn_weights.reshape(
            attn_weights.shape[0], attn_weights.shape[1], self.num_attention_heads * self.head_size
        )
        attn_output = self.dense(attn_output)

        outputs = (attn_output, layer_past)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class GPTNeoXSdpaAttention(GPTNeoXAttention):
    """
    GPTNeoX attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `GPTNeoXAttention`, as the weights of the module stay untouched. The only changes are in the forward pass,
    to adapt to the SDPA API.
    """

    def __init__(self, config, layer_idx=None):
        super().__init__(config, layer_idx=layer_idx)

        # SDPA with the memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a
        # custom attn_mask, so we need to call `.contiguous()`. This was fixed in torch==2.2.0.
        # Reference: https://github.com/pytorch/pytorch/issues/112577
        self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0")

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        position_ids: torch.LongTensor,
        head_mask: Optional[torch.FloatTensor] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ):
        if output_attentions or head_mask is not None:
            logger.warning_once(
                "`GPTNeoXSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
                "`output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but "
                "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
                'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                head_mask=head_mask,
                layer_past=layer_past,
                use_cache=use_cache,
                output_attentions=output_attentions,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
            )

        bsz, q_len, _ = hidden_states.size()

        # Apply attention-specific projections and RoPE
        query, key, value, present = self._attn_projections_and_rope(
            hidden_states=hidden_states,
            position_ids=position_ids,
            layer_past=layer_past,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )

        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, : key.shape[-2]]

        # GPT-NeoX casts query and key to fp32 to apply the rotary embedding in full precision
        target_dtype = value.dtype
        if query.dtype != target_dtype:
            query = query.to(target_dtype)
        if key.dtype != target_dtype:
            key = key.to(target_dtype)

        # Avoid a torch==2.1.2 specific bug for the memory-efficient backend in SDPA
        if self.require_contiguous_qkv and query.device.type == "cuda" and attention_mask is not None:
            query = query.contiguous()
            key = key.contiguous()
            value = value.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` statement instead of an
        # inline conditional assignment in SDPA to support both torch.compile's dynamic shapes and full graph
        # options. An inline conditional prevents dynamic shapes from compiling.
        is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query=query,
            key=key,
            value=value,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout.p if self.training else 0.0,
            is_causal=is_causal,
        )

        # Reshape outputs
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.dense(attn_output)

        return attn_output, present, None


def attention_mask_func(attention_scores, ltor_mask):
    attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min)
    return attention_scores


# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->GPTNeoX
class GPTNeoXRotaryEmbedding(nn.Module):
    def __init__(
        self,
        dim=None,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scaling_factor=1.0,
        rope_type="default",
        config: Optional[GPTNeoXConfig] = None,
    ):
        super().__init__()
        # TODO (joao): remove the `if` below, only used for BC
        self.rope_kwargs = {}
        if config is None:
            logger.warning_once(
                "`GPTNeoXRotaryEmbedding` can now be fully parameterized by passing the model config through the "
                "`config` argument. All other arguments will be removed in v4.46"
            )
            self.rope_kwargs = {
                "rope_type": rope_type,
                "factor": scaling_factor,
                "dim": dim,
                "base": base,
                "max_position_embeddings": max_position_embeddings,
            }
            self.rope_type = rope_type
            self.max_seq_len_cached = max_position_embeddings
            self.original_max_seq_len = max_position_embeddings
        else:
            # BC: "rope_type" was originally "type"
            if config.rope_scaling is not None:
                self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            else:
                self.rope_type = "default"
            self.max_seq_len_cached = config.max_position_embeddings
            self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        Dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->GPTNeoX
class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding):
    """GPTNeoXRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, *args, **kwargs):
        logger.warning_once(
            "`GPTNeoXLinearScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use "
            "`GPTNeoXRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)."
        )
        kwargs["rope_type"] = "linear"
        super().__init__(*args, **kwargs)


# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->GPTNeoX
class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding):
    """GPTNeoXRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, *args, **kwargs):
        logger.warning_once(
            "`GPTNeoXDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use "
            "`GPTNeoXRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
            "__init__)."
        )
        kwargs["rope_type"] = "dynamic"
        super().__init__(*args, **kwargs)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
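

# Illustrative sketch (an addition for clarity, not part of the original file): a quick shape check for the
# RoPE helpers above. It is defined but never called in this module; run it manually to verify the broadcasting.
def _rope_shape_check_example():
    # 2 sequences of 5 tokens, 4 heads with head_dim=8 -> q, k: [batch, heads, seq_len, head_dim]
    q = torch.randn(2, 4, 5, 8)
    k = torch.randn(2, 4, 5, 8)
    # cos/sin come out of GPTNeoXRotaryEmbedding.forward with shape [batch, seq_len, head_dim]
    cos = torch.randn(2, 5, 8)
    sin = torch.randn(2, 5, 8)
    # unsqueeze_dim=1 inserts the head dim so cos/sin broadcast against q and k
    q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
    assert q_embed.shape == q.shape and k_embed.shape == k.shape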


class GPTNeoXMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dense_4h_to_h._is_mlp_output = True  # Mark as MLP output layer for depth-scaled init
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states


GPT_NEOX_ATTENTION_CLASSES = {
    "eager": GPTNeoXAttention,
    "flash_attention_2": GPTNeoXFlashAttention2,
    "sdpa": GPTNeoXSdpaAttention,
}
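
# The mapping above is indexed with `config._attn_implementation`, which Transformers sets from the
# `attn_implementation` argument at load time. A minimal sketch (the repo id is a placeholder assumption):
#     model = AutoModelForCausalLM.from_pretrained("some/repo", attn_implementation="sdpa", trust_remote_code=True)
#     # -> every GPTNeoXLayer below then instantiates GPTNeoXSdpaAttention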


class GPTNeoXLayer(nn.Module):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.use_parallel_residual = config.use_parallel_residual
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_dropout = nn.Dropout(config.hidden_dropout)
        self.post_mlp_dropout = nn.Dropout(config.hidden_dropout)
        self.attention = GPT_NEOX_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
        self.mlp = GPTNeoXMLP(config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        layer_past: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    ):
        attention_layer_outputs = self.attention(
            self.input_layernorm(hidden_states),
            attention_mask=attention_mask,
            position_ids=position_ids,
            layer_past=layer_past,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        attn_output = attention_layer_outputs[0]  # output_attn: attn_output, present, (attn_weights)
        attn_output = self.post_attention_dropout(attn_output)
        outputs = attention_layer_outputs[1:]

        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))
            mlp_output = self.post_mlp_dropout(mlp_output)
            hidden_states = mlp_output + attn_output + hidden_states
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            attn_output = attn_output + hidden_states
            mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
            mlp_output = self.post_mlp_dropout(mlp_output)
            hidden_states = mlp_output + attn_output

        if use_cache:
            outputs = (hidden_states,) + outputs  # hidden_states, present, (attn_weights)
        else:
            outputs = (hidden_states,) + outputs[1:]  # hidden_states, (attn_weights)

        return outputs


GPT_NEOX_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`~GPTNeoXConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPT_NEOX_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance, see our
              [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""


@add_start_docstrings(
    "The bare GPTNeoX Model transformer outputting raw hidden-states without any specific head on top.",
    GPT_NEOX_START_DOCSTRING,
)
class GPTNeoXModel(GPTNeoXPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)
        self.emb_dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([GPTNeoXLayer(config, i) for i in range(config.num_hidden_layers)])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = GPTNeoXRotaryEmbedding(config=config)

        self._attn_implementation = config._attn_implementation
        self.gradient_checkpointing = False
        self.checkpoint_num_layers = config.checkpoint_num_layers

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_in

    def set_input_embeddings(self, value):
        self.embed_in = value

    @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        r"""
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_in(input_ids)

        # kept for BC (non `Cache` `past_key_values` inputs)
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        seq_length = inputs_embeds.shape[1]
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        hidden_states = self.emb_dropout(inputs_embeds)

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        next_decoder_cache = None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training and i < self.checkpoint_num_layers:
                outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    head_mask[i],
                    use_cache,
                    None,
                    output_attentions,
                    cache_position,
                    position_embeddings,
                )
            else:
                outputs = layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    layer_past=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                )
            hidden_states = outputs[0]
            if use_cache is True:
                next_decoder_cache = outputs[1]
            if output_attentions:
                all_attentions = all_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.final_layer_norm(hidden_states)
        # Add the last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )

    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output_attentions is True, the sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention's memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, does nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in an inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
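
    # Illustrative sketch (not executed): for sequence_length=3, target_length=3, cache_position=[0, 1, 2],
    # the constructed mask (before batch expansion) is, with m = torch.finfo(dtype).min:
    #     [[0, m, m],
    #      [0, 0, m],
    #      [0, 0, 0]]
    # i.e. position i may attend only to positions <= i; padded key positions are additionally filled with m.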


@add_start_docstrings(
    """GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.""", GPT_NEOX_START_DOCSTRING
)
class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["embed_out.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.gpt_neox = GPTNeoXModel(config)
        self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.adapter = nn.Linear(2 * config.hidden_size, config.hidden_size, bias=False)
        self.post_init()

    def get_output_embeddings(self):
        return self.embed_out

    def set_output_embeddings(self, new_embeddings):
        self.embed_out = new_embeddings

    def compute_interpolated_embeds(
        self,
        weight: torch.Tensor,
        hidden: Union[torch.Tensor, Tuple[torch.Tensor]],
        use_topk: bool = True,
    ) -> torch.Tensor:
        if use_topk:
            logits = self.embed_out(hidden)  # [B, L, vocab_size]
            top_k = 100
            topk_values, topk_indices = torch.topk(logits, k=top_k, dim=-1)
            probs_topk = torch.softmax(topk_values, dim=-1)  # [B, L, top_k]
            # Gather the corresponding token embeddings from `weight` based on the top-k indices: [B, L, top_k, hidden_size]
            embedding_topk = weight[topk_indices]
            # Weighted sum under the softmax probabilities gives the interpolated embedding: [B, L, hidden_size]
            interpolated_embeds = torch.einsum("bsl,bslh->bsh", probs_topk, embedding_topk)
            return interpolated_embeds
        else:
            logits = self.embed_out(hidden)
            probs = torch.softmax(logits, dim=-1)
            interpolated_embeds = torch.matmul(probs, weight)  # [B, L, hidden_size]
            return interpolated_embeds
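
    # The interpolation above is a soft embedding lookup: with p = softmax(E_out h) over the vocabulary
    # (optionally restricted to the top-k logits), the next input embedding is sum_v p_v * weight[v].
    # A minimal numerical sketch (illustrative assumption, not executed), with vocab size 3 and hidden size 2:
    #     weight = torch.tensor([[1., 0.], [0., 1.], [1., 1.]])
    #     probs  = torch.tensor([0.5, 0.25, 0.25])
    #     probs @ weight  # -> tensor([0.7500, 0.5000]), a convex mixture of the token embeddings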
1162
+
1163
+ @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1164
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1165
+ def forward(
1166
+ self,
1167
+ input_ids: Optional[torch.LongTensor] = None,
1168
+ attention_mask: Optional[torch.FloatTensor] = None,
1169
+ position_ids: Optional[torch.LongTensor] = None,
1170
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1171
+ head_mask: Optional[torch.FloatTensor] = None,
1172
+ past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None,
1173
+ labels: Optional[torch.LongTensor] = None,
1174
+ use_cache: Optional[bool] = None,
1175
+ output_attentions: Optional[bool] = None,
1176
+ output_hidden_states: Optional[bool] = None,
1177
+ return_dict: Optional[bool] = None,
1178
+ cache_position: Optional[torch.LongTensor] = None,
1179
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1180
+ """
1181
+ Args:
1182
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1183
+ Indices of input sequence tokens in the vocabulary.
1184
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1185
+ Mask to avoid performing attention on padding token indices.
1186
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1187
+ Indices of positions of each input sequence tokens in the position embeddings.
1188
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1189
+ Mask to nullify selected heads of the self-attention modules.
1190
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1191
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
1192
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*):
1193
+ Contains precomputed key and value hidden states of the attention blocks.
1194
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1195
+ Labels for computing the left-to-right language modeling loss (next word prediction).
1196
+ use_cache (`bool`, *optional*):
1197
+ If set to `True`, `past_key_values` key value states are returned.
1198
+ output_attentions (`bool`, *optional*):
1199
+ Whether or not to return the attentions tensors of all attention layers.
1200
+ output_hidden_states (`bool`, *optional*):
1201
+ Whether or not to return the hidden states of all layers.
1202
+ return_dict (`bool`, *optional*):
1203
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
1204
+ cache_position (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1205
+ Indices of positions of each input sequence tokens in the cache.
1206
+
1207
+ Returns:
1208
+ Returns a [`CausalLMOutputWithPast`] or a tuple of tensors if `return_dict=False`.
1209
+ """
1210
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         # In recursive (pondering) mode, use the simplified logic below
+         if self.config.ponderinglm:
+
+             # Get the initial embeddings
+             if inputs_embeds is None:
+                 inputs_embeds0 = self.gpt_neox.embed_in(input_ids)
+             if self.config.scale_embeds:
+                 embed_scale = torch.sqrt(torch.tensor(inputs_embeds0.shape[-1], dtype=inputs_embeds0.dtype))
+                 inputs_embeds0 = inputs_embeds0 * embed_scale
+                 weight = self.gpt_neox.embed_in.weight * embed_scale
+             else:
+                 weight = self.gpt_neox.embed_in.weight
+             if self.config.mutiply_pondering_steps:
+                 current_embeds = inputs_embeds0 * self.config.pondering_steps
+             else:
+                 current_embeds = inputs_embeds0
+
+             # Iterative optimization
+             for iteration in range(self.config.pondering_steps):
+                 # Forward pass to get hidden states
+                 outputs = self.gpt_neox(
+                     input_ids=None,
+                     attention_mask=attention_mask,
+                     position_ids=position_ids,
+                     head_mask=head_mask,
+                     inputs_embeds=current_embeds,
+                     past_key_values=None,
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                     output_hidden_states=self.config.output_hidden_states,
+                     return_dict=return_dict,
+                     cache_position=cache_position,
+                 )
+                 interpolated_embeds = self.compute_interpolated_embeds(weight, outputs.last_hidden_state, use_topk=False)
+                 current_embeds = current_embeds + interpolated_embeds
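+
+             # Each pondering step implements the recurrence (illustrative):
+             #     e_{t+1} = e_t + softmax(embed_out(h(e_t))) @ embed_in.weight
+             # i.e. the model's output distribution is mapped back into embedding
+             # space and accumulated onto the current input embeddings.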
+
+             # Final forward pass to compute the loss
+             final_outputs = self.gpt_neox(
+                 input_ids=None,
+                 attention_mask=attention_mask,
+                 position_ids=position_ids,
+                 head_mask=head_mask,
+                 inputs_embeds=current_embeds,
+                 past_key_values=None,
+                 use_cache=use_cache,
+                 output_attentions=output_attentions,
+                 output_hidden_states=False,
+                 return_dict=return_dict,
+                 cache_position=cache_position,
+             )
+
+             hidden_states = final_outputs[0]
+             logits = self.embed_out(hidden_states)
+
+             loss = None
+             if labels is not None:
+                 shift_logits = logits[:, :-1, :].contiguous()
+                 shift_labels = labels[:, 1:].contiguous()
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+             if not return_dict:
+                 output = (logits,) + final_outputs[1:]
+                 return ((loss,) + output) if loss is not None else output
+
+             return CausalLMOutputWithPast(
+                 loss=loss,
+                 logits=logits,
+                 past_key_values=final_outputs.past_key_values,
+                 hidden_states=final_outputs.hidden_states,
+                 attentions=final_outputs.attentions,
+             )
+
+     def _reorder_cache(self, past_key_values, beam_idx):
+         reordered_past = ()
+         for layer_past in past_key_values:
+             reordered_past += (
+                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+                 + layer_past[2:],
+             )
+         return reordered_past
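+
+     # Beam-search bookkeeping (illustrative): with beam_idx = tensor([1, 0]), each
+     # cached key/value tensor has its batch dimension permuted so that beam 0
+     # continues from what was beam 1, and vice versa.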
+
+
+ @add_start_docstrings(
+     """
+     The GPTNeoX Model transformer with a sequence classification head on top (a linear layer on top of the
+     hidden-states output). It uses the last token of each sequence in order to do the classification, as other causal
+     models (e.g. GPT-1) do; if a `pad_token_id` is defined, it uses the last non-padding token of each row.
+     """,
+     GPT_NEOX_START_DOCSTRING,
+ )
+ class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.gpt_neox = GPTNeoXModel(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=SequenceClassifierOutputWithPast,
+         config_class=_CONFIG_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.gpt_neox(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = outputs[0]
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size, sequence_length = input_ids.shape[:2]
+         else:
+             batch_size, sequence_length = inputs_embeds.shape[:2]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+         if self.config.pad_token_id is None:
+             sequence_lengths = -1
+         else:
+             if input_ids is not None:
+                 # if no pad token is found, use modulo instead of reverse indexing for ONNX compatibility
+                 sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+                 sequence_lengths = sequence_lengths % input_ids.shape[-1]
+                 sequence_lengths = sequence_lengths.to(logits.device)
+             else:
+                 sequence_lengths = -1
+                 logger.warning_once(
+                     f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+                     "unexpected if using padding tokens in conjunction with `inputs_embeds`."
+                 )
+
+         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
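+         # Pooling example (illustrative): for a row input_ids = [5, 9, 2, pad, pad],
+         # argmax over the pad mask returns 3 (the first pad); 3 - 1 = 2 selects the
+         # last real token. For a row with no padding, argmax returns 0 and
+         # (0 - 1) % seq_len wraps around to the final position.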
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(pooled_logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(pooled_logits, labels)
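+
+         # The problem type is inferred once and cached on the config (illustrative):
+         # num_labels == 1 selects regression (MSE); integer labels with several
+         # classes select single-label classification (cross-entropy); float labels
+         # select multi-label classification (BCE with logits).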
+         if not return_dict:
+             output = (pooled_logits,) + outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+
+         self.gpt_neox = GPTNeoXModel(config)
+         self.dropout = nn.Dropout(config.classifier_dropout)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint="LarsJonasson/pythia-410m-deduped-sft-swedish",
+         output_type=TokenClassifierOutput,
+         config_class=_CONFIG_FOR_DOC,
+         expected_loss=0.25,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
1450
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1451
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1452
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1453
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1454
+ """
1455
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.gpt_neox(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         hidden_states = outputs[0]
+         hidden_states = self.dropout(hidden_states)
+         logits = self.classifier(hidden_states)
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
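+             # The view(-1, ...) calls flatten [batch, seq_len, num_labels] to
+             # [batch * seq_len, num_labels], so every token position is scored as
+             # an independent classification example (illustrative note).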
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ @add_start_docstrings(
+     """
+     The GPT-NeoX Model transformer with a span classification head on top for extractive question-answering tasks like
+     SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+     """,
+     GPT_NEOX_START_DOCSTRING,
+ )
+ class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.gpt_neox = GPTNeoXModel(config)
+         self.qa_outputs = nn.Linear(config.hidden_size, 2)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=QuestionAnsweringModelOutput,
+         config_class=_CONFIG_FOR_DOC,
+         real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         start_positions: Optional[torch.LongTensor] = None,
+         end_positions: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+         r"""
+         start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for the position (index) of the start of the labelled span for computing the token classification
+             loss. Positions are clamped to the length of the sequence (`sequence_length`); positions outside of the
+             sequence are not taken into account for computing the loss.
+         end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for the position (index) of the end of the labelled span for computing the token classification
+             loss. Positions are clamped to the length of the sequence (`sequence_length`); positions outside of the
+             sequence are not taken into account for computing the loss.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.gpt_neox(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         sequence_output = outputs[0]
+
+         logits = self.qa_outputs(sequence_output)
+         start_logits, end_logits = logits.split(1, dim=-1)
+         start_logits = start_logits.squeeze(-1).contiguous()
+         end_logits = end_logits.squeeze(-1).contiguous()
+
+         total_loss = None
+         if start_positions is not None and end_positions is not None:
+             # If we are on multi-GPU, squeeze the extra dimension
+             if len(start_positions.size()) > 1:
+                 start_positions = start_positions.squeeze(-1).to(start_logits.device)
+             if len(end_positions.size()) > 1:
+                 end_positions = end_positions.squeeze(-1).to(end_logits.device)
+             # Sometimes the start/end positions fall outside our model inputs; we ignore these terms
+             ignored_index = start_logits.size(1)
+             start_positions = start_positions.clamp(0, ignored_index)
+             end_positions = end_positions.clamp(0, ignored_index)
+
+             loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+             start_loss = loss_fct(start_logits, start_positions)
+             end_loss = loss_fct(end_logits, end_positions)
+             total_loss = (start_loss + end_loss) / 2
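+             # Example (illustrative): with sequence_length 384, a start_position of
+             # 500 is clamped to ignored_index = 384, which CrossEntropyLoss then
+             # skips via ignore_index, so out-of-window answers contribute no loss.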
+
+         if not return_dict:
+             output = (start_logits, end_logits) + outputs[2:]
+             return ((total_loss,) + output) if total_loss is not None else output
+
+         return QuestionAnsweringModelOutput(
+             loss=total_loss,
+             start_logits=start_logits,
+             end_logits=end_logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )