patent committed on
Commit 6f5cbae · 1 Parent(s): 20e91a4

Upload py1.py

Files changed (1)
  1. py1.py +1109 -0
py1.py ADDED
# coding=utf-8
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPT-J model."""

from typing import Tuple

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gptj import GPTJConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
_CONFIG_FOR_DOC = "GPTJConfig"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"

_CHECKPOINT_FOR_QA = "ydshieh/tiny-random-gptj-for-question-answering"
_QA_EXPECTED_OUTPUT = "' was Jim Henson?Jim Henson was a n'"
_QA_EXPECTED_LOSS = 3.13

_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ydshieh/tiny-random-gptj-for-sequence-classification"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
_SEQ_CLASS_EXPECTED_LOSS = 0.76


GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "EleutherAI/gpt-j-6B",
    # See all GPT-J models at https://huggingface.co/models?filter=gptj
]


def fixed_pos_embedding(x, seq_dim=1, seq_len=None):
    dim = x.shape[-1]
    if seq_len is None:
        seq_len = x.shape[seq_dim]
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
    sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(seq_len), inv_freq).to(x.device).float()
    return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)


def rotate_every_two(x):
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x = torch.stack((-x2, x1), axis=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')


def duplicate_interleave(m):
    """
    A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
    """
    dim0 = m.shape[0]
    m = m.view(-1, 1)  # flatten the matrix
    m = m.repeat(1, 2)  # repeat all elements into the 2nd dimension
    m = m.view(dim0, -1)  # reshape into a matrix, interleaving the copy
    return m


def apply_rotary_pos_emb(x, sincos, offset=0):
    sin, cos = map(lambda t: duplicate_interleave(t)[None, offset : x.shape[1] + offset, None, :], sincos)
    # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
    return (x * cos) + (rotate_every_two(x) * sin)

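# The helper below is an illustrative addition, not part of the upstream GPT-J file: a minimal
# sanity check showing how the rotary helpers above combine. It assumes the
# (batch, seq_len, num_heads, head_dim) layout that `_split_heads(..., rotary=True)` produces below;
# the function name is hypothetical and only exists for this sketch.
def _rotary_embedding_example():
    dummy = torch.randn(1, 4, 2, 8)  # (batch, seq_len, num_heads, head_dim)
    sincos = fixed_pos_embedding(dummy, seq_dim=1, seq_len=4)  # per-position sin/cos tables
    rotated = apply_rotary_pos_emb(dummy, sincos, offset=0)  # rotates channel pairs by position
    assert rotated.shape == dummy.shape  # rotary embedding never changes the tensor shape
    return rotated

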
class GPTJAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e9))

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and `num_attention_heads`: {self.num_attention_heads})."
            )
        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = None
        if config.rotary_dim is not None:
            self.rotary_dim = config.rotary_dim

    def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
        """
        Splits hidden dim into attn_head_size and num_attention_heads
        """
        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        if rotary:
            return tensor
        if len(tensor.shape) == 5:
            return tensor.permute(0, 1, 3, 2, 4)  # (batch, blocks, head, block_length, head_features)
        elif len(tensor.shape) == 4:
            return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
        else:
            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden dim
        """
        if len(tensor.shape) == 5:
            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        elif len(tensor.shape) == 4:
            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        else:
            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.view(new_shape)

    def _attn(
        self,
        query,
        key,
        value,
        attention_mask=None,
        head_mask=None,
    ):

        # compute causal mask from causal mask buffer
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()

        # Keep the attention weights computation in fp32 to avoid overflow issues
        query = query.to(torch.float32)
        key = key.to(torch.float32)

        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))

        attn_weights = attn_weights / self.scale_attn

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_past=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):

        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
        value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)

        seq_len = key.shape[1]
        offset = 0

        if layer_past is not None:
            offset = layer_past[0].shape[-2]
            seq_len += offset

        if self.rotary_dim is not None:
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            sincos = fixed_pos_embedding(k_rot, 1, seq_len=seq_len)
            k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=offset)
            q_rot = apply_rotary_pos_emb(q_rot, sincos, offset=offset)

            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            sincos = fixed_pos_embedding(key, 1, seq_len=seq_len)
            key = apply_rotary_pos_emb(key, sincos, offset=offset)
            query = apply_rotary_pos_emb(query, sincos, offset=offset)

        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)

        if layer_past is not None:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # compute self-attention: V x Softmax(QK^T)
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)


class GPTJMLP(nn.Module):
    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size = 4 * embed_dim
        super().__init__()
        embed_dim = config.n_embd

        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, embed_dim)

        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states):
        hidden_states = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc_out(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class GPTJBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GPTJAttention(config)
        self.mlp = GPTJMLP(inner_dim, config)

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]

        feed_forward_hidden_states = self.mlp(hidden_states)
        hidden_states = attn_output + feed_forward_hidden_states + residual

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions)


class GPTJPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTJConfig
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, GPTJModel):
            module.gradient_checkpointing = value

GPTJ_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPTJ_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`GPTJTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
    attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
    across all devices.

    Args:
        device_map (`Dict[int, list]`, optional, defaults to None):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
            following number of attention modules:

                - gpt-j-6B: 28

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6],
        1: [7, 8, 9, 10, 11, 12, 13],
        2: [14, 15, 16, 17, 18, 19, 20],
        3: [21, 22, 23, 24, 25, 26, 27],
    }
    model.parallelize(device_map)
    ```
"""

DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to CPU from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with gpt-j-6B:
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6],
        1: [7, 8, 9, 10, 11, 12, 13],
        2: [14, 15, 16, 17, 18, 19, 20],
        3: [21, 22, 23, 24, 25, 26, 27],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""

@add_start_docstrings(
    "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
    GPTJ_START_DOCSTRING,
)
class GPTJModel(GPTJPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        # Load onto devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last
        self.ln_f = self.ln_f.to(self.last_device)

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):

            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

@add_start_docstrings(
    """
    The GPT-J Model transformer with a language modeling head on top.
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForCausalLM(GPTJPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPTJModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )

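# Illustrative note, not part of the upstream file: the sequence-classification head defined next
# pools the logits of the last non-padding token. Assuming pad_token_id == 0, the lookup performed
# in its forward() behaves like this sketch:
#
#     input_ids = torch.tensor([[11, 12, 13, 0, 0]])
#     sequence_lengths = torch.ne(input_ids, 0).sum(-1) - 1      # tensor([2])
#     pooled_logits = logits[torch.arange(1), sequence_lengths]  # logits of the last real token

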
@add_start_docstrings(
    """
    The GPT-J Model transformer with a sequence classification head on top (linear layer).

    [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT, GPT-2, GPT-Neo) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForSequenceClassification(GPTJPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias", r"lm_head\.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    f"unexpected if using padding tokens in conjunction with `inputs_embeds`."
                )

        pooled_logits = logits[torch.arange(batch_size, device=self.device), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForQuestionAnswering(GPTJPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias", r"lm_head\.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_QA,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_QA_EXPECTED_OUTPUT,
        expected_loss=_QA_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
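

# The function below is an illustrative usage sketch, not part of the upstream file. It assumes the
# `transformers` package is installed and that the tiny test checkpoint referenced in
# `_CHECKPOINT_FOR_DOC` above ships a tokenizer; swap in "EleutherAI/gpt-j-6B" for real generations.
# The function name and defaults are hypothetical and exist only for this example.
def _example_generation(prompt="Hello, my name is", checkpoint="hf-internal-testing/tiny-random-gptj"):
    from transformers import AutoTokenizer  # assumed available; only needed for this example

    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = GPTJForCausalLM.from_pretrained(checkpoint)  # the causal-LM class defined above
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=20)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)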