ZhenYang21 committed on
Commit e6b2fed · 1 Parent(s): 2334cdd

Delete modeling_glm.py

Files changed (1)
  1. modeling_glm.py +0 -975
modeling_glm.py DELETED
@@ -1,975 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 shunxing1234 The HuggingFace Inc. team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ PyTorch GLM model. """
16
-
17
- import math
18
-
19
- import torch
20
- import torch.utils.checkpoint
21
- import torch.nn.functional as F
22
- from torch.nn import init, LayerNorm, Linear, CrossEntropyLoss
23
-
24
- from transformers.activations import gelu
25
- from transformers.utils import (
26
- add_code_sample_docstrings,
27
- add_start_docstrings,
28
- add_start_docstrings_to_model_forward,
29
- )
30
- from transformers.modeling_outputs import (
31
- BaseModelOutputWithPastAndCrossAttentions,
32
- ModelOutput,
33
- SequenceClassifierOutput,
34
- )
35
-
36
- from transformers.modeling_utils import (
37
- PreTrainedModel,
38
- )
39
- from .configuration_glm import GLMConfig
40
- from torch.nn.parameter import Parameter
41
-
42
- _CHECKPOINT_FOR_DOC = "shunxing1234/GLM"
43
- _CONFIG_FOR_DOC = "GLMConfig"
44
- _TOKENIZER_FOR_DOC = "GLMTokenizer"
45
-
46
- GLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
47
- "shunxing1234/GLM",
48
- # See all GLM models at https://huggingface.co/models?filter=glm
49
- ]
50
-
51
-
52
- def unscaled_init_method(sigma):
53
- """Init method based on N(0, sigma)."""
54
-
55
- def init_(tensor):
56
- return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
57
-
58
- return init_
59
-
60
-
61
- def scaled_init_method(mean, std, num_layers):
62
- """Init method based on N(0, sigma/sqrt(2*num_layers)."""
63
- std = std / math.sqrt(2.0 * num_layers)
64
-
65
- def init_(tensor):
66
- return torch.nn.init.normal_(tensor, mean=mean, std=std)
67
-
68
- return init_
69
-
70
-
71
- def ensure_divisibility(numerator, denominator):
72
- """Ensure that numerator is divisible by the denominator."""
73
- assert numerator % denominator == 0, '{} is not divisible by {}'.format(
74
- numerator, denominator)
75
-
76
-
77
- def divide(numerator, denominator):
78
- """Ensure that numerator is divisible by the denominator and return
79
- the division value."""
80
- ensure_divisibility(numerator, denominator)
81
- return numerator // denominator
82
-
83
-
84
- def split_tensor_along_last_dim(tensor, num_partitions,
85
- contiguous_split_chunks=False):
86
- """Split a tensor along its last dimension.
87
- Arguments:
88
- tensor: input tensor.
89
- num_partitions: number of partitions to split the tensor
90
- contiguous_split_chunks: If True, make each chunk contiguous
91
- in memory.
92
- """
93
- # Get the size and dimension.
94
- last_dim = tensor.dim() - 1
95
- last_dim_size = divide(tensor.size()[last_dim], num_partitions)
96
- # Split.
97
- tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
98
- # Note: torch.split does not create contiguous tensors by default.
99
- if contiguous_split_chunks:
100
- return tuple(chunk.contiguous() for chunk in tensor_list)
101
-
102
- return tensor_list
103
-
104
-
105
- class MLP(torch.nn.Module):
106
- """MLP for GPT2.
107
-
108
- MLP will take an input with hidden size h, project it to 4*h
109
- hidden dimension, perform gelu transformation, and project the
110
- state back into h hidden dimension. At the end, dropout is also
111
- applied.
112
-
113
- Arguments:
114
- hidden_size: The hidden size of the self attention.
115
- output_dropout_prob: dropout probability for the outputs
116
- after self attention and final output.
117
- init_method: initialization method used for the weights. Note
118
- that all biases are initialized to zero and
119
- layernorm weights are initialized to one.
120
- output_layer_init_method: output layer initialization. If None,
121
- use `init_method`.
122
- """
123
-
124
- def __init__(self, hidden_size, output_dropout_prob, init_method,
125
- output_layer_init_method=None):
126
- super(MLP, self).__init__()
127
- # Set output layer initialization if not provided.
128
- if output_layer_init_method is None:
129
- output_layer_init_method = init_method
130
- # Project to 4h.
131
- self.dense_h_to_4h = Linear(hidden_size, 4 * hidden_size)
132
-
133
- # Project back to h.
134
- self.dense_4h_to_h = Linear(
135
- 4 * hidden_size,
136
- hidden_size)
137
-
138
- self.dropout = torch.nn.Dropout(output_dropout_prob)
139
-
140
- def forward(self, hidden_states):
141
- # [b, s, 4hp]
142
- intermediate_parallel = self.dense_h_to_4h(hidden_states)
143
- intermediate_parallel = gelu(intermediate_parallel)
144
-
145
- # [b, s, h]
146
- output = self.dense_4h_to_h(intermediate_parallel)
147
- output = self.dropout(output)
148
- return output
149
-
150
-
151
- class VocabEmbedding(torch.nn.Module):
152
- """Embedding parallelized in the vocabulary dimension.
153
-
154
- This is mainly adapted from torch.nn.Embedding and all the default
155
- values are kept.
156
- Arguments:
157
- num_embeddings: vocabulary size.
158
- embedding_dim: size of hidden state.
159
- init_method: method to initialize weights.
160
- """
161
-
162
- def __init__(self, config):
163
- super(VocabEmbedding, self).__init__()
164
- # Keep the input dimensions.
165
- self.num_embeddings = config.vocab_size
166
- self.embedding_dim = config.hidden_size
167
- # Set the defaults for compatibility.
168
- self.padding_idx = None
169
- self.max_norm = None
170
- self.norm_type = 2.
171
- self.scale_grad_by_freq = False
172
- self.sparse = False
173
- self._weight = None
174
-
175
- self.vocab_start_index = 0
176
- self.vocab_end_index = self.num_embeddings
177
-
178
- # Allocate weights.
179
- self.weight = Parameter(torch.Tensor(self.num_embeddings,
180
- self.embedding_dim))
181
- # And initialize.
182
- init.xavier_normal_(self.weight)
183
-
184
- def forward(self, input_):
185
- # Get the embeddings.
186
- output = F.embedding(input_, self.weight,
187
- self.padding_idx, self.max_norm,
188
- self.norm_type, self.scale_grad_by_freq,
189
- self.sparse)
190
- return output
191
-
192
-
193
- class PositionalEmbedding(torch.nn.Module):
194
-
195
- def __init__(self, hidden_size):
196
- super(PositionalEmbedding, self).__init__()
197
-
198
- self.hidden_size = hidden_size
199
-
200
- inv_freq = 1 / (10000 ** (torch.arange(0.0, hidden_size, 2.0) / hidden_size))
201
- self.register_buffer('inv_freq', inv_freq)
202
-
203
- def forward(self, pos_seq, bsz=None):
204
- sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
205
- pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
206
-
207
- if bsz is not None:
208
- return pos_emb[None, :, :].expand(bsz, -1, -1)
209
- else:
210
- return pos_emb[None, :, :]
211
-
212
-
213
- class SelfAttention(torch.nn.Module):
214
- """self-attention layer for GLM.
215
-
216
- Self-attention layer takes input with size [b, s, h] where b is
217
- the batch size, s is the sequence length, and h is the hidden size
218
- and creates output of the same size.
219
- Arguments:
220
- hidden_size: total hidden size of the layer (h).
221
- num_attention_heads: number of attention heads (n). Note that we
222
- require n to be divisible by number of GPUs
223
- used to parallelize the model. Also, we
224
- require hidden size to be divisible by n.
225
- attention_dropout_prob: dropout probability for the attention scores.
226
- init_method: weight initialization.
227
- output_layer_init_method: output layer initialization. If None, use
228
- `init_method`.
229
- We use the following notation:
230
- h: hidden_size
231
- n: num_attention_heads
232
- p: number of partitions
233
- np: n/p
234
- hp: h/p
235
- hn: h/n
236
- b: batch size
237
- s: sequence length
238
- """
239
-
240
- def __init__(self, hidden_size, num_attention_heads,
241
- attention_dropout_prob, output_dropout_prob,
242
- init_method, output_layer_init_method=None,
243
- attention_scale=1.0):
244
- super(SelfAttention, self).__init__()
245
- # Set output layer initialization if not provided.
246
- if output_layer_init_method is None:
247
- output_layer_init_method = init_method
248
- # Per attention head and per partition values.
249
- self.hidden_size = hidden_size
250
- self.hidden_size_per_attention_head = divide(hidden_size,
251
- num_attention_heads)
252
-
253
- self.num_attention_heads = num_attention_heads
254
- self.attention_scale = attention_scale
255
- # Strided linear layer.
256
- self.query_key_value = Linear(hidden_size, 3 * hidden_size)
257
-
258
- # Dropout. Note that for a single iteration, this layer will generate
259
- # different outputs on different number of parallel partitions but
260
- # on average it should not be partition dependent.
261
- self.attention_dropout = torch.nn.Dropout(attention_dropout_prob)
262
-
263
- # Output.
264
- self.dense = Linear(hidden_size,
265
- hidden_size)
266
- self.output_dropout = torch.nn.Dropout(output_dropout_prob)
267
-
268
- def _transpose_for_scores(self, tensor):
269
- """Transpose a 3D tensor [b, s, np*hn] into a 4D tensor with
270
- size [b, np, s, hn].
271
- """
272
- new_tensor_shape = tensor.size()[:-1] + \
273
- (self.num_attention_heads,
274
- self.hidden_size_per_attention_head)
275
- tensor = tensor.view(*new_tensor_shape)
276
- return tensor.permute(0, 2, 1, 3)
277
-
278
- def forward(self, hidden_states, ltor_mask, mem=None):
279
- # hidden_states: [b, s, h]
280
- # ltor_mask: [b,1,s,s]
281
-
282
- # Attention heads. [b, s, hp]
283
- query_length = hidden_states.size(1)
284
- # self attention
285
- if mem is None:
286
- mixed_x_layer = self.query_key_value(hidden_states)
287
- (mixed_query_layer,
288
- mixed_key_layer,
289
- mixed_value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
290
- else:
291
- cat = torch.cat((mem, hidden_states), 1)
292
- mixed_x_layer = self.query_key_value(cat)
293
- (mixed_query_layer,
294
- mixed_key_layer,
295
- mixed_value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
296
- mixed_query_layer = mixed_query_layer[:, -query_length:]
297
-
298
- # Reshape and transpose [b, np, s, hn]
299
- query_layer = self._transpose_for_scores(mixed_query_layer)
300
- key_layer = self._transpose_for_scores(mixed_key_layer)
301
- value_layer = self._transpose_for_scores(mixed_value_layer)
302
-
303
- if self.attention_scale > 1.0:
304
- # Raw attention scores. [b, np, s, s]
305
- attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_scale),
306
- key_layer.transpose(-1, -2) / math.sqrt(
307
- self.hidden_size_per_attention_head * self.attention_scale))
308
- else:
309
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2) / math.sqrt(
310
- self.hidden_size_per_attention_head))
311
-
312
- # Apply the left to right attention mask.
313
- ltor_mask = ltor_mask.type_as(attention_scores)
314
- attention_scores = torch.mul(attention_scores, ltor_mask)
315
- if self.attention_scale > 1.0:
316
- max_attention_scores = attention_scores.max(dim=-1, keepdim=True)[0]
317
- attention_scores -= max_attention_scores
318
- attention_scores *= self.attention_scale
319
-
320
- attention_scores = attention_scores + (-65504.0) * (1.0 - ltor_mask)
321
- # Attention probabilities. [b, np, s, s]
322
- attention_probs = torch.nn.Softmax(dim=-1)(attention_scores)
323
- # This is actually dropping out entire tokens to attend to, which might
324
- # seem a bit unusual, but is taken from the original Transformer paper.
325
- # with get_cuda_rng_tracker().fork():
326
- attention_probs = self.attention_dropout(attention_probs)
327
-
328
- # Context layer.
329
- # [b, np, s, hn]
330
- context_layer = torch.matmul(attention_probs, value_layer)
331
- # [b, s, np, hn]
332
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
333
- new_context_layer_shape = context_layer.size()[:-2] + \
334
- (self.hidden_size,)
335
- # [b, s, hp]
336
- context_layer = context_layer.view(*new_context_layer_shape)
337
-
338
- # Output. [b, s, h]
339
- output = self.dense(context_layer)
340
- output = self.output_dropout(output)
341
-
342
- return output
343
-
344
-
345
- class GLMBlock(torch.nn.Module):
346
- """A single layer transformer for GLM.
347
-
348
- We use the following notation:
349
- h: hidden size
350
- n: number of attention heads
351
- b: batch size
352
- s: sequence length
353
- Transformer layer takes input with size [b, s, h] and returns an
354
- output of the same size.
355
-
356
- Arguments:
357
- hidden_size: The hidden size of the self attention.
358
- num_attention_heads: number of attention heads in the self
359
- attention.
360
- attention_dropout_prob: dropout probability of the attention
361
- score in self attention.
362
- output_dropout_prob: dropout probability for the outputs
363
- after self attention and final output.
364
- layernorm_epsilon: epsilon used in layernorm to avoid
365
- division by zero.
366
- init_method: initialization method used for the weights. Note
367
- that all biases are initialized to zero and
368
- layernorm weights are initialized to one.
369
- output_layer_init_method: output layers (attention output and
370
- mlp output) initialization. If None,
371
- use `init_method`.
372
- """
373
-
374
- def __init__(self,
375
- hidden_size,
376
- num_attention_heads,
377
- attention_dropout_prob,
378
- output_dropout_prob,
379
- layernorm_epsilon,
380
- init_method,
381
- output_layer_init_method=None,
382
- attention_scale=1.0):
383
- super(GLMBlock, self).__init__()
384
- # Set output layer initialization if not provided.
385
- if output_layer_init_method is None:
386
- output_layer_init_method = init_method
387
-
388
- # Layernorm on the input data.
389
- self.input_layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon)
390
-
391
- # Self attention.
392
- self.attention = SelfAttention(
393
- hidden_size,
394
- num_attention_heads,
395
- attention_dropout_prob,
396
- output_dropout_prob,
397
- init_method,
398
- output_layer_init_method=output_layer_init_method,
399
- attention_scale=attention_scale)
400
-
401
- # Layernorm on the input data.
402
- self.post_attention_layernorm = LayerNorm(hidden_size,
403
- eps=layernorm_epsilon)
404
-
405
- # MLP
406
- self.mlp = MLP(
407
- hidden_size,
408
- output_dropout_prob,
409
- init_method,
410
- output_layer_init_method=output_layer_init_method)
411
-
412
- def forward(self, hidden_states, ltor_mask, mem=None):
413
- # hidden_states: [b, s, h]
414
- # ltor_mask: [b,1, s,s]
415
-
416
- # Layer norm at the beginning of the transformer layer.
417
- layernorm_output = self.input_layernorm(hidden_states)
418
- mem = self.input_layernorm(mem) if mem is not None else None
419
- # Self attention.
420
- attention_output = self.attention(layernorm_output, ltor_mask, mem)
421
- # Residual connection.
422
- layernorm_input = hidden_states + attention_output
423
- # Layer norm post the self attention.
424
- layernorm_output = self.post_attention_layernorm(layernorm_input)
425
- # MLP.
426
- mlp_output = self.mlp(layernorm_output)
427
- # Second residual connection.
428
- output = layernorm_input + mlp_output
429
-
430
- return output
431
-
432
-
433
- class GLMStack(torch.nn.Module):
434
- """GLM transformer.
435
-
436
- This module takes input from the embedding layer and its output can
437
- be used directly by a logit layer. It consists of L (num-layers)
438
- blocks of:
439
- layer norm
440
- self attention
441
- residual connection
442
- layer norm
443
- mlp
444
- residual connection
445
- followed by a final layer norm.
446
-
447
- Arguments:
448
- num_layers: Number of transformer layers.
449
- hidden_size: The hidden size of the self attention.
450
- num_attention_heads: number of attention heads in the self
451
- attention.
452
- attention_dropout_prob: dropout probability of the attention
453
- score in self attention.
454
- output_dropout_prob: dropout probability for the outputs
455
- after self attention and final output.
456
- checkpoint_activations: if True, checkpoint activations.
457
- checkpoint_num_layers: number of layers to checkpoint. This
458
- is basically the chunk size in checkpointing.
459
- layernorm_epsilon: epsilon used in layernorm to avoid
460
- division by zero.
461
- init_method_std: standard deviation of the init method which has
462
- the form N(0, std).
463
- use_scaled_init_for_output_weights: If True, use 1/sqrt(2*num_layers)
464
- scaling for the output weights (
465
- output of self attention and mlp).
466
- """
467
-
468
- def __init__(self,
469
- num_layers,
470
- hidden_size,
471
- num_attention_heads,
472
- max_sequence_length,
473
- embedding_dropout_prob,
474
- attention_dropout_prob,
475
- output_dropout_prob,
476
- checkpoint_activations,
477
- checkpoint_num_layers=1,
478
- layernorm_epsilon=1.0e-5,
479
- init_method_std=0.02,
480
- use_scaled_init_for_output_weights=True,
481
- block_position_encoding=False,
482
- attention_scale=1.0,
483
- ):
484
- super(GLMStack, self).__init__()
485
- self.hidden_size = hidden_size
486
- # Store activation checkpointing flag.
487
- self.checkpoint_activations = checkpoint_activations
488
- self.checkpoint_num_layers = checkpoint_num_layers
489
-
490
- output_layer_init_method = None
491
- if use_scaled_init_for_output_weights:
492
- output_layer_init_method = scaled_init_method(0.0, init_method_std,
493
- num_layers)
494
- # Embeddings dropout
495
- self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
496
- self.block_position_encoding = block_position_encoding
497
-
498
- # Position embedding (serial).
499
- if block_position_encoding:
500
- self.position_embeddings = torch.nn.Embedding(max_sequence_length + 1, hidden_size)
501
- self.block_position_embeddings = torch.nn.Embedding(max_sequence_length + 1, hidden_size)
502
- torch.nn.init.normal_(self.block_position_embeddings.weight, mean=0.0, std=init_method_std)
503
- else:
504
- self.position_embeddings = torch.nn.Embedding(max_sequence_length, hidden_size)
505
- # Initialize the position embeddings.
506
- torch.nn.init.normal_(self.position_embeddings.weight, mean=0.0, std=init_method_std)
507
-
508
- def get_layer():
509
-
510
- return GLMBlock(
511
- hidden_size,
512
- num_attention_heads,
513
- attention_dropout_prob,
514
- output_dropout_prob,
515
- layernorm_epsilon,
516
- unscaled_init_method(init_method_std),
517
- output_layer_init_method=output_layer_init_method,
518
- attention_scale=attention_scale)
519
-
520
- # Transformer layers.
521
- self.layers = torch.nn.ModuleList(
522
- [get_layer() for _ in range(num_layers)])
523
-
524
- # Final layer norm before output.
525
- self.final_layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon)
526
-
527
- def forward(self, hidden_states, position_ids, attention_mask, memory_states=None):
528
-
529
- batch_size, query_length = hidden_states.size()[:2]
530
- memory_length = memory_states[0].size(1) if memory_states else 0
531
- # attention mask is the beginning position of B region, \in [0, query_len)
532
- is_scalar = torch.numel(attention_mask) == 1
533
- is_sep = is_scalar or torch.numel(attention_mask) == batch_size
534
- if is_sep:
535
- sep = attention_mask.item() if is_scalar else attention_mask
536
-
537
- # conventional transformer
538
- def build_mask_matrix(seq_length, sep, memory_length=0):
539
- m = hidden_states.new_ones((1, seq_length, seq_length))
540
- m = torch.tril(m)
541
- if is_scalar:
542
- m[0, :, :int(sep)] = 1
543
- else:
544
- m = m.expand(batch_size, -1, -1)
545
- ids = torch.arange(seq_length, device=sep.device, dtype=sep.dtype).view(1, -1)
546
- mask = ids < sep.view(-1, 1)
547
- m = m.masked_fill(mask.unsqueeze(1).expand_as(m), 1)
548
- if memory_length > 0:
549
- m = m.expand(batch_size, -1, -1)
550
- m = torch.cat((hidden_states.new_ones((batch_size, seq_length, memory_length)), m), dim=2)
551
- m = m.unsqueeze(1)
552
- return m
553
-
554
- attention_mask = build_mask_matrix(query_length, sep, memory_length=memory_length)
555
- else:
556
- if attention_mask.dim() == 2:
557
- attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
558
- attention_mask = attention_mask[:, :, :, -query_length - memory_length:]
559
-
560
- if self.block_position_encoding:
561
- position_ids, block_position_ids = position_ids[:, 0], position_ids[:, 1]
562
- position_embeddings = self.position_embeddings(position_ids)
563
-
564
- hidden_states = hidden_states + position_embeddings
565
- if self.block_position_encoding:
566
- block_position_embeddings = self.block_position_embeddings(block_position_ids)
567
- hidden_states = hidden_states + block_position_embeddings
568
- hidden_states = self.embedding_dropout(hidden_states)
569
-
570
- def check_detach(_hidden_states):
571
- return _hidden_states.detach()
572
-
573
- mem_layers = [check_detach(hidden_states)]
574
-
575
- for i, layer in enumerate(self.layers):
576
-
577
- args = [hidden_states, attention_mask]
578
-
579
- def create_custom_forward(module):
580
- def custom_forward(*inputs):
581
- # None for past_key_value
582
- return module(*inputs)
583
-
584
- return custom_forward
585
-
586
- mem_i = memory_states[i] if memory_states else None
587
-
588
- if self.checkpoint_activations:
589
- hidden_states = torch.utils.checkpoint.checkpoint(
590
- create_custom_forward(layer),
591
- hidden_states,
592
- mem=mem_i,
593
- )
594
- else:
595
- hidden_states = layer(*args, mem=mem_i)
596
- mem_layers.append(check_detach(hidden_states))
597
-
598
- # Final layer norm.
599
- output = self.final_layernorm(hidden_states)
600
- mem_layers = self.update_mems(mem_layers, memory_states)
601
- return (output, mem_layers)
602
-
603
- def update_mems(self, hiddens, mems):
604
- memory_length = mems[0].size(1) if mems else 0
605
- query_length = hiddens[0].size(1)
606
- new_memory_length = memory_length + query_length
607
-
608
- new_mems = []
609
- # with torch.no_grad():
610
- for i in range(len(hiddens)):
611
- if new_memory_length <= query_length:
612
- new_mems.append(hiddens[i][:, -new_memory_length:])
613
- else:
614
- new_mems.append(torch.cat((mems[i][:, -new_memory_length + query_length:], hiddens[i]), dim=1))
615
- return new_mems
616
-
617
-
618
- class GLMPreTrainedModel(PreTrainedModel):
619
- """
620
- An abstract class to handle weights initialization and
621
- a simple interface for downloading and loading pretrained models.
622
- """
623
-
624
- config_class = GLMConfig
625
- base_model_prefix = "glm"
626
- supports_gradient_checkpointing = True
627
- _keys_to_ignore_on_load_missing = [r"position_ids"]
628
-
629
- def _init_weights(self, module):
630
- """ Initialize the weights """
631
- if isinstance(module, torch.nn.Linear):
632
- # Slightly different from the TF version which uses truncated_normal for initialization
633
- # cf https://github.com/pytorch/pytorch/pull/5617
634
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
635
- if module.bias is not None:
636
- module.bias.data.zero_()
637
- elif isinstance(module, torch.nn.Embedding):
638
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
639
- if module.padding_idx is not None:
640
- module.weight.data[module.padding_idx].zero_()
641
- elif isinstance(module, torch.nn.LayerNorm):
642
- module.bias.data.zero_()
643
- module.weight.data.fill_(1.0)
644
-
645
- def _set_gradient_checkpointing(self, module, value=False):
646
- if isinstance(module, GLMModel):
647
- module.gradient_checkpointing = value
648
-
649
-
650
- GLM_START_DOCSTRING = r"""
651
- This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
652
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
653
- usage and behavior.
654
-
655
- Parameters:
656
- config ([`~GLMConfig`]): Model configuration class with all the parameters of the model.
657
- Initializing with a config file does not load the weights associated with the model, only the configuration.
658
- Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
659
- """
660
-
661
- GLM_INPUTS_DOCSTRING = r"""
662
- Args:
663
- input_ids (`torch.LongTensor` of shape `({0})`):
664
- Indices of input sequence tokens in the vocabulary.
665
-
666
- Indices can be obtained using [`GLMTokenizer`].
667
- See [`PreTrainedTokenizer.encode`] and
668
- [`PreTrainedTokenizer.__call__`] for details.
669
-
670
- [What are input IDs?](../glossary#input-ids)
671
- attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
672
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
673
-
674
- - 1 for tokens that are **not masked**,
675
- - 0 for tokens that are **masked**.
676
-
677
- [What are attention masks?](../glossary#attention-mask)
678
- token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
679
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
680
-
681
- - 0 corresponds to a *sentence A* token,
682
- - 1 corresponds to a *sentence B* token.
683
-
684
- [What are token type IDs?](../glossary#token-type-ids)
685
- position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
686
- Indices of positions of each input sequence tokens in the position embeddings.
687
- Selected in the range `[0, config.max_position_embeddings - 1]`.
688
-
689
- [What are position IDs?](../glossary#position-ids)
690
- head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
691
- Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
692
-
693
- - 1 indicates the head is **not masked**,
694
- - 0 indicates the head is **masked**.
695
-
696
- inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
697
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
698
- This is useful if you want more control over how to convert *input_ids* indices into associated vectors
699
- than the model's internal embedding lookup matrix.
700
- output_attentions (`bool`, *optional*):
701
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
702
- tensors for more detail.
703
- output_hidden_states (`bool`, *optional*):
704
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
705
- more detail.
706
- return_dict (`bool`, *optional*):
707
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
708
- """
709
-
710
-
711
- @add_start_docstrings(
712
- "The bare GLM Model transformer outputting raw hidden-states without any specific head on top.",
713
- GLM_START_DOCSTRING,
714
- )
715
- class GLMModel(GLMPreTrainedModel):
716
- """
717
-
718
- The model can behave as an encoder (with only self-attention) as well
719
- as a decoder, in which case a layer of cross-attention is added between
720
- the self-attention layers, following the architecture described in [Attention is
721
- all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
722
- Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
723
-
724
- To behave as a decoder the model needs to be initialized with the
725
- `is_decoder` argument of the configuration set to `True`.
726
- To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder`
727
- argument and `add_cross_attention` set to `True`; an
728
- `encoder_hidden_states` is then expected as an input to the forward pass.
729
- """
730
-
731
- def __init__(self, config):
732
- super().__init__(config)
733
- self.config = config
734
- self.output_predict = config.output_predict
735
- # Word embeddings (parallel).
736
- self.word_embeddings = VocabEmbedding(config)
737
-
738
- # Transformer
739
- self.transformer = GLMStack(config.num_layers,
740
- config.hidden_size,
741
- config.num_attention_heads,
742
- config.max_sequence_length,
743
- config.embedding_dropout_prob,
744
- config.attention_dropout_prob,
745
- config.output_dropout_prob,
746
- config.checkpoint_activations,
747
- config.checkpoint_num_layers,
748
- attention_scale=config.attention_scale,
749
- block_position_encoding=config.block_position_encoding)
750
-
751
- # Initialize weights and apply final processing
752
- self.post_init()
753
-
754
- @add_start_docstrings_to_model_forward(GLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
755
- @add_code_sample_docstrings(
756
- processor_class=_TOKENIZER_FOR_DOC,
757
- checkpoint=_CHECKPOINT_FOR_DOC,
758
- output_type=BaseModelOutputWithPastAndCrossAttentions,
759
- config_class=_CONFIG_FOR_DOC,
760
- )
761
- def forward(
762
- self,
763
- input_ids=None,
764
- position_ids=None,
765
- attention_mask=None,
766
- mems=None,
767
- **kwargs
768
- ):
769
- batch_size = input_ids.size(0)
770
- words_embeddings = self.word_embeddings(input_ids)
771
- embeddings = words_embeddings
772
-
773
- device = input_ids.device
774
- input_shape = input_ids.size()
775
-
776
- if position_ids is None:
777
- position_ids = torch.arange(0, input_shape[-1], dtype=torch.long, device=device)
778
- block_position_ids = torch.zeros(input_shape[-1], dtype=torch.long, device=device)
779
- position_ids = torch.stack((position_ids, block_position_ids), dim=0).unsqueeze(0)
780
- if attention_mask is None:
781
- attention_mask = torch.zeros(batch_size)
782
- # Transformer.
783
- transformer_output = self.transformer(embeddings, position_ids, attention_mask, mems)
784
- last_hidden_states, mems = transformer_output
785
- logits = None
786
- if self.output_predict:
787
- logits = F.linear(last_hidden_states, self.word_embeddings.weight)
788
-
789
- return ModelOutput(
790
- last_hidden_states=last_hidden_states,
791
- logits=logits,
792
- mems=mems,
793
- )
794
-
795
-
796
- @add_start_docstrings(
797
- """GLM Model transformer for multiple choice classification""",
798
- GLM_START_DOCSTRING
799
- )
800
- class GLMForMultipleChoice(GLMPreTrainedModel):
801
- def __init__(self, config):
802
- super().__init__(config)
803
- self.glm = GLMModel(config)
804
- self.post_init()
805
-
806
- def forward(
807
- self,
808
- input_ids=None,
809
- position_ids=None,
810
- attention_mask=None,
811
- choice_ids=None,
812
- choice_indices=None,
813
- labels=None,
814
- mems=None,
815
- **kwargs
816
- ):
817
- model_output = self.glm(input_ids, position_ids, attention_mask, mems=mems, **kwargs)
818
- lm_logits = model_output.logits
819
- log_probs = []
820
- for output, choices, choice_index in zip(F.log_softmax(lm_logits, dim=-1), choice_ids, choice_indices):
821
- log_probs_single = []
822
- for choice, choice_target_id in zip(choices, choice_index):
823
- tmp = output[choice_target_id, choice]
824
- log_probs_single.append(tmp.sum())
825
- log_probs.append(torch.stack(log_probs_single))
826
- log_probs = torch.stack(log_probs)
827
- loss = None
828
- if labels is not None:
829
- loss_fct = CrossEntropyLoss()
830
- loss = loss_fct(log_probs, labels)
831
- return ModelOutput(
832
- loss=loss,
833
- logits=log_probs,
834
- lm_logits=lm_logits,
835
- mems=model_output.mems
836
- )
837
-
838
- @add_start_docstrings(
839
- """GLM Model transformer with a `language modeling` head on top""",
840
- GLM_START_DOCSTRING,
841
- )
842
- class GLMForConditionalGeneration(GLMPreTrainedModel):
843
- def __init__(self, config):
844
- super().__init__(config)
845
- self.glm = GLMModel(config)
846
- self.post_init()
847
-
848
- def _reorder_cache(self, past, beam_idx):
849
- # if decoder past is not included in output
850
- # speedy decoding is disabled and no need to reorder
851
- if past is None:
852
- return past
853
- reordered_decoder_past = ()
854
- for layer_past_states in past:
855
- # get the correct batch idx from layer past batch dim
856
- reordered_decoder_past = reordered_decoder_past + (
857
- layer_past_states.index_select(0, beam_idx.to(layer_past_states.device)),)
858
- return reordered_decoder_past
859
-
860
- def prepare_inputs_for_generation(self, input_ids, past=None, position_ids=None, generation_attention_mask=None,
861
- **kwargs):
862
- # only last token for input_ids if past is defined in kwargs
863
- attention_mask = generation_attention_mask
864
- seq_length = input_ids.shape[1]
865
- if past:
866
- if position_ids is not None:
867
- position_ids = position_ids[:, :, seq_length - 1].unsqueeze(-1)
868
- if attention_mask is not None:
869
- attention_mask = attention_mask[:, :, seq_length - 1, :seq_length].unsqueeze(-2)
870
- input_ids = input_ids[:, -1].unsqueeze(-1)
871
- else:
872
- if position_ids is not None:
873
- position_ids = position_ids[:, :, :seq_length]
874
- if attention_mask is not None:
875
- attention_mask = attention_mask[:, :, :seq_length, :seq_length]
876
- if position_ids is not None and input_ids.size(0) > position_ids.size(0):
877
- batch_size = position_ids.size(0)
878
- num_beams = input_ids.size(0) // batch_size
879
- position_ids = position_ids.unsqueeze(1).expand(-1, num_beams, -1, -1)
880
- position_ids = position_ids.reshape(batch_size * num_beams, *position_ids.shape[-2:])
881
- if attention_mask is not None and input_ids.size(0) > attention_mask.size(0):
882
- batch_size = attention_mask.size(0)
883
- num_beams = input_ids.size(0) // batch_size
884
- attention_mask = attention_mask.unsqueeze(1).expand(-1, num_beams, -1, -1, -1)
885
- attention_mask = attention_mask.reshape(batch_size * num_beams, *attention_mask.shape[-3:])
886
- return {
887
- "input_ids": input_ids,
888
- "position_ids": position_ids,
889
- "attention_mask": attention_mask,
890
- "mems": past,
891
- }
892
-
893
- def forward(
894
- self,
895
- input_ids=None,
896
- position_ids=None,
897
- attention_mask=None,
898
- labels=None,
899
- mems=None,
900
- **kwargs
901
- ):
902
- model_output = self.glm(input_ids, position_ids, attention_mask, mems=mems, **kwargs)
903
- lm_logits = model_output.logits
904
- loss = None
905
- if labels is not None:
906
- loss_fct = CrossEntropyLoss(ignore_index=-100)
907
- loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
908
- return ModelOutput(
909
- loss=loss,
910
- logits=lm_logits,
911
- mems=model_output.mems
912
- )
913
-
914
-
915
- @add_start_docstrings(
916
- """GLM Model transformer with a sequence classification/regression head on top (a linear layer on top of
917
- the pooled output) e.g. for GLUE tasks. """,
918
- GLM_START_DOCSTRING,
919
- )
920
- class GLMForSequenceClassification(GLMPreTrainedModel):
921
- def __init__(self, config: GLMConfig, hidden_dropout=None, num_class=1):
922
- super().__init__(config)
923
- self.pool_token = config.pool_token
924
- self.glm = GLMModel(config)
925
- self.glm.output_predict = False
926
- self.num_class = num_class
927
- # Classification head.
928
- self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
929
- classifier_dropout = (
930
- config.classifier_dropout if config.classifier_dropout is not None else config.output_dropout_prob
931
- )
932
- self.dropout = torch.nn.Dropout(classifier_dropout)
933
- self.out_proj = torch.nn.Linear(config.hidden_size, config.num_labels)
934
-
935
- # Initialize weights and apply final processing
936
- self.post_init()
937
-
938
- @add_start_docstrings_to_model_forward(GLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
939
- @add_code_sample_docstrings(
940
- processor_class=_TOKENIZER_FOR_DOC,
941
- checkpoint=_CHECKPOINT_FOR_DOC,
942
- output_type=SequenceClassifierOutput,
943
- config_class=_CONFIG_FOR_DOC,
944
- )
945
- def forward(self,
946
- input_ids=None,
947
- position_ids=None,
948
- attention_mask=None,
949
- labels=None):
950
-
951
- num_choices = None
952
-
953
- if len(input_ids.shape) == 3:
954
- batch_size, num_choices = input_ids.shape[:2]
955
- input_ids = input_ids.reshape(-1, input_ids.size(-1))
956
- attention_mask = attention_mask.reshape(-1, *attention_mask.size()[2:])
957
- position_ids = position_ids.reshape(-1, *position_ids.size()[2:])
958
- model_out = self.glm(input_ids, position_ids, attention_mask)
959
- outputs, mems = model_out.last_hidden_states, model_out.mems
960
-
961
- output = outputs[:, 0, :]
962
- output = self.dropout(output)
963
- output = torch.tanh(self.dense(output))
964
- output = self.dropout(output)
965
- logits = self.out_proj(output)
966
- if num_choices is not None:
967
- logits = logits.view(-1, num_choices)
968
- loss = None
969
- if labels is not None:
970
- loss_fct = CrossEntropyLoss()
971
- loss = loss_fct(logits, labels)
972
- # loss = F.cross_entropy(logits.contiguous().float(), labels.long())
973
- return SequenceClassifierOutput(loss=loss,
974
- logits=logits,
975
- hidden_states=outputs)