lingbionlp committed 5fa415a (verified) · 1 Parent(s): 3bd15c7

Upload 6 files
modeling_chatglm.py ADDED
@@ -0,0 +1,1138 @@
1
+ """ PyTorch ChatGLM model. """
2
+
3
+ import math
4
+ import sys
5
+ import torch
6
+ import torch.utils.checkpoint
7
+ import torch.nn.functional as F
8
+ from torch import nn
9
+ from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss
10
+ from torch.nn.utils import skip_init
11
+ from typing import Optional, Tuple, Union, List, Dict, Any
12
+
13
+ from transformers.modeling_outputs import (
14
+ BaseModelOutputWithPast,
15
+ CausalLMOutputWithPast,
16
+ SequenceClassifierOutputWithPast,
17
+ )
18
+ from transformers.modeling_utils import PreTrainedModel
19
+ from transformers.utils import logging, is_torch_npu_available
20
+ from transformers.generation.logits_process import LogitsProcessor
21
+ from transformers.generation.utils import ModelOutput
22
+
23
+ from .configuration_chatglm import ChatGLMConfig
24
+
25
+ try:
26
+ from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available
27
+
28
+ if is_flash_attn_2_available():
29
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
30
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
31
+ except ImportError:
32
+ pass
33
+
34
+ # flags required to enable jit fusion kernels
35
+
36
+ if sys.platform != 'darwin' and not is_torch_npu_available():
37
+ torch._C._jit_set_profiling_mode(False)
38
+ torch._C._jit_set_profiling_executor(False)
39
+ torch._C._jit_override_can_fuse_on_cpu(True)
40
+ torch._C._jit_override_can_fuse_on_gpu(True)
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM"
45
+ _CONFIG_FOR_DOC = "ChatGLMConfig"
46
+
47
+
48
+ def default_init(cls, *args, **kwargs):
49
+ return cls(*args, **kwargs)
50
+
51
+
52
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
53
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
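+ # Guard against degenerate logits: if any score is NaN or Inf, zero everything
+ # and force a single safe token id (198) with a large score so generation can continue.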
54
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
55
+ scores.zero_()
56
+ scores[..., 198] = 5e4
57
+ return scores
58
+
59
+
60
+ def split_tensor_along_last_dim(
61
+ tensor: torch.Tensor,
62
+ num_partitions: int,
63
+ contiguous_split_chunks: bool = False,
64
+ ) -> List[torch.Tensor]:
65
+ """Split a tensor along its last dimension.
66
+
67
+ Arguments:
68
+ tensor: input tensor.
69
+ num_partitions: number of partitions to split the tensor
70
+ contiguous_split_chunks: If True, make each chunk contiguous
71
+ in memory.
72
+
73
+ Returns:
74
+ A list of Tensors
75
+ """
76
+ # Get the size and dimension.
77
+ last_dim = tensor.dim() - 1
78
+ last_dim_size = tensor.size()[last_dim] // num_partitions
79
+ # Split.
80
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
81
+ # Note: torch.split does not create contiguous tensors by default.
82
+ if contiguous_split_chunks:
83
+ return tuple(chunk.contiguous() for chunk in tensor_list)
84
+
85
+ return tensor_list
86
+
87
+
88
+ class RotaryEmbedding(nn.Module):
89
+ def __init__(self, dim, rope_ratio=1, original_impl=False, device=None, dtype=None):
90
+ super().__init__()
91
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))
92
+ self.register_buffer("inv_freq", inv_freq)
93
+ self.dim = dim
94
+ self.original_impl = original_impl
95
+ self.rope_ratio = rope_ratio
96
+
97
+ def forward_impl(
98
+ self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
99
+ ):
100
+ """Enhanced Transformer with Rotary Position Embedding.
101
+
102
+ Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
103
+ transformers/rope/__init__.py. MIT License:
104
+ https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
105
+ """
106
+ # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
107
+ base = base * self.rope_ratio
108
+ theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem))
109
+
110
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
111
+ seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)
112
+
113
+ # Calculate the product of position index and $\theta_i$
114
+ idx_theta = torch.outer(seq_idx, theta).float()
115
+
116
+ cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
117
+
118
+ # this is to mimic the behaviour of complex32, else we will get different results
119
+ if dtype in (torch.float16, torch.bfloat16, torch.int8):
120
+ cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
121
+ return cache
122
+
123
+ def forward(self, max_seq_len, offset=0):
124
+ return self.forward_impl(
125
+ max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device
126
+ )
127
+
128
+
129
+ @torch.jit.script
130
+ def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
131
+ # x: [b, np, sq, hn]
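+ # rope_cache holds (cos, sin) pairs per position; only the first rot_dim channels of x
+ # are rotated pairwise ((x0, x1) -> (x0*cos - x1*sin, x1*cos + x0*sin)), the rest pass through unchanged.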
132
+ b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3)
133
+ rot_dim = rope_cache.shape[-2] * 2
134
+ x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
135
+ # truncate to support variable sizes
136
+ rope_cache = rope_cache[:, :sq]
137
+ xshaped = x.reshape(b, np, sq, rot_dim // 2, 2)
138
+ rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2)
139
+ x_out2 = torch.stack(
140
+ [
141
+ xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
142
+ xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
143
+ ],
144
+ -1,
145
+ )
146
+ x_out2 = x_out2.flatten(3)
147
+ return torch.cat((x_out2, x_pass), dim=-1)
148
+
149
+
150
+ class RMSNorm(torch.nn.Module):
151
+ def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
152
+ super().__init__()
153
+ self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
154
+ self.eps = eps
155
+
156
+ def forward(self, hidden_states: torch.Tensor):
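+ # Scale by the inverse root-mean-square over the last dimension (no mean subtraction, no bias);
+ # the statistic is computed in float32 and the result is cast back to the input dtype.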
157
+ input_dtype = hidden_states.dtype
158
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
159
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
160
+
161
+ return (self.weight * hidden_states).to(input_dtype)
162
+
163
+
164
+ class CoreAttention(torch.nn.Module):
165
+ def __init__(self, config: ChatGLMConfig, layer_number):
166
+ super(CoreAttention, self).__init__()
167
+ self.config = config
168
+ self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
169
+ self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
170
+ if self.apply_query_key_layer_scaling:
171
+ self.attention_softmax_in_fp32 = True
172
+ self.layer_number = max(1, layer_number)
173
+ self.is_causal = True
174
+
175
+ projection_size = config.kv_channels * config.num_attention_heads
176
+
177
+ # Per attention head and per partition values.
178
+ self.hidden_size_per_partition = projection_size
179
+ self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
180
+ self.num_attention_heads_per_partition = config.num_attention_heads
181
+
182
+ coeff = None
183
+ self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
184
+ if self.apply_query_key_layer_scaling:
185
+ coeff = self.layer_number
186
+ self.norm_factor *= coeff
187
+ self.coeff = coeff
188
+
189
+ self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
190
+
191
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
192
+ # [b, np, sq, sk]
193
+ output_size = (query_layer.size(0), query_layer.size(1), query_layer.size(2), key_layer.size(2))
194
+
195
+ # [b, np, sq, hn] -> [b * np, sq, hn]
196
+ query_layer = query_layer.view(output_size[0] * output_size[1], output_size[2], -1)
197
+ # [b, np, sk, hn] -> [b * np, sk, hn]
198
+ key_layer = key_layer.view(output_size[0] * output_size[1], output_size[3], -1)
199
+
200
+ # preallocating input tensor: [b * np, sq, sk]
201
+ matmul_input_buffer = torch.empty(
202
+ output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
203
+ device=query_layer.device
204
+ )
205
+
206
+ # Raw attention scores. [b * np, sq, sk]
207
+ matmul_result = torch.baddbmm(
208
+ matmul_input_buffer,
209
+ query_layer, # [b * np, sq, hn]
210
+ key_layer.transpose(1, 2), # [b * np, hn, sk]
211
+ beta=0.0,
212
+ alpha=(1.0 / self.norm_factor),
213
+ )
214
+
215
+ # change view to [b, np, sq, sk]
216
+ attention_scores = matmul_result.view(*output_size)
217
+
218
+ # ===========================
219
+ # Attention probs and dropout
220
+ # ===========================
221
+
222
+ # attention scores and attention mask [b, np, sq, sk]
223
+ if self.attention_softmax_in_fp32:
224
+ attention_scores = attention_scores.float()
225
+ if self.coeff is not None:
226
+ attention_scores = attention_scores * self.coeff
227
+ if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
228
+ attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
229
+ device=attention_scores.device, dtype=torch.bool)
230
+ attention_mask.tril_()
231
+ attention_mask = ~attention_mask
232
+ if attention_mask is not None:
233
+ attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
234
+ attention_probs = F.softmax(attention_scores, dim=-1)
235
+ attention_probs = attention_probs.type_as(value_layer)
236
+
237
+ # This is actually dropping out entire tokens to attend to, which might
238
+ # seem a bit unusual, but is taken from the original Transformer paper.
239
+ attention_probs = self.attention_dropout(attention_probs)
240
+
241
+ # query layer shape: [b * np, sq, hn]
242
+ # value layer shape: [b, np, sk, hn]
243
+ # attention shape: [b, np, sq, sk]
244
+ # context layer shape: [b, np, sq, hn]
245
+ output_size = (value_layer.size(0), value_layer.size(1), query_layer.size(1), value_layer.size(3))
246
+ # change view [b * np, sk, hn]
247
+ value_layer = value_layer.view(output_size[0] * output_size[1], value_layer.size(2), -1)
248
+ # change view [b * np, sq, sk]
249
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
250
+ # matmul: [b * np, sq, hn]
251
+ context_layer = torch.bmm(attention_probs, value_layer)
252
+ # change view [b, np, sq, hn]
253
+ context_layer = context_layer.view(*output_size)
254
+ # [b, np, sq, hn] --> [b, sq, np, hn]
255
+ context_layer = context_layer.transpose(1, 2).contiguous()
256
+ # [b, sq, np, hn] --> [b, sq, hp]
257
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
258
+ context_layer = context_layer.reshape(*new_context_layer_shape)
259
+
260
+ return context_layer
261
+
262
+
263
+ class SdpaAttention(CoreAttention):
264
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
265
+ if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
266
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
267
+ is_causal=True,
268
+ dropout_p=self.config.attention_dropout if self.training else 0.0)
269
+ else:
270
+ if attention_mask is not None:
271
+ attention_mask = ~attention_mask
272
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
273
+ attention_mask,
274
+ dropout_p=self.config.attention_dropout if self.training else 0.0)
275
+ context_layer = context_layer.transpose(1, 2).contiguous()
276
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
277
+ context_layer = context_layer.reshape(*new_context_layer_shape)
278
+ return context_layer
279
+
280
+
281
+ def _get_unpad_data(attention_mask):
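+ # Helper for the flash-attention varlen path: returns the indices of non-padding tokens, the
+ # exclusive prefix sum of per-sequence lengths (cu_seqlens), and the longest sequence in the batch.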
282
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
283
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
284
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
285
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
286
+ return (
287
+ indices,
288
+ cu_seqlens,
289
+ max_seqlen_in_batch,
290
+ )
291
+
292
+
293
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2
294
+ class FlashAttention2(CoreAttention):
295
+ def __init__(self, *args, **kwargs):
296
+ super().__init__(*args, **kwargs)
297
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
298
+
299
+ def forward(self, query_states, key_states, value_states, attention_mask):
300
+ query_states = query_states.transpose(1, 2)
301
+ key_states = key_states.transpose(1, 2)
302
+ value_states = value_states.transpose(1, 2)
303
+ batch_size, query_length = query_states.shape[:2]
304
+ if not self._flash_attn_uses_top_left_mask:
305
+ causal = self.is_causal
306
+ else:
307
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
308
+ causal = self.is_causal and query_length != 1
309
+ dropout = self.config.attention_dropout if self.training else 0.0
310
+ # Contains at least one padding token in the sequence
311
+ if attention_mask is not None:
312
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
313
+ query_states, key_states, value_states, attention_mask, query_length
314
+ )
315
+
316
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
317
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
318
+
319
+ attn_output_unpad = flash_attn_varlen_func(
320
+ query_states,
321
+ key_states,
322
+ value_states,
323
+ cu_seqlens_q=cu_seqlens_q,
324
+ cu_seqlens_k=cu_seqlens_k,
325
+ max_seqlen_q=max_seqlen_in_batch_q,
326
+ max_seqlen_k=max_seqlen_in_batch_k,
327
+ dropout_p=dropout,
328
+ softmax_scale=None,
329
+ causal=causal,
330
+ )
331
+
332
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
333
+ else:
334
+ attn_output = flash_attn_func(
335
+ query_states, key_states, value_states, dropout, softmax_scale=None, causal=causal
336
+ )
337
+ attn_output = attn_output.reshape(batch_size, query_length, self.hidden_size_per_partition).contiguous()
338
+ return attn_output
339
+
340
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
341
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
342
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
343
+
344
+ key_layer = index_first_axis(
345
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
346
+ )
347
+ value_layer = index_first_axis(
348
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
349
+ )
350
+ if query_length == kv_seq_len:
351
+ query_layer = index_first_axis(
352
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads_per_partition, head_dim),
353
+ indices_k
354
+ )
355
+ cu_seqlens_q = cu_seqlens_k
356
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
357
+ indices_q = indices_k
358
+ elif query_length == 1:
359
+ max_seqlen_in_batch_q = 1
360
+ cu_seqlens_q = torch.arange(
361
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
362
+ ) # There is a memcpy here, that is very bad.
363
+ indices_q = cu_seqlens_q[:-1]
364
+ query_layer = query_layer.squeeze(1)
365
+ else:
366
+ # The -q_len: slice assumes left padding.
367
+ attention_mask = attention_mask[:, -query_length:]
368
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
369
+
370
+ return (
371
+ query_layer,
372
+ key_layer,
373
+ value_layer,
374
+ indices_q,
375
+ (cu_seqlens_q, cu_seqlens_k),
376
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
377
+ )
378
+
379
+
380
+ CORE_ATTENTION_CLASSES = {
381
+ "eager": CoreAttention,
382
+ "sdpa": SdpaAttention,
383
+ "flash_attention_2": FlashAttention2
384
+ }
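+ # The backend is picked via config._attn_implementation, which transformers resolves to
+ # "eager", "sdpa" or "flash_attention_2" when the model is loaded.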
385
+
386
+
387
+ class SelfAttention(torch.nn.Module):
388
+ """Parallel self-attention layer abstract class.
389
+
390
+ Self-attention layer takes input with size [b, s, h]
391
+ and returns output of the same size.
392
+ """
393
+
394
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
395
+ super(SelfAttention, self).__init__()
396
+ self.layer_number = max(1, layer_number)
397
+
398
+ self.projection_size = config.kv_channels * config.num_attention_heads
399
+
400
+ # Per attention head and per partition values.
401
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
402
+ self.num_attention_heads_per_partition = config.num_attention_heads
403
+
404
+ self.multi_query_attention = config.multi_query_attention
405
+ self.qkv_hidden_size = 3 * self.projection_size
406
+ if self.multi_query_attention:
407
+ self.num_multi_query_groups_per_partition = config.multi_query_group_num
408
+ self.qkv_hidden_size = (
409
+ self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
410
+ )
411
+ self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
412
+ bias=config.add_bias_linear or config.add_qkv_bias,
413
+ device=device, **_config_to_kwargs(config)
414
+ )
415
+
416
+ self.core_attention = CORE_ATTENTION_CLASSES[config._attn_implementation](config, self.layer_number)
417
+
418
+ # Output.
419
+ self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,
420
+ device=device, **_config_to_kwargs(config)
421
+ )
422
+
423
+ def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
424
+ if self.multi_query_attention:
425
+ num_attention_heads = self.num_multi_query_groups_per_partition
426
+ else:
427
+ num_attention_heads = self.num_attention_heads_per_partition
428
+ return torch.empty(
429
+ inference_max_sequence_len,
430
+ batch_size,
431
+ num_attention_heads,
432
+ self.hidden_size_per_attention_head,
433
+ dtype=dtype,
434
+ device=device,
435
+ )
436
+
437
+ def forward(
438
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
439
+ ):
440
+ # hidden_states: [b, sq, h]
441
+
442
+ # =================================================
443
+ # Pre-allocate memory for key-values for inference.
444
+ # =================================================
445
+ # =====================
446
+ # Query, Key, and Value
447
+ # =====================
448
+
449
+ # Attention heads [b, sq, h] --> [b, sq, (np * 3 * hn)]
450
+ mixed_x_layer = self.query_key_value(hidden_states)
451
+
452
+ if self.multi_query_attention:
453
+ (query_layer, key_layer, value_layer) = mixed_x_layer.split(
454
+ [
455
+ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
456
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
457
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
458
+ ],
459
+ dim=-1,
460
+ )
461
+ query_layer = query_layer.view(
462
+ query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
463
+ )
464
+ key_layer = key_layer.view(
465
+ key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
466
+ )
467
+ value_layer = value_layer.view(
468
+ value_layer.size()[:-1]
469
+ + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
470
+ )
471
+ else:
472
+ new_tensor_shape = mixed_x_layer.size()[:-1] + \
473
+ (self.num_attention_heads_per_partition,
474
+ 3 * self.hidden_size_per_attention_head)
475
+ mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
476
+
477
+ # [b, sq, np, 3 * hn] --> 3 [b, sq, np, hn]
478
+ (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
479
+
480
+ # [b, sq, np, hn] -> [b, np, sq, hn]
481
+ query_layer, key_layer, value_layer = [k.transpose(1, 2) for k in [query_layer, key_layer, value_layer]]
482
+
483
+ # apply relative positional encoding (rotary embedding)
484
+ if rotary_pos_emb is not None:
485
+ query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
486
+ key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)
487
+
488
+ # adjust key and value for inference
489
+ if kv_cache is not None:
490
+ cache_k, cache_v = kv_cache
491
+ key_layer = torch.cat((cache_k, key_layer), dim=2)
492
+ value_layer = torch.cat((cache_v, value_layer), dim=2)
493
+ if use_cache:
494
+ if kv_cache is None:
495
+ kv_cache = torch.cat((key_layer.unsqueeze(0).unsqueeze(0), value_layer.unsqueeze(0).unsqueeze(0)),
496
+ dim=1)
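+ # Prefill step: key/value are packed into one tensor of shape [1, 2, b, np, sk, hn] so the encoder
+ # can concatenate every layer's cache along dim 0; later decode steps keep a (key, value) tuple.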
497
+ else:
498
+ kv_cache = (key_layer, value_layer)
499
+ else:
500
+ kv_cache = None
501
+
502
+ if self.multi_query_attention:
503
+ key_layer = key_layer.unsqueeze(2)
504
+ key_layer = key_layer.expand(
505
+ -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1
506
+ )
507
+ key_layer = key_layer.contiguous().view(
508
+ key_layer.size()[:1] + (self.num_attention_heads_per_partition,) + key_layer.size()[3:]
509
+ )
510
+ value_layer = value_layer.unsqueeze(2)
511
+ value_layer = value_layer.expand(
512
+ -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1
513
+ )
514
+ value_layer = value_layer.contiguous().view(
515
+ value_layer.size()[:1] + (self.num_attention_heads_per_partition,) + value_layer.size()[3:]
516
+ )
517
+
518
+ # ==================================
519
+ # core attention computation
520
+ # ==================================
521
+
522
+ context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)
523
+
524
+ # =================
525
+ # Output. [b, sq, h]
526
+ # =================
527
+
528
+ output = self.dense(context_layer)
529
+
530
+ return output, kv_cache
531
+
532
+
533
+ def _config_to_kwargs(args):
534
+ common_kwargs = {
535
+ "dtype": args.torch_dtype,
536
+ }
537
+ return common_kwargs
538
+
539
+
540
+ class MLP(torch.nn.Module):
541
+ """MLP.
542
+
543
+ MLP will take the input with h hidden state, project it to 4*h
544
+ hidden dimension, perform nonlinear transformation, and project the
545
+ state back into h hidden dimension.
546
+ """
547
+
548
+ def __init__(self, config: ChatGLMConfig, device=None):
549
+ super(MLP, self).__init__()
550
+
551
+ self.add_bias = config.add_bias_linear
552
+
553
+ # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
554
+ self.dense_h_to_4h = nn.Linear(
555
+ config.hidden_size,
556
+ config.ffn_hidden_size * 2,
557
+ bias=self.add_bias,
558
+ device=device,
559
+ **_config_to_kwargs(config)
560
+ )
561
+
562
+ def swiglu(x):
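+ # SwiGLU: the doubled dense_h_to_4h output is split in half; SiLU of the first half gates the second.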
563
+ x = torch.chunk(x, 2, dim=-1)
564
+ return F.silu(x[0]) * x[1]
565
+
566
+ self.activation_func = swiglu
567
+
568
+ # Project back to h.
569
+ self.dense_4h_to_h = nn.Linear(
570
+ config.ffn_hidden_size,
571
+ config.hidden_size,
572
+ bias=self.add_bias,
573
+ device=device,
574
+ **_config_to_kwargs(config)
575
+ )
576
+
577
+ def forward(self, hidden_states):
578
+ # [b, s, 2 * ffn_hidden_size]
579
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
580
+ intermediate_parallel = self.activation_func(intermediate_parallel)
581
+ # [b, s, h]
582
+ output = self.dense_4h_to_h(intermediate_parallel)
583
+ return output
584
+
585
+
586
+ class GLMBlock(torch.nn.Module):
587
+ """A single transformer layer.
588
+
589
+ Transformer layer takes input with size [b, s, h] and returns an
590
+ output of the same size.
591
+ """
592
+
593
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
594
+ super(GLMBlock, self).__init__()
595
+ self.layer_number = layer_number
596
+
597
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
598
+
599
+ self.fp32_residual_connection = config.fp32_residual_connection
600
+
601
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
602
+ # Layernorm on the input data.
603
+ self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
604
+ dtype=config.torch_dtype)
605
+
606
+ # Self attention.
607
+ self.self_attention = SelfAttention(config, layer_number, device=device)
608
+ self.hidden_dropout = config.hidden_dropout
609
+
610
+ # Layernorm on the attention output
611
+ self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
612
+ dtype=config.torch_dtype)
613
+
614
+ # MLP
615
+ self.mlp = MLP(config, device=device)
616
+
617
+ def forward(
618
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
619
+ ):
620
+ # hidden_states: [b, s, h]
621
+
622
+ # Layer norm at the beginning of the transformer layer.
623
+ layernorm_output = self.input_layernorm(hidden_states)
624
+ # Self attention.
625
+ attention_output, kv_cache = self.self_attention(
626
+ layernorm_output,
627
+ attention_mask,
628
+ rotary_pos_emb,
629
+ kv_cache=kv_cache,
630
+ use_cache=use_cache
631
+ )
632
+
633
+ # Residual connection.
634
+ if self.apply_residual_connection_post_layernorm:
635
+ residual = layernorm_output
636
+ else:
637
+ residual = hidden_states
638
+
639
+ layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
640
+ layernorm_input = residual + layernorm_input
641
+
642
+ # Layer norm post the self attention.
643
+ layernorm_output = self.post_attention_layernorm(layernorm_input)
644
+
645
+ # MLP.
646
+ mlp_output = self.mlp(layernorm_output)
647
+
648
+ # Second residual connection.
649
+ if self.apply_residual_connection_post_layernorm:
650
+ residual = layernorm_output
651
+ else:
652
+ residual = layernorm_input
653
+
654
+ output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
655
+ output = residual + output
656
+
657
+ return output, kv_cache
658
+
659
+
660
+ class GLMTransformer(torch.nn.Module):
661
+ """Transformer class."""
662
+
663
+ def __init__(self, config: ChatGLMConfig, device=None):
664
+ super(GLMTransformer, self).__init__()
665
+
666
+ self.fp32_residual_connection = config.fp32_residual_connection
667
+ self.post_layer_norm = config.post_layer_norm
668
+
669
+ # Number of layers.
670
+ self.num_layers = config.num_layers
671
+
672
+ # Transformer layers.
673
+ def build_layer(layer_number):
674
+ return GLMBlock(config, layer_number, device=device)
675
+
676
+ self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
677
+
678
+ if self.post_layer_norm:
679
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
680
+ # Final layer norm before output.
681
+ self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
682
+ dtype=config.torch_dtype)
683
+
684
+ self.gradient_checkpointing = False
685
+
686
+ def _get_layer(self, layer_number):
687
+ return self.layers[layer_number]
688
+
689
+ def forward(
690
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
691
+ use_cache: Optional[bool] = True,
692
+ output_hidden_states: Optional[bool] = False,
693
+ ):
694
+ if not kv_caches:
695
+ kv_caches = [None for _ in range(self.num_layers)]
696
+ presents = () if use_cache else None
697
+ if self.gradient_checkpointing and self.training:
698
+ if use_cache:
699
+ logger.warning_once(
700
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
701
+ )
702
+ use_cache = False
703
+
704
+ all_self_attentions = None
705
+ all_hidden_states = () if output_hidden_states else None
706
+ for index in range(self.num_layers):
707
+ if output_hidden_states:
708
+ all_hidden_states = all_hidden_states + (hidden_states,)
709
+
710
+ layer = self._get_layer(index)
711
+ if self.gradient_checkpointing and self.training:
712
+ layer_ret = torch.utils.checkpoint.checkpoint(
713
+ layer,
714
+ hidden_states,
715
+ attention_mask,
716
+ rotary_pos_emb,
717
+ kv_caches[index],
718
+ use_cache,
719
+ use_reentrant=False
720
+ )
721
+ else:
722
+ layer_ret = layer(
723
+ hidden_states,
724
+ attention_mask,
725
+ rotary_pos_emb,
726
+ kv_cache=kv_caches[index],
727
+ use_cache=use_cache
728
+ )
729
+ hidden_states, kv_cache = layer_ret
730
+ if use_cache:
731
+ # token by token decoding, use tuple format
732
+ if kv_caches[0] is not None:
733
+ presents = presents + (kv_cache,)
734
+ # prefilling in decoding, use tensor format to save cuda memory
735
+ else:
736
+ if len(presents) == 0:
737
+ presents = kv_cache
738
+ else:
739
+ presents = torch.cat((presents, kv_cache.to(presents.device)), dim=0)
740
+
741
+ if output_hidden_states:
742
+ all_hidden_states = all_hidden_states + (hidden_states,)
743
+
744
+ # Final layer norm.
745
+ if self.post_layer_norm:
746
+ hidden_states = self.final_layernorm(hidden_states)
747
+
748
+ return hidden_states, presents, all_hidden_states, all_self_attentions
749
+
750
+
751
+ class ChatGLMPreTrainedModel(PreTrainedModel):
752
+ """
753
+ An abstract class to handle weights initialization and
754
+ a simple interface for downloading and loading pretrained models.
755
+ """
756
+
757
+ is_parallelizable = False
758
+ supports_gradient_checkpointing = True
759
+ config_class = ChatGLMConfig
760
+ base_model_prefix = "transformer"
761
+ _no_split_modules = ["GLMBlock"]
762
+ _supports_flash_attn_2 = True
763
+ _supports_sdpa = True
764
+
765
+ def _init_weights(self, module: nn.Module):
766
+ """Initialize the weights."""
767
+ return
768
+
769
+ def get_masks(self, input_ids, past_key_values, padding_mask=None):
770
+ if self.config._attn_implementation == "flash_attention_2":
771
+ if padding_mask is not None and not padding_mask.all():
772
+ return padding_mask
773
+ return None
774
+ batch_size, seq_length = input_ids.shape
775
+ full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
776
+ full_attention_mask.tril_()
777
+ past_length = 0
778
+ if past_key_values:
779
+ past_length = past_key_values[0][0].shape[2]
780
+ if past_length:
781
+ full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
782
+ device=input_ids.device), full_attention_mask), dim=-1)
783
+ if padding_mask is not None:
784
+ full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
785
+ if not past_length and padding_mask is not None:
786
+ full_attention_mask -= padding_mask.unsqueeze(-1) - 1
787
+ full_attention_mask = (full_attention_mask < 0.5).bool()
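+ # In the returned boolean mask, True marks positions that must NOT be attended to;
+ # the eager path uses it with masked_fill and the SDPA path inverts it first.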
788
+ full_attention_mask.unsqueeze_(1)
789
+ return full_attention_mask
790
+
791
+ def get_position_ids(self, input_ids, device):
792
+ batch_size, seq_length = input_ids.shape
793
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
794
+ return position_ids
795
+
796
+ class Embedding(torch.nn.Module):
797
+ """Language model embeddings."""
798
+
799
+ def __init__(self, config: ChatGLMConfig, device=None):
800
+ super(Embedding, self).__init__()
801
+
802
+ self.hidden_size = config.hidden_size
803
+ # Word embeddings (parallel).
804
+ self.word_embeddings = nn.Embedding(
805
+ config.padded_vocab_size,
806
+ self.hidden_size,
807
+ dtype=config.torch_dtype,
808
+ device=device
809
+ )
810
+ self.fp32_residual_connection = config.fp32_residual_connection
811
+
812
+ def forward(self, input_ids):
813
+ # Embeddings.
814
+ words_embeddings = self.word_embeddings(input_ids)
815
+ embeddings = words_embeddings
816
+ # If the fp32 residual connection flag is set, convert the embeddings to float32.
817
+ if self.fp32_residual_connection:
818
+ embeddings = embeddings.float()
819
+ return embeddings
820
+
821
+
822
+ class ChatGLMModel(ChatGLMPreTrainedModel):
823
+ def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
824
+ super().__init__(config)
825
+ if empty_init:
826
+ init_method = skip_init
827
+ else:
828
+ init_method = default_init
829
+ init_kwargs = {}
830
+ if device is not None:
831
+ init_kwargs["device"] = device
832
+ self.embedding = init_method(Embedding, config, **init_kwargs)
833
+ self.num_layers = config.num_layers
834
+ self.multi_query_group_num = config.multi_query_group_num
835
+ self.kv_channels = config.kv_channels
836
+
837
+ # Rotary positional embeddings
838
+ self.seq_length = config.seq_length
839
+ rotary_dim = (
840
+ config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
841
+ )
842
+
843
+ self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, rope_ratio=config.rope_ratio,
844
+ original_impl=config.original_rope,
845
+ device=device, dtype=config.torch_dtype)
846
+ self.encoder = init_method(GLMTransformer, config, **init_kwargs)
847
+ self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
848
+ dtype=config.torch_dtype, **init_kwargs)
849
+
850
+ def get_input_embeddings(self):
851
+ return self.embedding.word_embeddings
852
+
853
+ def set_input_embeddings(self, value):
854
+ self.embedding.word_embeddings = value
855
+
856
+ def forward(
857
+ self,
858
+ input_ids,
859
+ position_ids: Optional[torch.Tensor] = None,
860
+ attention_mask: Optional[torch.BoolTensor] = None,
861
+ full_attention_mask: Optional[torch.BoolTensor] = None,
862
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
863
+ inputs_embeds: Optional[torch.Tensor] = None,
864
+ use_cache: Optional[bool] = None,
865
+ output_attentions: Optional[bool] = None,
866
+ output_hidden_states: Optional[bool] = None,
867
+ return_dict: Optional[bool] = None,
868
+ ):
869
+ output_hidden_states = (
870
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
871
+ )
872
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
873
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
874
+
875
+ batch_size, seq_length = input_ids.shape
876
+
877
+ if inputs_embeds is None:
878
+ inputs_embeds = self.embedding(input_ids)
879
+
880
+ if full_attention_mask is None:
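+ # Only build the full 2-D mask when there is real padding, or when a multi-token prompt is run
+ # on top of an existing kv cache; otherwise the attention backends handle causal masking themselves.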
881
+ if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
882
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
883
+
884
+ # Rotary positional embeddings
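+ # The rope cache is built once for the full configured seq_length and then indexed
+ # by position_ids (or simply truncated to the current sequence length).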
885
+ rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
886
+ if position_ids is not None:
887
+ rotary_pos_emb = rotary_pos_emb[position_ids]
888
+ else:
889
+ rotary_pos_emb = rotary_pos_emb[None, :seq_length]
890
+
891
+ # Run encoder.
892
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
893
+ inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
894
+ kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
895
+ )
896
+ if presents is not None and type(presents) is torch.Tensor:
897
+ presents = presents.split(1, dim=0)
898
+ presents = list(presents)
899
+ presents = [list(x.squeeze(0).split(1, dim=0)) for x in presents]
900
+ presents = [tuple([x.squeeze(0) for x in y]) for y in presents]
901
+ presents = tuple(presents)
902
+
903
+ if not return_dict:
904
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
905
+
906
+ return BaseModelOutputWithPast(
907
+ last_hidden_state=hidden_states,
908
+ past_key_values=presents,
909
+ hidden_states=all_hidden_states,
910
+ attentions=all_self_attentions,
911
+ )
912
+
913
+
914
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
915
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
916
+ super().__init__(config)
917
+
918
+ self.max_sequence_length = config.max_length
919
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
920
+ self.config = config
921
+
922
+ def _update_model_kwargs_for_generation(
923
+ self,
924
+ outputs: ModelOutput,
925
+ model_kwargs: Dict[str, Any],
926
+ is_encoder_decoder: bool = False,
927
+ ) -> Dict[str, Any]:
928
+ # update past_key_values
929
+ cache_name, cache = self._extract_past_from_model_output(outputs)
930
+ model_kwargs[cache_name] = cache
931
+
932
+ # update attention mask
933
+ if "attention_mask" in model_kwargs:
934
+ attention_mask = model_kwargs["attention_mask"]
935
+ model_kwargs["attention_mask"] = torch.cat(
936
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
937
+ )
938
+
939
+ # update position ids
940
+ if "position_ids" in model_kwargs:
941
+ position_ids = model_kwargs["position_ids"]
942
+ new_position_id = position_ids[..., -1:].clone()
943
+ new_position_id += 1
944
+ model_kwargs["position_ids"] = torch.cat(
945
+ [position_ids, new_position_id], dim=-1
946
+ )
947
+
948
+ model_kwargs["is_first_forward"] = False
949
+ return model_kwargs
950
+
951
+ def prepare_inputs_for_generation(
952
+ self,
953
+ input_ids: torch.LongTensor,
954
+ past_key_values: Optional[torch.Tensor] = None,
955
+ attention_mask: Optional[torch.Tensor] = None,
956
+ position_ids: Optional[torch.Tensor] = None,
957
+ use_cache: Optional[bool] = None,
958
+ is_first_forward: bool = True,
959
+ **kwargs
960
+ ) -> dict:
961
+ # only last token for input_ids if past is not None
962
+ if position_ids is None:
963
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
964
+ if not is_first_forward:
965
+ if past_key_values is not None:
966
+ position_ids = position_ids[..., -1:]
967
+ input_ids = input_ids[:, -1:]
968
+ return {
969
+ "input_ids": input_ids,
970
+ "past_key_values": past_key_values,
971
+ "position_ids": position_ids,
972
+ "attention_mask": attention_mask,
973
+ "return_last_logit": True,
974
+ "use_cache": use_cache
975
+ }
976
+
977
+ def forward(
978
+ self,
979
+ input_ids: Optional[torch.Tensor] = None,
980
+ position_ids: Optional[torch.Tensor] = None,
981
+ attention_mask: Optional[torch.Tensor] = None,
982
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
983
+ inputs_embeds: Optional[torch.Tensor] = None,
984
+ labels: Optional[torch.Tensor] = None,
985
+ use_cache: Optional[bool] = None,
986
+ output_attentions: Optional[bool] = None,
987
+ output_hidden_states: Optional[bool] = None,
988
+ return_dict: Optional[bool] = None,
989
+ return_last_logit: Optional[bool] = False,
990
+ ):
991
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
992
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
993
+
994
+ transformer_outputs = self.transformer(
995
+ input_ids=input_ids,
996
+ position_ids=position_ids,
997
+ attention_mask=attention_mask,
998
+ past_key_values=past_key_values,
999
+ inputs_embeds=inputs_embeds,
1000
+ use_cache=use_cache,
1001
+ output_hidden_states=output_hidden_states,
1002
+ return_dict=return_dict,
1003
+ )
1004
+
1005
+ hidden_states = transformer_outputs[0]
1006
+ if return_last_logit:
1007
+ hidden_states = hidden_states[:, -1:]
1008
+ lm_logits = self.transformer.output_layer(hidden_states)
1009
+
1010
+ loss = None
1011
+ if labels is not None:
1012
+ lm_logits = lm_logits.to(torch.float32)
1013
+
1014
+ # Shift so that tokens < n predict n
1015
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1016
+ shift_labels = labels[..., 1:].contiguous()
1017
+ # Flatten the tokens
1018
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1019
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1020
+
1021
+ lm_logits = lm_logits.to(hidden_states.dtype)
1022
+ loss = loss.to(hidden_states.dtype)
1023
+
1024
+ if not return_dict:
1025
+ output = (lm_logits,) + transformer_outputs[1:]
1026
+ return ((loss,) + output) if loss is not None else output
1027
+
1028
+ return CausalLMOutputWithPast(
1029
+ loss=loss,
1030
+ logits=lm_logits,
1031
+ past_key_values=transformer_outputs.past_key_values,
1032
+ hidden_states=transformer_outputs.hidden_states,
1033
+ attentions=transformer_outputs.attentions,
1034
+ )
1035
+
1036
+ @staticmethod
1037
+ def _reorder_cache(
1038
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1039
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1040
+ """
1041
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1042
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1043
+ beam_idx at every generation step.
1044
+
1045
+ Output shares the same memory storage as `past`.
1046
+ """
1047
+ return tuple(
1048
+ (
1049
+ layer_past[0].index_select(0, beam_idx.to(layer_past[0].device)),
1050
+ layer_past[1].index_select(0, beam_idx.to(layer_past[1].device)),
1051
+ )
1052
+ for layer_past in past
1053
+ )
1054
+
1055
+
1056
+ class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):
1057
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
1058
+ super().__init__(config)
1059
+
1060
+ self.num_labels = config.num_labels
1061
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
1062
+
1063
+ self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=config.torch_dtype)
1064
+ if config.classifier_dropout is not None:
1065
+ self.dropout = nn.Dropout(config.classifier_dropout)
1066
+ else:
1067
+ self.dropout = None
1068
+ self.config = config
1069
+
1070
+ def forward(
1071
+ self,
1072
+ input_ids: Optional[torch.LongTensor] = None,
1073
+ position_ids: Optional[torch.LongTensor] = None,
1074
+ attention_mask: Optional[torch.Tensor] = None,
1075
+ full_attention_mask: Optional[torch.Tensor] = None,
1076
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1077
+ inputs_embeds: Optional[torch.LongTensor] = None,
1078
+ labels: Optional[torch.LongTensor] = None,
1079
+ use_cache: Optional[bool] = None,
1080
+ output_attentions: Optional[bool] = None,
1081
+ output_hidden_states: Optional[bool] = None,
1082
+ return_dict: Optional[bool] = None,
1083
+ ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:
1084
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1085
+
1086
+ transformer_outputs = self.transformer(
1087
+ input_ids=input_ids,
1088
+ position_ids=position_ids,
1089
+ attention_mask=attention_mask,
1090
+ full_attention_mask=full_attention_mask,
1091
+ past_key_values=past_key_values,
1092
+ inputs_embeds=inputs_embeds,
1093
+ use_cache=use_cache,
1094
+ output_attentions=output_attentions,
1095
+ output_hidden_states=output_hidden_states,
1096
+ return_dict=return_dict,
1097
+ )
1098
+
1099
+ hidden_states = transformer_outputs[0]
1100
+ pooled_hidden_states = hidden_states[:, -1]
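+ # Pool by taking the hidden state of the last position, as is usual for causal decoders.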
1101
+ if self.dropout is not None:
1102
+ pooled_hidden_states = self.dropout(pooled_hidden_states)
1103
+ logits = self.classifier_head(pooled_hidden_states)
1104
+
1105
+ loss = None
1106
+ if labels is not None:
1107
+ if self.config.problem_type is None:
1108
+ if self.num_labels == 1:
1109
+ self.config.problem_type = "regression"
1110
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1111
+ self.config.problem_type = "single_label_classification"
1112
+ else:
1113
+ self.config.problem_type = "multi_label_classification"
1114
+
1115
+ if self.config.problem_type == "regression":
1116
+ loss_fct = MSELoss()
1117
+ if self.num_labels == 1:
1118
+ loss = loss_fct(logits.squeeze().float(), labels.squeeze())
1119
+ else:
1120
+ loss = loss_fct(logits.float(), labels)
1121
+ elif self.config.problem_type == "single_label_classification":
1122
+ loss_fct = CrossEntropyLoss()
1123
+ loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1))
1124
+ elif self.config.problem_type == "multi_label_classification":
1125
+ loss_fct = BCEWithLogitsLoss()
1126
+ loss = loss_fct(logits.float(), labels.view(-1, self.num_labels))
1127
+
1128
+ if not return_dict:
1129
+ output = (logits,) + transformer_outputs[1:]
1130
+ return ((loss,) + output) if loss is not None else output
1131
+
1132
+ return SequenceClassifierOutputWithPast(
1133
+ loss=loss,
1134
+ logits=logits,
1135
+ past_key_values=transformer_outputs.past_key_values,
1136
+ hidden_states=transformer_outputs.hidden_states,
1137
+ attentions=transformer_outputs.attentions,
1138
+ )
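
For reference, a minimal usage sketch for custom modeling code like this. It assumes the repository's config.json registers these classes via auto_map (as the GLM-4 chat repos do) and that the tokenizer files are present; the repo id below is a placeholder, not the name of this repository.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-org/your-glm4-finetune"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto"
)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))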
sft_args.json ADDED
@@ -0,0 +1,258 @@
1
+ {
2
+ "model_type": "glm4-9b-chat",
3
+ "model_id_or_path": "/data/SharedFile/ZhipuAI/glm-4-9b-chat",
4
+ "model_revision": "master",
5
+ "full_determinism": false,
6
+ "sft_type": "full",
7
+ "freeze_parameters": [],
8
+ "freeze_vit": false,
9
+ "freeze_parameters_ratio": 0.0,
10
+ "additional_trainable_parameters": [],
11
+ "tuner_backend": "peft",
12
+ "template_type": "chatglm4",
13
+ "output_dir": "/data/luol/project/Taiyi2/models/glm4-9b-chat/Taiyi2-chat",
14
+ "add_output_dir_suffix": false,
15
+ "ddp_backend": null,
16
+ "ddp_find_unused_parameters": null,
17
+ "ddp_broadcast_buffers": null,
18
+ "ddp_timeout": 1800,
19
+ "seed": 42,
20
+ "resume_from_checkpoint": null,
21
+ "resume_only_model": false,
22
+ "ignore_data_skip": false,
23
+ "dtype": "bf16",
24
+ "packing": false,
25
+ "train_backend": "transformers",
26
+ "tp": 1,
27
+ "pp": 1,
28
+ "min_lr": null,
29
+ "sequence_parallel": false,
30
+ "model_kwargs": {},
31
+ "loss_name": null,
32
+ "dataset": [
33
+ "/data/luol/project/Taiyi2/training_data/taiyi2-v5.jsonl"
34
+ ],
35
+ "val_dataset": [],
36
+ "dataset_seed": null,
37
+ "dataset_test_ratio": 0.01,
38
+ "use_loss_scale": false,
39
+ "loss_scale_config_path": "/data/luol/anaconda3/envs/swift26/lib/python3.8/site-packages/swift/llm/agent/default_loss_scale_config.json",
40
+ "system": null,
41
+ "tools_prompt": "react_en",
42
+ "max_length": 2048,
43
+ "truncation_strategy": "delete",
44
+ "check_dataset_strategy": "none",
45
+ "streaming": false,
46
+ "streaming_val_size": 0,
47
+ "streaming_buffer_size": 16384,
48
+ "model_name": [
49
+ null,
50
+ null
51
+ ],
52
+ "model_author": [
53
+ null,
54
+ null
55
+ ],
56
+ "quant_method": null,
57
+ "quantization_bit": 0,
58
+ "hqq_axis": 0,
59
+ "hqq_dynamic_config_path": null,
60
+ "bnb_4bit_comp_dtype": "bf16",
61
+ "bnb_4bit_quant_type": "nf4",
62
+ "bnb_4bit_use_double_quant": true,
63
+ "bnb_4bit_quant_storage": null,
64
+ "rescale_image": -1,
65
+ "target_modules": [
66
+ "dense_4h_to_h",
67
+ "dense",
68
+ "output_layer",
69
+ "query_key_value",
70
+ "dense_h_to_4h"
71
+ ],
72
+ "target_regex": null,
73
+ "modules_to_save": [],
74
+ "lora_rank": 8,
75
+ "lora_alpha": 32,
76
+ "lora_dropout": 0.05,
77
+ "lora_bias_trainable": "none",
78
+ "lora_dtype": null,
79
+ "lora_lr_ratio": null,
80
+ "use_rslora": false,
81
+ "use_dora": false,
82
+ "init_lora_weights": true,
83
+ "fourier_n_frequency": 2000,
84
+ "fourier_scaling": 300.0,
85
+ "rope_scaling": null,
86
+ "boft_block_size": 4,
87
+ "boft_block_num": 0,
88
+ "boft_n_butterfly_factor": 1,
89
+ "boft_dropout": 0.0,
90
+ "vera_rank": 256,
91
+ "vera_projection_prng_key": 0,
92
+ "vera_dropout": 0.0,
93
+ "vera_d_initial": 0.1,
94
+ "adapter_act": "gelu",
95
+ "adapter_length": 128,
96
+ "use_galore": false,
97
+ "galore_target_modules": null,
98
+ "galore_rank": 128,
99
+ "galore_update_proj_gap": 50,
100
+ "galore_scale": 1.0,
101
+ "galore_proj_type": "std",
102
+ "galore_optim_per_parameter": false,
103
+ "galore_with_embedding": false,
104
+ "galore_quantization": false,
105
+ "galore_proj_quant": false,
106
+ "galore_proj_bits": 4,
107
+ "galore_proj_group_size": 256,
108
+ "galore_cos_threshold": 0.4,
109
+ "galore_gamma_proj": 2,
110
+ "galore_queue_size": 5,
111
+ "adalora_target_r": 8,
112
+ "adalora_init_r": 12,
113
+ "adalora_tinit": 0,
114
+ "adalora_tfinal": 0,
115
+ "adalora_deltaT": 1,
116
+ "adalora_beta1": 0.85,
117
+ "adalora_beta2": 0.85,
118
+ "adalora_orth_reg_weight": 0.5,
119
+ "ia3_feedforward_modules": [],
120
+ "llamapro_num_new_blocks": 4,
121
+ "llamapro_num_groups": null,
122
+ "neftune_noise_alpha": null,
123
+ "neftune_backend": "transformers",
124
+ "lisa_activated_layers": 0,
125
+ "lisa_step_interval": 20,
126
+ "reft_layer_key": null,
127
+ "reft_layers": null,
128
+ "reft_rank": 4,
129
+ "reft_intervention_type": "LoreftIntervention",
130
+ "reft_args": null,
131
+ "use_liger": false,
132
+ "gradient_checkpointing": true,
133
+ "vit_use_gc": true,
134
+ "deepspeed": null,
135
+ "batch_size": 2,
136
+ "eval_batch_size": 2,
137
+ "auto_find_batch_size": false,
138
+ "num_train_epochs": 5,
139
+ "max_steps": -1,
140
+ "optim": "adamw_torch",
141
+ "adam_beta1": 0.9,
142
+ "adam_beta2": 0.95,
143
+ "adam_epsilon": 1e-08,
144
+ "learning_rate": 0.0001,
145
+ "weight_decay": 0.1,
146
+ "gradient_accumulation_steps": 16,
147
+ "max_grad_norm": 1,
148
+ "predict_with_generate": false,
149
+ "lr_scheduler_type": "cosine",
150
+ "lr_scheduler_kwargs": {},
151
+ "warmup_ratio": 0.05,
152
+ "warmup_steps": 0,
153
+ "eval_steps": 50000,
154
+ "save_steps": 50000,
155
+ "save_only_model": false,
156
+ "save_total_limit": 10,
157
+ "logging_steps": 5,
158
+ "acc_steps": 1,
159
+ "dataloader_num_workers": 1,
160
+ "dataloader_pin_memory": true,
161
+ "dataloader_drop_last": false,
162
+ "push_to_hub": false,
163
+ "hub_model_id": null,
164
+ "hub_token": null,
165
+ "hub_private_repo": false,
166
+ "hub_strategy": "every_save",
167
+ "test_oom_error": false,
168
+ "disable_tqdm": false,
169
+ "lazy_tokenize": false,
170
+ "preprocess_num_proc": 1,
171
+ "use_flash_attn": null,
172
+ "ignore_args_error": true,
173
+ "check_model_is_latest": true,
174
+ "logging_dir": "/data/luol/project/Taiyi2/models/glm4-9b-chat/Taiyi2-chat/runs",
175
+ "report_to": [
176
+ "tensorboard"
177
+ ],
178
+ "acc_strategy": "token",
179
+ "save_on_each_node": false,
180
+ "evaluation_strategy": "steps",
181
+ "save_strategy": "steps",
182
+ "save_safetensors": true,
183
+ "gpu_memory_fraction": null,
184
+ "include_num_input_tokens_seen": false,
185
+ "local_repo_path": null,
186
+ "custom_register_path": null,
187
+ "custom_dataset_info": null,
188
+ "device_map_config": null,
189
+ "device_max_memory": [],
190
+ "max_new_tokens": 2048,
191
+ "do_sample": null,
192
+ "temperature": null,
193
+ "top_k": null,
194
+ "top_p": null,
195
+ "repetition_penalty": null,
196
+ "num_beams": 1,
197
+ "fsdp": "",
198
+ "fsdp_config": null,
199
+ "sequence_parallel_size": 1,
200
+ "model_layer_cls_name": null,
201
+ "metric_warmup_step": 0,
202
+ "fsdp_num": 1,
203
+ "per_device_train_batch_size": null,
204
+ "per_device_eval_batch_size": null,
205
+ "eval_strategy": null,
206
+ "self_cognition_sample": 0,
207
+ "train_dataset_mix_ratio": 0.0,
208
+ "train_dataset_mix_ds": [
209
+ "ms-bench"
210
+ ],
211
+ "train_dataset_sample": -1,
212
+ "val_dataset_sample": null,
213
+ "safe_serialization": null,
214
+ "only_save_model": null,
215
+ "neftune_alpha": null,
216
+ "deepspeed_config_path": null,
217
+ "model_cache_dir": null,
218
+ "lora_dropout_p": null,
219
+ "lora_target_modules": [
220
+ "dense_4h_to_h",
221
+ "dense",
222
+ "output_layer",
223
+ "query_key_value",
224
+ "dense_h_to_4h"
225
+ ],
226
+ "lora_target_regex": null,
227
+ "lora_modules_to_save": [],
228
+ "boft_target_modules": [],
229
+ "boft_modules_to_save": [],
230
+ "vera_target_modules": [],
231
+ "vera_modules_to_save": [],
232
+ "ia3_target_modules": [],
233
+ "ia3_modules_to_save": [],
234
+ "custom_train_dataset_path": [],
235
+ "custom_val_dataset_path": [],
236
+ "device_map_config_path": null,
237
+ "push_hub_strategy": null,
238
+ "use_self_cognition": false,
239
+ "is_multimodal": false,
240
+ "is_vision": false,
241
+ "lora_use_embedding": false,
242
+ "lora_use_all": true,
243
+ "lora_m2s_use_embedding": false,
244
+ "lora_m2s_use_ln": false,
245
+ "torch_dtype": "torch.bfloat16",
246
+ "fp16": false,
247
+ "bf16": true,
248
+ "rank": -1,
249
+ "local_rank": -1,
250
+ "world_size": 1,
251
+ "local_world_size": 1,
252
+ "bnb_4bit_compute_dtype": "torch.bfloat16",
253
+ "load_in_4bit": false,
254
+ "load_in_8bit": false,
255
+ "train_sampler_random": true,
256
+ "train_type": "sft",
257
+ "training_args": "Seq2SeqTrainingArguments(output_dir='/data/luol/project/Taiyi2/models/glm4-9b-chat/Taiyi2-chat', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=16, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1, num_train_epochs=5, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs={}, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/data/luol/project/Taiyi2/models/glm4-9b-chat/Taiyi2-chat/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<IntervalStrategy.STEPS: 'steps'>, save_steps=50000, save_total_limit=10, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=None, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=50000, dataloader_num_workers=1, dataloader_prefetch_factor=None, past_index=-1, run_name='/data/luol/project/Taiyi2/models/glm4-9b-chat/Taiyi2-chat', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=False, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy=None, push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=False, include_num_input_tokens_seen=False, neftune_noise_alpha=None, optim_target_modules=None, 
batch_eval_metrics=False, eval_on_start=False, eval_use_gather_object=False, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=GenerationConfig {\n \"do_sample\": true,\n \"eos_token_id\": 151329,\n \"max_new_tokens\": 2048,\n \"pad_token_id\": 151329,\n \"temperature\": 0.8,\n \"top_p\": 0.8\n}\n, acc_strategy='token', loss_name=None, additional_saved_files=[], train_sampler_random=True, metric_warmup_step=0, train_dataset_sample=-1)"
+ }
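For readability, the serialized Seq2SeqTrainingArguments string above boils down to a small set of SFT hyperparameters. The following Python sketch is not the exact launcher used for this run (output_dir below is a placeholder), but reconstructs the values recorded above that differ from the transformers defaults:

from transformers import Seq2SeqTrainingArguments

# Sketch of the logged SFT hyperparameters; all other fields keep library defaults.
training_args = Seq2SeqTrainingArguments(
    output_dir="./Taiyi2-chat",          # placeholder; the log records a machine-specific path
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=16,      # effective batch of 32 sequences per device
    learning_rate=1e-4,
    weight_decay=0.1,
    adam_beta2=0.95,
    max_grad_norm=1.0,
    num_train_epochs=5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    logging_steps=5,
    eval_strategy="steps",
    eval_steps=50_000,
    save_steps=50_000,
    save_total_limit=10,
    bf16=True,
    gradient_checkpointing=True,
    remove_unused_columns=False,
    report_to=["tensorboard"],
)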
special_tokens_map.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "[MASK]",
+ "[gMASK]",
+ "[sMASK]",
+ "<sop>",
+ "<eop>",
+ "<|system|>",
+ "<|user|>",
+ "<|assistant|>",
+ "<|observation|>",
+ "<|begin_of_image|>",
+ "<|end_of_image|>",
+ "<|begin_of_video|>",
+ "<|end_of_video|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
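The map above only declares which strings are special; their numeric ids are assigned in tokenizer_config.json further down. A minimal sketch of how this file is consumed when the repository is loaded with transformers (the local path is a placeholder, and trust_remote_code is required because the tokenizer class ships in this repo):

from transformers import AutoTokenizer

# Placeholder path: point this at a local clone of the uploaded files.
tokenizer = AutoTokenizer.from_pretrained("path/to/Taiyi2-chat", trust_remote_code=True)

print(tokenizer.eos_token)                          # '<|endoftext|>', also used as pad_token
print(tokenizer.additional_special_tokens[:4])      # ['<|endoftext|>', '[MASK]', '[gMASK]', '[sMASK]']
print(tokenizer.convert_tokens_to_ids("<|user|>"))  # 151336 according to tokenizer_config.json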
tokenization_chatglm.py ADDED
@@ -0,0 +1,224 @@
+ import regex as re
+ import base64
+ import os
+ import tiktoken
+ from typing import List, Optional, Union, Dict
+ from transformers import PreTrainedTokenizer
+ from transformers.utils import PaddingStrategy
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
+
+
+ class ChatGLM4Tokenizer(PreTrainedTokenizer):
+ vocab_files_names = {"vocab_file": "tokenizer.model"}
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
+
+ def __init__(
+ self,
+ vocab_file,
+ clean_up_tokenization_spaces=False,
+ **kwargs
+ ):
+ self.name = "GLM4Tokenizer"
+ self.vocab_file = vocab_file
+ pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
+ self.pat_str = re.compile(pat_str)
+
+ mergeable_ranks = {}
+ with open(vocab_file) as f:
+ for line in f:
+ token, rank = line.strip().split()
+ rank = int(rank)
+ token = base64.b64decode(token)
+ mergeable_ranks[token] = rank
+
+ self.mergeable_ranks = mergeable_ranks
+
+ self.tokenizer = tiktoken.Encoding(
+ name="my_tokenizer",
+ pat_str=pat_str,
+ mergeable_ranks=mergeable_ranks,
+ special_tokens={}
+ )
+ self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
+ self.n_words = len(self.decoder)
+
+ super().__init__(
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs
+ )
+
+ @property
+ def vocab_size(self):
+ return self.n_words
+
+ def get_vocab(self):
+ """ Returns vocab as a dict """
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str:
+ """
+ Converts a sequence of tokens into a single string.
+ """
+ text = ""
+ temp = b""
+ for t in tokens:
+ if isinstance(t, int):
+ t = chr(t)
+ if isinstance(t, str):
+ if temp:
+ text += temp.decode("utf-8", errors="replace")
+ temp = b""
+ text += t
+ elif isinstance(t, bytes):
+ temp += t
+ else:
+ raise TypeError("token should only be of type int, bytes or str")
+ if temp:
+ text += temp.decode("utf-8", errors="replace")
+ return text
+
+ def _tokenize(self, text, **kwargs):
+ tokens = []
+ ids = self.tokenizer.encode(text)
+ for t in ids:
+ tokens.append(self.decoder[t])
+ return tokens
+
+ def _convert_token_to_id(self, token):
+ """ Converts a token (str) to an id using the vocab. """
+ return self.mergeable_ranks[token]
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) to a token (str) using the vocab."""
+ return self.decoder.get(index, "")
+
+ def save_vocabulary(self, save_directory, filename_prefix=None):
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+ filename_prefix (`str`, *optional*):
+ An optional prefix to add to the names of the saved files.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, self.vocab_files_names["vocab_file"]
+ )
+ else:
+ vocab_file = save_directory
+
+ with open(self.vocab_file, 'rb') as fin:
+ proto_str = fin.read()
+
+ with open(vocab_file, "wb") as writer:
+ writer.write(proto_str)
+
+ return (vocab_file,)
+
+ def get_prefix_tokens(self):
+ prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("<sop>")]
+ return prefix_tokens
+
+ def build_single_message(self, role, metadata, message, tokenize=True):
+ assert role in ["system", "user", "assistant", "observation"], role
+ if tokenize:
+ role_tokens = [self.convert_tokens_to_ids(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n",
+ disallowed_special=())
+ message_tokens = self.tokenizer.encode(message, disallowed_special=())
+ tokens = role_tokens + message_tokens
+ return tokens
+ else:
+ return str(f"<|{role}|>{metadata}\n{message}")
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences by concatenating and
+ adding special tokens. A GLM-4 sequence has the following format:
+
+ - single sequence: `[gMASK] <sop> X`
+ - pair of sequences: `[gMASK] <sop> A B <eos>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ prefix_tokens = self.get_prefix_tokens()
+ token_ids_0 = prefix_tokens + token_ids_0
+ if token_ids_1 is not None:
+ token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("<eos>")]
+ return token_ids_0
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_side: str = "left",
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+ seq_length = len(required_input)
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * seq_length
+
+ if "position_ids" not in encoded_inputs:
+ encoded_inputs["position_ids"] = list(range(seq_length))
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+
+ if "attention_mask" in encoded_inputs:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "position_ids" in encoded_inputs:
+ encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+
+ return encoded_inputs
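A short usage sketch of the tokenizer defined above (placeholder path; the id values in the comments follow the added_tokens_decoder in tokenizer_config.json). It exercises the [gMASK]<sop> prefix from get_prefix_tokens, the role formatting of build_single_message, and the left-side padding implemented in _pad:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/Taiyi2-chat", trust_remote_code=True)

# Prefix prepended by build_inputs_with_special_tokens: [gMASK] <sop>
print(tok.get_prefix_tokens())            # [151331, 151333]

# Role-tagged message, as used when assembling chat prompts
ids = tok.build_single_message("user", "", "What is a gene-disease association?")
print(tok.decode(ids))                    # roughly '<|user|>\nWhat is a gene-disease association?'

# _pad pads input_ids, attention_mask and position_ids on the left
batch = tok(["short", "a slightly longer input"], padding=True)
print(batch["attention_mask"][0])         # leading zeros mark the left padding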
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a493598071550244b2ee7f26118f3edec2150b9dfa967929a99052ac83fe716
+ size 2623634
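Note that tokenizer.model is stored as a Git LFS pointer: the three lines above record only the spec version, the SHA-256 of the real vocabulary file, and its size (about 2.6 MB). One way to materialize the actual file, assuming a repository id (placeholder below) and huggingface_hub:

from huggingface_hub import hf_hub_download

# Placeholder repo id; hf_hub_download resolves the LFS pointer and returns a local cache path.
local_path = hf_hub_download(repo_id="org-name/Taiyi2-chat", filename="tokenizer.model")
print(local_path)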
tokenizer_config.json ADDED
@@ -0,0 +1,147 @@
+ {
+ "added_tokens_decoder": {
+ "151329": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151330": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151331": {
+ "content": "[gMASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151332": {
+ "content": "[sMASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151333": {
+ "content": "<sop>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151334": {
+ "content": "<eop>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151335": {
+ "content": "<|system|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151336": {
+ "content": "<|user|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151337": {
+ "content": "<|assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151338": {
+ "content": "<|observation|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151339": {
+ "content": "<|begin_of_image|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151340": {
+ "content": "<|end_of_image|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151341": {
+ "content": "<|begin_of_video|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151342": {
+ "content": "<|end_of_video|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "[MASK]",
+ "[gMASK]",
+ "[sMASK]",
+ "<sop>",
+ "<eop>",
+ "<|system|>",
+ "<|user|>",
+ "<|assistant|>",
+ "<|observation|>",
+ "<|begin_of_image|>",
+ "<|end_of_image|>",
+ "<|begin_of_video|>",
+ "<|end_of_video|>"
+ ],
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_chatglm.ChatGLM4Tokenizer",
+ null
+ ]
+ },
+ "chat_template": "[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 GLM-4 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n在调用上述函数时,请使用 Json 格式表示调用的参数。{% elif tool['type'] == 'python' %}\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。{% elif tool['type'] == 'simple_browser' %}\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。{% elif tool['type'] == 'cogview' %}\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "do_lower_case": false,
+ "eos_token": "<|endoftext|>",
+ "model_max_length": 128000,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "left",
+ "remove_space": false,
+ "tokenizer_class": "ChatGLM4Tokenizer"
+ }
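To close the loop, a brief sketch of how the chat_template and special tokens defined in this file come together at inference time (placeholder path; the rendered string in the comment is indicative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/Taiyi2-chat", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful biomedical assistant."},
    {"role": "user", "content": "List two synonyms for myocardial infarction."},
]

# Renders roughly "[gMASK]<sop><|system|>\n...<|user|>\n...<|assistant|>" per the Jinja template above
prompt = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)

# Tokenized ids ready for model.generate(); padding_side='left' and model_max_length=128000 apply here
inputs = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")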