jingyaogong committed (verified)
Commit 36cbeb1 · 1 Parent(s): 35cd087

Delete model.py

Files changed (1)
  1. model.py +0 -387
model.py DELETED
@@ -1,387 +0,0 @@
- import math
- import struct
- import inspect
- import time
-
- from .LMConfig import LMConfig
- from typing import Any, Optional, Tuple, List, Union
- import numpy as np
- import torch
- import torch.nn.functional as F
- from torch import nn
- from transformers import PreTrainedModel
- from transformers.modeling_outputs import CausalLMOutputWithPast
-
-
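- # RMSNorm: root-mean-square layer normalization (no mean-centering, no bias),
- # the LLaMA-style replacement for LayerNorm.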
- class RMSNorm(torch.nn.Module):
-     def __init__(self, dim: int, eps: float = 1e-6):
-         super().__init__()
-         self.eps = eps
-         self.weight = nn.Parameter(torch.ones(dim))
-
-     def _norm(self, x):
-         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
-
-     def forward(self, x):
-         return self.weight * self._norm(x.float()).type_as(x)
-
-
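- # Precompute rotary position embeddings as complex exponentials (cos + i*sin),
- # one per (position, frequency) pair.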
- def precompute_pos_cis(dim: int, end: int = int(32 * 1024), theta: float = 1e6):
-     freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
-     t = torch.arange(end, device=freqs.device)  # type: ignore
-     freqs = torch.outer(t, freqs).float()  # type: ignore
-     pos_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64
-     return pos_cis
-
-
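- # Apply rotary embeddings: view the (q, k) head dimensions as complex pairs
- # and multiply by pos_cis to rotate each pair by its position-dependent angle.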
- def apply_rotary_emb(xq, xk, pos_cis):
-     def unite_shape(pos_cis, x):
-         ndim = x.ndim
-         assert 0 <= 1 < ndim
-         assert pos_cis.shape == (x.shape[1], x.shape[-1])
-         shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
-         return pos_cis.view(*shape)
-
-     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
-     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
-     pos_cis = unite_shape(pos_cis, xq_)
-     xq_out = torch.view_as_real(xq_ * pos_cis).flatten(3)
-     xk_out = torch.view_as_real(xk_ * pos_cis).flatten(3)
-     return xq_out.type_as(xq), xk_out.type_as(xk)
-
-
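- # Grouped-query attention helper: tile each KV head n_rep times so the
- # key/value tensors match the number of query heads.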
- def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
-     """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
-     bs, slen, n_kv_heads, head_dim = x.shape
-     if n_rep == 1:
-         return x
-     return (
-         x[:, :, :, None, :]
-         .expand(bs, slen, n_kv_heads, n_rep, head_dim)
-         .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
-     )
-
-
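- # Multi-head attention with rotary embeddings, optional grouped-query KV heads,
- # a KV cache for incremental decoding, and Flash Attention when available.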
- class Attention(nn.Module):
-     def __init__(self, args: LMConfig):
-         super().__init__()
-         self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
-         assert args.n_heads % self.n_kv_heads == 0
-         self.n_local_heads = args.n_heads
-         self.n_local_kv_heads = self.n_kv_heads
-         self.n_rep = self.n_local_heads // self.n_local_kv_heads
-         self.head_dim = args.dim // args.n_heads
-         self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
-         self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
-         self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
-         self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
-         self.attn_dropout = nn.Dropout(args.dropout)
-         self.resid_dropout = nn.Dropout(args.dropout)
-         self.dropout = args.dropout
-         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn
-         # print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
-         mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float("-inf"))
-         mask = torch.triu(mask, diagonal=1)
-         self.register_buffer("mask", mask, persistent=False)
-
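-     # forward: project to q/k/v, rotate q/k, append to the KV cache if one is
-     # passed in, then attend (flash path for prompts, masked matmul otherwise).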
-     def forward(self,
-                 x: torch.Tensor,
-                 pos_cis: torch.Tensor,
-                 past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
-                 use_cache=False):
-         bsz, seq_len, _ = x.shape
-         xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
-         xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
-         xk = xk.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
-         xv = xv.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
-
-         xq, xk = apply_rotary_emb(xq, xk, pos_cis)
-         # KV cache: prepend cached keys/values from earlier decoding steps
-         if past_key_value is not None:
-             xk = torch.cat([past_key_value[0], xk], dim=1)
-             xv = torch.cat([past_key_value[1], xv], dim=1)
-         past_kv = (xk, xv) if use_cache else None
-
-         xq, xk, xv = (
-             xq.transpose(1, 2),
-             repeat_kv(xk, self.n_rep).transpose(1, 2),
-             repeat_kv(xv, self.n_rep).transpose(1, 2)
-         )
-         if self.flash and seq_len != 1:
-             dropout_p = self.dropout if self.training else 0.0
-             output = F.scaled_dot_product_attention(
-                 xq, xk, xv,
-                 attn_mask=None,
-                 dropout_p=dropout_p,
-                 is_causal=True
-             )
-         else:
-             scores = (xq @ xk.transpose(-2, -1)) / math.sqrt(self.head_dim)
-             scores += self.mask[:, :, :seq_len, :seq_len]
-             scores = F.softmax(scores.float(), dim=-1).type_as(xq)
-             scores = self.attn_dropout(scores)
-             output = scores @ xv
-
-         output = output.transpose(1, 2).reshape(bsz, seq_len, -1)
-         output = self.resid_dropout(self.wo(output))
-         return output, past_kv
-
-
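- # SwiGLU feed-forward: w2(silu(w1(x)) * w3(x)), with the hidden size rounded
- # up to a multiple of config.multiple_of when not set explicitly.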
- class FeedForward(nn.Module):
-     def __init__(self, config: LMConfig):
-         super().__init__()
-         if config.hidden_dim is None:
-             hidden_dim = 4 * config.dim
-             hidden_dim = int(2 * hidden_dim / 3)
-             config.hidden_dim = config.multiple_of * ((hidden_dim + config.multiple_of - 1) // config.multiple_of)
-         self.w1 = nn.Linear(config.dim, config.hidden_dim, bias=False)
-         self.w2 = nn.Linear(config.hidden_dim, config.dim, bias=False)
-         self.w3 = nn.Linear(config.dim, config.hidden_dim, bias=False)
-         self.dropout = nn.Dropout(config.dropout)
-
-     def forward(self, x):
-         return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
-
-
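- # MoE router: scores every token against each expert, keeps the top-k, and
- # (during training) adds a load-balancing auxiliary loss.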
- class MoEGate(nn.Module):
-     def __init__(self, config: LMConfig):
-         super().__init__()
-         self.config = config
-         self.top_k = config.num_experts_per_tok
-         self.n_routed_experts = config.n_routed_experts
-
-         self.scoring_func = config.scoring_func
-         self.alpha = config.aux_loss_alpha
-         self.seq_aux = config.seq_aux
-
-         self.norm_topk_prob = config.norm_topk_prob
-         self.gating_dim = config.dim
-         self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
-         self.reset_parameters()
-
-     def reset_parameters(self) -> None:
-         import torch.nn.init as init
-         init.kaiming_uniform_(self.weight, a=math.sqrt(5))
-
-     def forward(self, hidden_states):
-         bsz, seq_len, h = hidden_states.shape
-         hidden_states = hidden_states.view(-1, h)
-         logits = F.linear(hidden_states, self.weight, None)
-         if self.scoring_func == 'softmax':
-             scores = logits.softmax(dim=-1)
-         else:
-             raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')
-
-         topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)
-
-         if self.top_k > 1 and self.norm_topk_prob:
-             denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
-             topk_weight = topk_weight / denominator
-
-         if self.training and self.alpha > 0.0:
-             scores_for_aux = scores
-             aux_topk = self.top_k
-             topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
-             if self.seq_aux:
-                 scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
-                 ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
-                 ce.scatter_add_(1, topk_idx_for_aux_loss,
-                                 torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)).div_(
-                     seq_len * aux_topk / self.n_routed_experts)
-                 aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
-             else:
-                 mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
-                 ce = mask_ce.float().mean(0)
-                 Pi = scores_for_aux.mean(0)
-                 fi = ce * self.n_routed_experts
-                 aux_loss = (Pi * fi).sum() * self.alpha
-         else:
-             aux_loss = 0
-         return topk_idx, topk_weight, aux_loss
-
-
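- # MoE feed-forward: routes each token to its top-k experts. Training runs every
- # selected expert on its assigned tokens; inference uses the grouped moe_infer
- # path. Optional shared experts are always applied on top.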
- class MOEFeedForward(nn.Module):
-     def __init__(self, config: LMConfig):
-         super().__init__()
-         self.config = config
-         self.experts = nn.ModuleList([
-             FeedForward(config)
-             for _ in range(config.n_routed_experts)
-         ])
-         self.gate = MoEGate(config)
-         if config.n_shared_experts is not None:
-             self.shared_experts = FeedForward(config)
-
-     def forward(self, x):
-         identity = x
-         orig_shape = x.shape
-         bsz, seq_len, _ = x.shape
-         # select experts for each token via the gating mechanism
-         topk_idx, topk_weight, aux_loss = self.gate(x)
-         x = x.view(-1, x.shape[-1])
-         flat_topk_idx = topk_idx.view(-1)
-         if self.training:
-             # training mode: repeat each token once per selected expert
-             x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
-             y = torch.empty_like(x, dtype=torch.float16)
-             for i, expert in enumerate(self.experts):
-                 y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(y.dtype)  # keep dtypes consistent
-             y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
-             y = y.view(*orig_shape)
-         else:
-             # inference mode: dispatch tokens only to their selected experts
-             y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)
-         if self.config.n_shared_experts is not None:
-             y = y + self.shared_experts(identity)
-         self.aux_loss = aux_loss
-         return y
-
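-     # moe_infer: sort token slots by expert id, run each expert once over its
-     # contiguous slice, then scatter the weighted outputs back per token.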
-     @torch.no_grad()
-     def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
-         expert_cache = torch.zeros_like(x)
-         idxs = flat_expert_indices.argsort()
-         tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
-         token_idxs = idxs // self.config.num_experts_per_tok
-         # e.g. if tokens_per_expert = [6, 15, 20, 26, 33, 38, 46, 52]
-         # and token_idxs = [3, 7, 19, 21, 24, 25, 4, 5, 6, 10, 11, 12...],
-         # then the tokens at token_idxs[:6] -> [3, 7, 19, 21, 24, 25] are handled
-         # by expert 0, the tokens at token_idxs[6:15] by expert 1, and so on.
-         for i, end_idx in enumerate(tokens_per_expert):
-             start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
-             if start_idx == end_idx:
-                 continue
-             expert = self.experts[i]
-             exp_token_idx = token_idxs[start_idx:end_idx]
-             expert_tokens = x[exp_token_idx]
-             expert_out = expert(expert_tokens).to(expert_cache.dtype)
-             expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
-             # use scatter_add_ to sum expert outputs back into each token's row
-             expert_cache.scatter_add_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out)
-
-         return expert_cache
-
-
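- # Transformer block: pre-norm attention and feed-forward sublayers, each
- # wrapped in a residual connection.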
- class MiniMindBlock(nn.Module):
-     def __init__(self, layer_id: int, config: LMConfig):
-         super().__init__()
-         self.n_heads = config.n_heads
-         self.dim = config.dim
-         self.head_dim = config.dim // config.n_heads
-         self.attention = Attention(config)
-
-         self.layer_id = layer_id
-         self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
-         self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
-         self.feed_forward = FeedForward(config) if not config.use_moe else MOEFeedForward(config)
-
-     def forward(self, x, pos_cis, past_key_value=None, use_cache=False):
-         h_attn, past_kv = self.attention(
-             self.attention_norm(x),
-             pos_cis,
-             past_key_value=past_key_value,
-             use_cache=use_cache
-         )
-         h = x + h_attn
-         out = h + self.feed_forward(self.ffn_norm(h))
-         return out, past_kv
-
-
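- # Full model: token embedding (weight-tied with the output head), a stack of
- # MiniMindBlocks, final RMSNorm, and an LM head returning a HF-style
- # CausalLMOutputWithPast.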
- class MiniMindLM(PreTrainedModel):
-     config_class = LMConfig
-
-     def __init__(self, params: LMConfig = None):
-         params = params or LMConfig()  # fall back to a default config so later params.* lookups are safe
-         self.params = params
-         super().__init__(self.params)
-         self.vocab_size, self.n_layers = params.vocab_size, params.n_layers
-         self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)
-         self.dropout = nn.Dropout(params.dropout)
-         self.layers = nn.ModuleList([MiniMindBlock(l, params) for l in range(self.n_layers)])
-         self.norm = RMSNorm(params.dim, eps=params.norm_eps)
-         self.output = nn.Linear(params.dim, params.vocab_size, bias=False)
-         self.tok_embeddings.weight = self.output.weight
-         self.register_buffer("pos_cis",
-                              precompute_pos_cis(dim=params.dim // params.n_heads, theta=params.rope_theta),
-                              persistent=False)
-         self.OUT = CausalLMOutputWithPast()
-
-     def forward(self,
-                 input_ids: Optional[torch.Tensor] = None,
-                 past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
-                 use_cache: bool = False,
-                 logits_to_keep: Union[int, torch.Tensor] = 0,
-                 **args):
-         past_key_values = past_key_values or [None] * len(self.layers)
-         start_pos = args.get('start_pos', 0)
-         h = self.dropout(self.tok_embeddings(input_ids))
-         pos_cis = self.pos_cis[start_pos:start_pos + input_ids.size(1)]
-         past_kvs = []
-         for l, layer in enumerate(self.layers):
-             h, past_kv = layer(
-                 h, pos_cis,
-                 past_key_value=past_key_values[l],
-                 use_cache=use_cache
-             )
-             past_kvs.append(past_kv)
-
-         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
-         logits = self.output(self.norm(h)[:, slice_indices, :])
-         aux_loss = sum(l.feed_forward.aux_loss for l in self.layers if isinstance(l.feed_forward, MOEFeedForward))
-         self.OUT.__setitem__('last_hidden_state', h)
-         self.OUT.__setitem__('logits', logits)
-         self.OUT.__setitem__('aux_loss', aux_loss)
-         self.OUT.__setitem__('past_key_values', past_kvs)
-         return self.OUT
-
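-     # generate: either stream tokens as they are sampled, or collect full
-     # sequences (num_return_sequences samples per batch row, right-padded to a
-     # common length).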
-     @torch.inference_mode()
-     def generate(self, input_ids, eos_token_id=2, max_new_tokens=1024, temperature=0.75, top_p=0.90,
-                  stream=False, rp=1., use_cache=True, pad_token_id=0, num_return_sequences=1, **args):
-         # streaming generation
-         if stream:
-             return self._stream(input_ids, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args)
-
-         # direct generation
-         generated = []
-         for i in range(input_ids.size(0)):
-             non_pad = input_ids[i][input_ids[i] != pad_token_id].unsqueeze(0)
-             for _ in range(num_return_sequences):
-                 out = self._stream(non_pad, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args)
-                 tokens_list = [tokens[:, -1:] for tokens in out]
-                 gen = torch.cat(tokens_list, dim=-1) if tokens_list else non_pad
-                 full_sequence = torch.cat([non_pad, gen], dim=-1)
-                 generated.append(full_sequence)
-
-         max_length = max(seq.size(1) for seq in generated)
-         generated = [
-             torch.cat(
-                 [seq, torch.full((1, max_length - seq.size(1)), pad_token_id, dtype=seq.dtype, device=seq.device)],
-                 dim=-1)
-             for seq in generated
-         ]
-         output = torch.cat(generated, dim=0)
-         res = output.view(input_ids.size(0) * num_return_sequences, -1)
-         return res
-
-     def _stream(self, input_ids, eos_token_id, max_new_tokens, temperature, top_p, rp, use_cache, **args):
-         start, first_seq, past_kvs = input_ids.shape[1], True, None
-         while input_ids.shape[1] < max_new_tokens - 1:
-             if first_seq or not use_cache:
-                 out, first_seq = self(input_ids, past_key_values=past_kvs, use_cache=use_cache, **args), False
-             else:
-                 out = self(input_ids[:, -1:], past_key_values=past_kvs, use_cache=use_cache,
-                            start_pos=input_ids.shape[1] - 1, **args)
-             logits, past_kvs = out.logits[:, -1, :], out.past_key_values
-             logits[:, list(set(input_ids.tolist()[0]))] /= rp  # repetition penalty on already-seen tokens
-             logits /= (temperature + 1e-9)
-             if top_p is not None and top_p < 1.0:
-                 # nucleus (top-p) sampling: drop tokens past the cumulative-probability cutoff
-                 sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
-                 sorted_probs = F.softmax(sorted_logits, dim=-1)
-                 cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
-                 sorted_indices_to_remove = cumulative_probs > top_p
-                 sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
-                 sorted_indices_to_remove[:, 0] = False
-                 indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
-                 logits[indices_to_remove] = -float('Inf')
-             input_ids_next = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
-             input_ids = torch.cat((input_ids, input_ids_next), dim=1)
-             yield input_ids[:, start:]
-             if input_ids_next.item() == eos_token_id:
-                 break