jingyaogong committed on
Commit
22a1d10
·
1 Parent(s): 9e40365

Upload 2 files

MiniMind2/model_minimind.py ADDED
@@ -0,0 +1,470 @@
+ # 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+ # MiniMind Config
+ # 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+
+ from transformers import PretrainedConfig
+
+
+ class MiniMindConfig(PretrainedConfig):
+     model_type = "minimind"
+
+     def __init__(
+             self,
+             dropout: float = 0.0,
+             bos_token_id: int = 1,
+             eos_token_id: int = 2,
+             hidden_act: str = 'silu',
+             hidden_size: int = 512,
+             intermediate_size: int = None,
+             max_position_embeddings: int = 32768,
+             num_attention_heads: int = 8,
+             num_hidden_layers: int = 8,
+             num_key_value_heads: int = 2,
+             vocab_size: int = 6400,
+             rms_norm_eps: float = 1e-05,
+             rope_theta: float = 1000000.0,
+             inference_rope_scaling: bool = False,
+             flash_attn: bool = True,
+             ####################################################
+             # Here are the specific configurations of MoE
+             # When use_moe is False, the following is invalid
+             ####################################################
+             use_moe: bool = False,
+             num_experts_per_tok: int = 2,
+             n_routed_experts: int = 4,
+             n_shared_experts: int = 1,
+             scoring_func: str = 'softmax',
+             aux_loss_alpha: float = 0.1,
+             seq_aux: bool = True,
+             norm_topk_prob: bool = True,
+             **kwargs
+     ):
+         super().__init__(**kwargs)
+         self.dropout = dropout
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.hidden_act = hidden_act
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.max_position_embeddings = max_position_embeddings
+         self.num_attention_heads = num_attention_heads
+         self.num_hidden_layers = num_hidden_layers
+         self.num_key_value_heads = num_key_value_heads
+         self.vocab_size = vocab_size
+         self.rms_norm_eps = rms_norm_eps
+         self.rope_theta = rope_theta
+         self.inference_rope_scaling = inference_rope_scaling
+         # extrapolated context length = factor * original_max_position_embeddings
+         self.rope_scaling = {
+             "beta_fast": 4,
+             "beta_slow": 1,
+             "factor": 4,
+             "original_max_position_embeddings": 2048,
+             "type": "yarn"
+         } if self.inference_rope_scaling else None
+         self.flash_attn = flash_attn
+         ####################################################
+         # Here are the specific configurations of MoE
+         # When use_moe is False, the following is invalid
+         ####################################################
+         self.use_moe = use_moe
+         self.num_experts_per_tok = num_experts_per_tok  # number of experts selected per token
+         self.n_routed_experts = n_routed_experts  # total number of routed experts
+         self.n_shared_experts = n_shared_experts  # number of shared experts
+         self.scoring_func = scoring_func  # scoring function, 'softmax' by default
+         self.aux_loss_alpha = aux_loss_alpha  # alpha weight of the auxiliary load-balancing loss
+         self.seq_aux = seq_aux  # whether to compute the auxiliary loss at the sequence level
+         self.norm_topk_prob = norm_topk_prob  # whether to normalize the top-k probabilities
+
+
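For orientation, a minimal sketch of constructing this config (assuming the file is importable as model_minimind; the values shown are illustrative, not tied to any released checkpoint):

    from model_minimind import MiniMindConfig

    cfg = MiniMindConfig(hidden_size=512, num_hidden_layers=8, use_moe=False)
    print(cfg.num_attention_heads, cfg.num_key_value_heads)  # 8 2 -> each K/V head serves 4 query heads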
+ # 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+ # MiniMind Model
+ # 📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘📘
+
+ import math
+ import torch
+ import torch.nn.init as init
+ import torch.nn.functional as F
+ from torch import nn
+ from transformers.activations import ACT2FN
+ from typing import Optional, Tuple, List, Union
+ from transformers import PreTrainedModel, GenerationMixin, PretrainedConfig
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-5):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def _norm(self, x):
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+     def forward(self, x):
+         return self.weight * self._norm(x.float()).type_as(x)
+
+
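A quick numerical check of RMSNorm against its formula x / sqrt(mean(x^2) + eps), reusing the class above (the learnable weight initializes to ones, so the two sides should match):

    x = torch.randn(2, 3, 512)
    norm = RMSNorm(512)
    ref = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-5)
    assert torch.allclose(norm(x), ref, atol=1e-5)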
+ def precompute_freqs_cis(dim: int, end: int = int(32 * 1024), rope_base: float = 1e6,
+                          rope_scaling: Optional[dict] = None):
+     freqs = 1.0 / (rope_base ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+     if rope_scaling is not None:
+         orig_max, factor, beta_fast, beta_slow = (
+             rope_scaling.get("original_max_position_embeddings", 2048), rope_scaling.get("factor", 4),
+             rope_scaling.get("beta_fast", 4.0), rope_scaling.get("beta_slow", 1.0)
+         )
+         if end / orig_max > 1.0:
+             corr_dim = next((i for i in range(dim // 2) if 2 * math.pi / freqs[i] > orig_max), dim // 2)
+             power = torch.arange(0, dim // 2, device=freqs.device).float() / max(dim // 2 - 1, 1)
+             beta = beta_slow + (beta_fast - beta_slow) * power
+             # λ = (β·α - β + 1)/(β·α), the standard YaRN scaling formula
+             scale = torch.where(torch.arange(dim // 2, device=freqs.device) < corr_dim,
+                                 (beta * factor - beta + 1) / (beta * factor), 1.0 / factor)
+             freqs = freqs * scale
+
+     t = torch.arange(end, device=freqs.device)
+     freqs = torch.outer(t, freqs).float()
+     freqs_cos = torch.cat([torch.cos(freqs), torch.cos(freqs)], dim=-1)
+     freqs_sin = torch.cat([torch.sin(freqs), torch.sin(freqs)], dim=-1)
+     return freqs_cos, freqs_sin
+
+
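The returned tables are indexed by absolute position, one row of cos/sin values per position; a shape sketch with this model's defaults (head_dim = 512 / 8 = 64):

    cos, sin = precompute_freqs_cis(dim=64, end=32768, rope_base=1e6)
    print(cos.shape, sin.shape)  # torch.Size([32768, 64]) twice
    # With rope_scaling enabled, low-frequency dimensions are compressed by 1/factor,
    # so a model trained at 2048 positions can be run at factor * 2048 positions.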
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     def rotate_half(x):
+         return torch.cat((-x[..., x.shape[-1] // 2:], x[..., : x.shape[-1] // 2]), dim=-1)
+
+     q_embed = (q * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(q) * sin.unsqueeze(unsqueeze_dim))
+     k_embed = (k * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(k) * sin.unsqueeze(unsqueeze_dim))
+     return q_embed, k_embed
+
+
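Because RoPE rotates (even, odd) dimension pairs, it preserves vector norms; a quick sanity check reusing the tables from the previous sketch:

    q = torch.randn(1, 16, 8, 64)  # (batch, seq, heads, head_dim)
    q_rot, _ = apply_rotary_pos_emb(q, q, cos[:16], sin[:16])
    assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-4)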
+ def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """Equivalent to torch.repeat_interleave(x, dim=2, repeats=n_rep)."""
+     bs, slen, num_key_value_heads, head_dim = x.shape
+     if n_rep == 1:
+         return x
+     return (
+         x[:, :, :, None, :].expand(bs, slen, num_key_value_heads, n_rep, head_dim)
+         .reshape(bs, slen, num_key_value_heads * n_rep, head_dim)
+     )
+
+
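With the defaults (8 query heads, 2 key/value heads), repeat_kv expands each cached K/V head to serve 4 query heads:

    kv = torch.randn(1, 16, 2, 64)  # (batch, seq, num_key_value_heads, head_dim)
    print(repeat_kv(kv, 4).shape)   # torch.Size([1, 16, 8, 64])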
+ class Attention(nn.Module):
+     def __init__(self, args: MiniMindConfig):
+         super().__init__()
+         self.num_key_value_heads = args.num_attention_heads if args.num_key_value_heads is None else args.num_key_value_heads
+         assert args.num_attention_heads % self.num_key_value_heads == 0
+         self.n_local_heads = args.num_attention_heads
+         self.n_local_kv_heads = self.num_key_value_heads
+         self.n_rep = self.n_local_heads // self.n_local_kv_heads
+         self.head_dim = args.hidden_size // args.num_attention_heads
+         self.q_proj = nn.Linear(args.hidden_size, args.num_attention_heads * self.head_dim, bias=False)
+         self.k_proj = nn.Linear(args.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+         self.v_proj = nn.Linear(args.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+         self.o_proj = nn.Linear(args.num_attention_heads * self.head_dim, args.hidden_size, bias=False)
+         self.attn_dropout = nn.Dropout(args.dropout)
+         self.resid_dropout = nn.Dropout(args.dropout)
+         self.dropout = args.dropout
+         # Flash attention requires PyTorch >= 2.0; otherwise fall back to the slow path
+         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn
+
+     def forward(self,
+                 x: torch.Tensor,
+                 position_embeddings: Tuple[torch.Tensor, torch.Tensor],  # precomputed (cos, sin)
+                 past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+                 use_cache=False,
+                 attention_mask: Optional[torch.Tensor] = None):
+         bsz, seq_len, _ = x.shape
+         xq, xk, xv = self.q_proj(x), self.k_proj(x), self.v_proj(x)
+         xq = xq.view(bsz, seq_len, self.n_local_heads, self.head_dim)
+         xk = xk.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
+         xv = xv.view(bsz, seq_len, self.n_local_kv_heads, self.head_dim)
+
+         cos, sin = position_embeddings
+         xq, xk = apply_rotary_pos_emb(xq, xk, cos[:seq_len], sin[:seq_len])
+
+         # KV cache: concatenate the cached keys/values ahead of the current step's
+         if past_key_value is not None:
+             xk = torch.cat([past_key_value[0], xk], dim=1)
+             xv = torch.cat([past_key_value[1], xv], dim=1)
+         past_kv = (xk, xv) if use_cache else None
+
+         xq, xk, xv = (
+             xq.transpose(1, 2),
+             repeat_kv(xk, self.n_rep).transpose(1, 2),
+             repeat_kv(xv, self.n_rep).transpose(1, 2)
+         )
+
+         if self.flash and seq_len > 1 and (attention_mask is None or torch.all(attention_mask == 1)):
+             # The guard above guarantees there is no padding, so the built-in causal mask
+             # suffices; SDPA disallows passing attn_mask together with is_causal=True.
+             output = F.scaled_dot_product_attention(
+                 xq, xk, xv, attn_mask=None,
+                 dropout_p=self.dropout if self.training else 0.0, is_causal=True
+             )
+         else:
+             scores = (xq @ xk.transpose(-2, -1)) / math.sqrt(self.head_dim)
+             scores = scores + torch.triu(
+                 torch.full((seq_len, seq_len), float("-inf"), device=scores.device),
+                 diagonal=1
+             ).unsqueeze(0).unsqueeze(0)  # add the causal mask to the scores
+
+             if attention_mask is not None:
+                 extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+                 extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
+                 scores = scores + extended_attention_mask
+
+             scores = F.softmax(scores.float(), dim=-1).type_as(xq)
+             scores = self.attn_dropout(scores)
+             output = scores @ xv
+
+         output = output.transpose(1, 2).reshape(bsz, seq_len, -1)
+         output = self.resid_dropout(self.o_proj(output))
+         return output, past_kv
+
+
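A smoke test of the prefill-then-decode flow through this module (a sketch with random weights; the cache grows by one K/V entry per decoded token):

    cfg = MiniMindConfig()
    attn = Attention(cfg).eval()
    cos, sin = precompute_freqs_cis(cfg.hidden_size // cfg.num_attention_heads, end=64, rope_base=cfg.rope_theta)
    x = torch.randn(1, 8, cfg.hidden_size)
    out, past = attn(x, (cos[:8], sin[:8]), use_cache=True)  # prefill 8 positions
    step = torch.randn(1, 1, cfg.hidden_size)
    out, past = attn(step, (cos[8:9], sin[8:9]), past_key_value=past, use_cache=True)
    print(past[0].shape)  # torch.Size([1, 9, 2, 64])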
+ class FeedForward(nn.Module):
+     def __init__(self, config: MiniMindConfig):
+         super().__init__()
+         if config.intermediate_size is None:
+             intermediate_size = int(config.hidden_size * 8 / 3)
+             config.intermediate_size = 64 * ((intermediate_size + 64 - 1) // 64)
+         self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
+         self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
+         self.dropout = nn.Dropout(config.dropout)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         return self.dropout(self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)))
+
+
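The SwiGLU width rule above, worked through for the default hidden_size of 512:

    inter = int(512 * 8 / 3)           # 1365
    inter = 64 * ((inter + 63) // 64)  # round up to a multiple of 64 -> 1408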
+ class MoEGate(nn.Module):
+     def __init__(self, config: MiniMindConfig):
+         super().__init__()
+         self.config = config
+         self.top_k = config.num_experts_per_tok
+         self.n_routed_experts = config.n_routed_experts
+
+         self.scoring_func = config.scoring_func
+         self.alpha = config.aux_loss_alpha
+         self.seq_aux = config.seq_aux
+
+         self.norm_topk_prob = config.norm_topk_prob
+         self.gating_dim = config.hidden_size
+         self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
+         self.reset_parameters()
+
+     def reset_parameters(self) -> None:
+         init.kaiming_uniform_(self.weight, a=math.sqrt(5))
+
+     def forward(self, hidden_states):
+         bsz, seq_len, h = hidden_states.shape
+         hidden_states = hidden_states.view(-1, h)
+         logits = F.linear(hidden_states, self.weight, None)
+         if self.scoring_func == 'softmax':
+             scores = logits.softmax(dim=-1)
+         else:
+             raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')
+
+         topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)
+
+         if self.top_k > 1 and self.norm_topk_prob:
+             denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
+             topk_weight = topk_weight / denominator
+
+         if self.training and self.alpha > 0.0:
+             scores_for_aux = scores
+             aux_topk = self.top_k
+             topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
+             if self.seq_aux:
+                 scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
+                 ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
+                 ce.scatter_add_(1, topk_idx_for_aux_loss,
+                                 torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)).div_(
+                     seq_len * aux_topk / self.n_routed_experts)
+                 aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
+             else:
+                 mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
+                 ce = mask_ce.float().mean(0)
+                 Pi = scores_for_aux.mean(0)
+                 fi = ce * self.n_routed_experts
+                 aux_loss = (Pi * fi).sum() * self.alpha
+         else:
+             aux_loss = 0
+         return topk_idx, topk_weight, aux_loss
+
+
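Routing a toy batch through the gate (a sketch; the weights are random, so the actual expert assignment is arbitrary):

    cfg = MiniMindConfig(use_moe=True)
    gate = MoEGate(cfg).train()
    idx, w, aux = gate(torch.randn(2, 5, cfg.hidden_size))
    print(idx.shape, w.shape)  # torch.Size([10, 2]) twice: top-2 experts per flattened token
    print(float(aux))          # equals aux_loss_alpha (0.1) under perfectly balanced routing, larger when skewed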
+ class MOEFeedForward(nn.Module):
+     def __init__(self, config: MiniMindConfig):
+         super().__init__()
+         self.config = config
+         self.experts = nn.ModuleList([
+             FeedForward(config)
+             for _ in range(config.n_routed_experts)
+         ])
+         self.gate = MoEGate(config)
+         if config.n_shared_experts > 0:
+             self.shared_experts = nn.ModuleList([
+                 FeedForward(config)
+                 for _ in range(config.n_shared_experts)
+             ])
+
+     def forward(self, x):
+         identity = x
+         orig_shape = x.shape
+         bsz, seq_len, _ = x.shape
+         # select experts with the gating network
+         topk_idx, topk_weight, aux_loss = self.gate(x)
+         x = x.view(-1, x.shape[-1])
+         flat_topk_idx = topk_idx.view(-1)
+         if self.training:
+             x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
+             y = torch.empty_like(x, dtype=torch.float16)
+             for i, expert in enumerate(self.experts):
+                 y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(y.dtype)  # keep dtypes consistent
+             y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
+             y = y.view(*orig_shape)
+         else:
+             y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)
+         if self.config.n_shared_experts > 0:
+             for expert in self.shared_experts:
+                 y = y + expert(identity)
+         self.aux_loss = aux_loss
+         return y
+
+     @torch.no_grad()
+     def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
+         expert_cache = torch.zeros_like(x)
+         idxs = flat_expert_indices.argsort()
+         tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
+         token_idxs = idxs // self.config.num_experts_per_tok
+         # With tokens_per_expert = [6, 15, 20, 26], tokens_per_expert.shape[0] is the number of experts (4 here).
+         # If token_idxs = [3, 7, 19, 21, 24, 25, 4, 5, 6, 10, 11, 12, ...], then
+         # token_idxs[:6] -> [3, 7, 19, 21, 24, 25] are the six token positions routed to expert 0
+         # (a token may be handled by several experts, depending on num_experts_per_tok);
+         # the next nine positions, token_idxs[6:15] -> [4, 5, 6, 10, 11, 12, ...], belong to expert 1, and so on.
+         for i, end_idx in enumerate(tokens_per_expert):
+             start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
+             if start_idx == end_idx:
+                 continue
+             expert = self.experts[i]
+             exp_token_idx = token_idxs[start_idx:end_idx]
+             expert_tokens = x[exp_token_idx]
+             expert_out = expert(expert_tokens).to(expert_cache.dtype)
+             expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
+             expert_cache.scatter_add_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out)
+
+         return expert_cache
+
+
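End to end through the MoE block in inference mode (a sketch; moe_infer groups tokens by expert and scatter-adds the weighted expert outputs back into place):

    cfg = MiniMindConfig(use_moe=True)
    moe = MOEFeedForward(cfg).eval()
    with torch.no_grad():
        y = moe(torch.randn(1, 4, cfg.hidden_size))
    print(y.shape)  # torch.Size([1, 4, 512]): top-2 routed outputs plus the shared expert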
+ class MiniMindBlock(nn.Module):
+     def __init__(self, layer_id: int, config: MiniMindConfig):
+         super().__init__()
+         self.num_attention_heads = config.num_attention_heads
+         self.hidden_size = config.hidden_size
+         self.head_dim = config.hidden_size // config.num_attention_heads
+         self.self_attn = Attention(config)
+
+         self.layer_id = layer_id
+         self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.mlp = FeedForward(config) if not config.use_moe else MOEFeedForward(config)
+
+     def forward(self, hidden_states, position_embeddings, past_key_value=None, use_cache=False, attention_mask=None):
+         residual = hidden_states
+         hidden_states, present_key_value = self.self_attn(
+             self.input_layernorm(hidden_states), position_embeddings,
+             past_key_value, use_cache, attention_mask
+         )
+         hidden_states = hidden_states + residual
+         hidden_states = hidden_states + self.mlp(self.post_attention_layernorm(hidden_states))
+         return hidden_states, present_key_value
+
+
+ class MiniMindModel(nn.Module):
+     def __init__(self, config: MiniMindConfig):
+         super().__init__()
+         self.config = config
+         self.vocab_size, self.num_hidden_layers = config.vocab_size, config.num_hidden_layers
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.dropout = nn.Dropout(config.dropout)
+         self.layers = nn.ModuleList([MiniMindBlock(l, config) for l in range(self.num_hidden_layers)])
+         self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         freqs_cos, freqs_sin = precompute_freqs_cis(dim=config.hidden_size // config.num_attention_heads,
+                                                     end=config.max_position_embeddings, rope_base=config.rope_theta,
+                                                     rope_scaling=config.rope_scaling)
+         self.register_buffer("freqs_cos", freqs_cos, persistent=False)
+         self.register_buffer("freqs_sin", freqs_sin, persistent=False)
+
+     def forward(self,
+                 input_ids: Optional[torch.Tensor] = None,
+                 attention_mask: Optional[torch.Tensor] = None,
+                 past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+                 use_cache: bool = False,
+                 **kwargs):
+         batch_size, seq_length = input_ids.shape
+         # transformers' generate() may hand in a Cache object; only tuple-style caches
+         # are supported here, so any such object is discarded
+         if hasattr(past_key_values, 'layers'): past_key_values = None
+         past_key_values = past_key_values or [None] * len(self.layers)
+         start_pos = past_key_values[0][0].shape[1] if past_key_values[0] is not None else 0
+
+         hidden_states = self.dropout(self.embed_tokens(input_ids))
+
+         position_embeddings = (
+             self.freqs_cos[start_pos:start_pos + seq_length],
+             self.freqs_sin[start_pos:start_pos + seq_length]
+         )
+
+         presents = []
+         for layer_idx, (layer, past_key_value) in enumerate(zip(self.layers, past_key_values)):
+             hidden_states, present = layer(
+                 hidden_states,
+                 position_embeddings,
+                 past_key_value=past_key_value,
+                 use_cache=use_cache,
+                 attention_mask=attention_mask
+             )
+             presents.append(present)
+
+         hidden_states = self.norm(hidden_states)
+
+         aux_loss = sum(
+             layer.mlp.aux_loss
+             for layer in self.layers
+             if isinstance(layer.mlp, MOEFeedForward)
+         )
+
+         return hidden_states, presents, aux_loss
+
+
+ class MiniMindForCausalLM(PreTrainedModel, GenerationMixin):
+     config_class = MiniMindConfig
+
+     def __init__(self, config: MiniMindConfig = None):
+         self.config = config or MiniMindConfig()
+         super().__init__(self.config)
+         self.model = MiniMindModel(self.config)
+         self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)
+         self.model.embed_tokens.weight = self.lm_head.weight  # tie input and output embeddings
+         self.OUT = CausalLMOutputWithPast()
+
+     def forward(self,
+                 input_ids: Optional[torch.Tensor] = None,
+                 attention_mask: Optional[torch.Tensor] = None,
+                 past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+                 use_cache: bool = False,
+                 logits_to_keep: Union[int, torch.Tensor] = 0,
+                 **args):
+         h, past_kvs, aux_loss = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             **args
+         )
+         # logits_to_keep == 0 keeps every position, since slice(-0, None) == slice(0, None)
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(h[:, slice_indices, :])
+         self.OUT['last_hidden_state'] = h
+         self.OUT['logits'] = logits
+         self.OUT['aux_loss'] = aux_loss
+         self.OUT['past_key_values'] = past_kvs
+         return self.OUT
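A final smoke test of the full model (a sketch with random weights; the token ids are arbitrary):

    model = MiniMindForCausalLM(MiniMindConfig()).eval()
    ids = torch.randint(0, 6400, (1, 8))
    with torch.no_grad():
        out = model(input_ids=ids, logits_to_keep=1)
    print(out.logits.shape)  # torch.Size([1, 1, 6400]): logits for the last position only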
MiniMind2/tokenizer_config.json CHANGED
@@ -4,7 +4,7 @@
   "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
-      "content": "<unk>",
+      "content": "<|endoftext|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -12,7 +12,7 @@
       "special": true
     },
     "1": {
-      "content": "<s>",
+      "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -20,7 +20,7 @@
       "special": true
     },
     "2": {
-      "content": "</s>",
+      "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -29,16 +29,15 @@
     }
   },
   "additional_special_tokens": [],
-  "bos_token": "<s>",
-  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{{ '<s>system\\n' + system_message + '</s>\\n' }}{% else %}{{ '<s>system\\n你是 MiniMind,是一个有用的人工智能助手。</s>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<s>user\\n' + content + '</s>\\n<s>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\\n' }}{% endif %}{% endfor %}",
+  "bos_token": "<|im_start|>",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
-  "extra_special_tokens": {},
+  "eos_token": "<|im_end|>",
   "legacy": true,
   "model_max_length": 32768,
-  "pad_token": "<unk>",
+  "pad_token": "<|endoftext|>",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "PreTrainedTokenizerFast",
-  "unk_token": "<unk>"
-}
+  "unk_token": "<|endoftext|>",
+  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0].role == 'system' %}\n        {{- messages[0].content + '\\n\\n' }}\n    {%- endif %}\n    {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' -%}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else -%}\n        {{- '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n    {%- set index = (messages|length - 1) - loop.index0 %}\n    {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n        {%- set ns.multi_step_tool = false %}\n        {%- set ns.last_query_index = index %}\n    {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n    {%- if message.content is string %}\n        {%- set content = message.content %}\n    {%- else %}\n        {%- set content = '' %}\n    {%- endif %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n        {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role + '\\n' + content }}\n        {%- if message.tool_calls %}\n            {%- for tool_call in message.tool_calls %}\n                {%- if (loop.first and content) or (not loop.first) %}\n                    {{- '\\n' }}\n                {%- endif %}\n                {%- if tool_call.function %}\n                    {%- set tool_call = tool_call.function %}\n                {%- endif %}\n                {{- '<tool_call>\\n{\"name\": \"' }}\n                {{- tool_call.name }}\n                {{- '\", \"arguments\": ' }}\n                {%- if tool_call.arguments is string %}\n                    {{- tool_call.arguments }}\n                {%- else %}\n                    {{- tool_call.arguments | tojson }}\n                {%- endif %}\n                {{- '}\\n</tool_call>' }}\n            {%- endfor %}\n        {%- endif %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n    {%- if enable_thinking is defined and enable_thinking is false %}\n        {{- '<think>\\n\\n</think>\\n\\n' }}\n    {%- endif %}\n{%- endif %}"
+}
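With the updated config, the tokenizer renders ChatML-style turns; a quick check (a sketch, assuming the files are loaded from the MiniMind2 directory):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./MiniMind2")
    text = tok.apply_chat_template(
        [{"role": "user", "content": "hi"}],
        tokenize=False, add_generation_prompt=True,
    )
    print(text)  # ...<|im_start|>user\nhi<|im_end|>\n<|im_start|>assistant\n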