fariasultana committed (verified)
Commit 4336553 · Parent(s): 1d1f00b

feat: Add modeling_minimind.py for AutoModelForCausalLM support

Files changed (1): modeling_minimind.py (+163 −0)
modeling_minimind.py ADDED
"""MiniMind Max2 Model for Transformers."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, List, Union
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
from .configuration_minimind import MiniMindConfig

class RMSNorm(nn.Module):
    """Root-mean-square layer norm (no mean subtraction, no bias)."""
    def __init__(self, dim, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.eps = eps

    def forward(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) * self.weight

class RotaryEmbedding(nn.Module):
    """Precomputes rotary-embedding (RoPE) inverse frequencies."""
    def __init__(self, dim, max_pos=32768, base=10000.0):
        super().__init__()
        # max_pos is kept for config compatibility; frequencies are
        # computed on the fly from the position ids passed to forward().
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, x, pos_ids):
        freqs = torch.outer(pos_ids.float(), self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return emb.cos(), emb.sin()

def rotate_half(x):
    # Split the last dimension in half and rotate: (x1, x2) -> (-x2, x1).
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rope(q, k, cos, sin):
    # Broadcast cos/sin over the batch and head dimensions.
    cos, sin = cos.unsqueeze(0).unsqueeze(0), sin.unsqueeze(0).unsqueeze(0)
    return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)

class Attention(nn.Module):
    """Grouped-query attention with RoPE and an optional KV cache."""
    def __init__(self, config, layer_idx):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.num_kv_heads = config.num_key_value_heads
        self.head_dim = config.hidden_size // self.num_heads
        self.kv_groups = self.num_heads // self.num_kv_heads
        self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=False)
        self.rotary = RotaryEmbedding(self.head_dim, config.max_position_embeddings, config.rope_theta)

    def forward(self, x, mask=None, pos_ids=None, past_kv=None, use_cache=False):
        B, L, _ = x.shape
        q = self.q_proj(x).view(B, L, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.k_proj(x).view(B, L, self.num_kv_heads, self.head_dim).transpose(1, 2)
        v = self.v_proj(x).view(B, L, self.num_kv_heads, self.head_dim).transpose(1, 2)
        if pos_ids is None:
            # Offset positions by the cached length so RoPE stays correct
            # during incremental decoding with a KV cache.
            past_len = past_kv[0].shape[2] if past_kv is not None else 0
            pos_ids = torch.arange(past_len, past_len + L, device=x.device)
        cos, sin = self.rotary(v, pos_ids)
        q, k = apply_rope(q, k, cos, sin)
        if past_kv is not None:
            k, v = torch.cat([past_kv[0], k], 2), torch.cat([past_kv[1], v], 2)
        new_kv = (k, v) if use_cache else None
        # Expand the KV heads to match the query heads (grouped-query attention).
        k = k.repeat_interleave(self.kv_groups, 1)
        v = v.repeat_interleave(self.kv_groups, 1)
        attn = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        if mask is not None:
            attn = attn + mask
        attn = F.softmax(attn, dim=-1)
        out = (attn @ v).transpose(1, 2).reshape(B, L, -1)
        return self.o_proj(out), new_kv

class Expert(nn.Module):
    """A single SwiGLU feed-forward expert."""
    def __init__(self, config):
        super().__init__()
        self.gate = nn.Linear(config.hidden_size, config.intermediate_size // config.num_experts, bias=False)
        self.up = nn.Linear(config.hidden_size, config.intermediate_size // config.num_experts, bias=False)
        self.down = nn.Linear(config.intermediate_size // config.num_experts, config.hidden_size, bias=False)

    def forward(self, x):
        return self.down(F.silu(self.gate(x)) * self.up(x))

class MoE(nn.Module):
    """Top-k mixture-of-experts feed-forward block with a softmax router."""
    def __init__(self, config):
        super().__init__()
        self.num_experts = config.num_experts
        self.top_k = config.num_experts_per_token
        self.router = nn.Linear(config.hidden_size, self.num_experts, bias=False)
        self.experts = nn.ModuleList([Expert(config) for _ in range(self.num_experts)])

    def forward(self, x):
        B, L, D = x.shape
        x_flat = x.view(-1, D)
        logits = self.router(x_flat)
        weights = F.softmax(logits, dim=-1)
        top_w, top_i = torch.topk(weights, self.top_k, dim=-1)
        # Renormalize so the selected experts' weights sum to 1 per token.
        top_w = top_w / top_w.sum(-1, keepdim=True)
        out = torch.zeros_like(x_flat)
        for i, exp in enumerate(self.experts):
            mask = (top_i == i).any(-1)
            if mask.any():
                w = (top_w * (top_i == i).float()).sum(-1, keepdim=True)[mask]
                out[mask] += w * exp(x_flat[mask])
        # The auxiliary load-balancing loss is a placeholder (always zero here).
        return out.view(B, L, D), torch.tensor(0.0, device=x.device)

class DecoderLayer(nn.Module):
    """Pre-norm transformer block: attention, then MoE, each with a residual."""
    def __init__(self, config, idx):
        super().__init__()
        self.attn = Attention(config, idx)
        self.moe = MoE(config)
        self.norm1 = RMSNorm(config.hidden_size, config.rms_norm_eps)
        self.norm2 = RMSNorm(config.hidden_size, config.rms_norm_eps)

    def forward(self, x, mask=None, pos_ids=None, past_kv=None, use_cache=False):
        h, kv = self.attn(self.norm1(x), mask, pos_ids, past_kv, use_cache)
        x = x + h
        m, aux = self.moe(self.norm2(x))
        return x + m, kv, aux

class MiniMindPreTrainedModel(PreTrainedModel):
    config_class = MiniMindConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True

class MiniMindModel(MiniMindPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embed = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([DecoderLayer(config, i) for i in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, config.rms_norm_eps)
        self.post_init()

    def forward(self, input_ids, attention_mask=None, position_ids=None, past_key_values=None, use_cache=False, **kwargs):
        B, L = input_ids.shape
        h = self.embed(input_ids)
        # Causal mask over past + current positions; cached keys are never masked.
        past_len = past_key_values[0][0].shape[2] if past_key_values else 0
        mask = torch.full((L, past_len + L), float("-inf"), device=h.device)
        mask = torch.triu(mask, diagonal=past_len + 1).unsqueeze(0).unsqueeze(0)
        cache = [] if use_cache else None
        aux = 0.0
        for i, layer in enumerate(self.layers):
            pkv = past_key_values[i] if past_key_values else None
            h, kv, a = layer(h, mask, position_ids, pkv, use_cache)
            if use_cache:
                cache.append(kv)
            aux += a
        return self.norm(h), cache, aux

class MiniMindForCausalLM(MiniMindPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = MiniMindModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed

    def get_output_embeddings(self):
        return self.lm_head

    def forward(self, input_ids=None, attention_mask=None, position_ids=None, past_key_values=None,
                labels=None, use_cache=None, return_dict=True, **kwargs):
        h, cache, aux = self.model(input_ids, attention_mask, position_ids, past_key_values, use_cache or False)
        logits = self.lm_head(h)
        loss = None
        if labels is not None:
            # Standard next-token objective: shift logits left, labels right.
            loss = F.cross_entropy(logits[..., :-1, :].reshape(-1, logits.size(-1)), labels[..., 1:].reshape(-1))
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=cache)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        # With a cache, only the newest token needs to run through the model.
        if past_key_values:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "past_key_values": past_key_values, "use_cache": True}