SnifferCaptain committed on
Commit 8c54c2e · verified · 1 Parent(s): c11beae

Upload model

config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "architectures": [
+     "YForCausalLM2"
+   ],
+   "auto_map": {
+     "AutoConfig": "ymodel2.YConfig2",
+     "AutoModelForCausalLM": "ymodel2.YForCausalLM2"
+   },
+   "bos_token_id": 1,
+   "dropout": 0.1,
+   "eos_token_id": 2,
+   "force_flash_attn": false,
+   "head_dim": 64,
+   "hidden_act": "gelu_pytorch_tanh",
+   "hidden_size": 512,
+   "intermediate_size": 1024,
+   "max_position_embeddings": 4096,
+   "model_type": "ynet2",
+   "num_heads": 8,
+   "num_layers": 4,
+   "rms_norm_eps": 1e-08,
+   "rope_theta": 50000.0,
+   "self_distill": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "vocab_size": 6400
+ }
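
The auto_map above registers the custom classes shipped in ymodel2.py, so AutoConfig resolves to ymodel2.YConfig2 and AutoModelForCausalLM to ymodel2.YForCausalLM2 when remote code is trusted. A minimal loading sketch; the repository id below is a placeholder, not part of this commit:

import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "path/to/this-repo"  # placeholder: substitute the actual Hub repository id
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)      # -> ymodel2.YConfig2
model = AutoModelForCausalLM.from_pretrained(                             # -> ymodel2.YForCausalLM2
    repo_id, trust_remote_code=True, torch_dtype=torch.bfloat16           # matches "torch_dtype": "bfloat16"
)
print(config.model_type)  # "ynet2"
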
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.51.3"
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f009ae7adf7f652f2931d1ae6b716724a8dedde3536af43e76b942aa85c8bc5
+ size 20794209
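
pytorch_model.bin is stored via Git LFS: the pointer above records only the LFS spec version, the SHA-256 of the actual weights blob (oid), and its size (20,794,209 bytes). A small sketch for checking a downloaded copy against the recorded oid; the local filename is an assumption:

import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks so large checkpoints need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

expected = "1f009ae7adf7f652f2931d1ae6b716724a8dedde3536af43e76b942aa85c8bc5"
assert file_sha256("pytorch_model.bin") == expected
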
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|im_start|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% else %}{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "PreTrainedTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
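
The chat_template above produces ChatML-style turns: when the first message is not a system message it injects the default "You are a helpful assistant" system turn, and it opens an assistant header after every user turn. A sketch of how it renders; the repository id is a placeholder:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)
messages = [{"role": "user", "content": "Hello!"}]
text = tok.apply_chat_template(messages, tokenize=False)
# text ==
# "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n"
# "<|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant\n"
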
ymodel2.py ADDED
@@ -0,0 +1,534 @@
+ import math
+ import torch
+ import torch.nn as nn
+ from typing import Optional, Tuple, Union, List
+ from transformers import PreTrainedModel, GenerationMixin
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ class YConfig2(PretrainedConfig):
+     model_type = "ynet2"
+
+     def __init__(
+         self,
+         dropout: float = 0.1,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         hidden_act: str = 'gelu_pytorch_tanh',  # silu 4.687 / gelu 4.662 / mish 4.695 / relu2 4.755 / laplace
+         hidden_size: int = 768,
+         num_layers: int = 9,
+         max_position_embeddings: int = 8192,
+         vocab_size: int = 6400,
+         rms_norm_eps: float = 1e-8,
+         rope_theta: float = 5e4,  # 5e4
+         self_distill: bool = True,
+         force_flash_attn: bool = False,
+         ### FFN ###
+         intermediate_size: int = None,  # 512 * 4 (full [4] / 256) = 2048 (2 ** 17)
+         ### attn ###
+         num_heads: int = 4,
+         head_dim: int = 64,
+         **kwargs
+     ):
+         super().__init__(**kwargs)
+         self.dropout = dropout
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.hidden_act = hidden_act
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers  # number of transformer blocks
+         self.max_position_embeddings = max_position_embeddings
+         self.vocab_size = vocab_size
+         self.rms_norm_eps = rms_norm_eps
+         self.rope_theta = rope_theta
+         self.self_distill = self_distill
+         self.force_flash_attn = force_flash_attn
+         ### FFN ###
+         self.intermediate_size = intermediate_size  # FFN intermediate dimension
+         ### attn ###
+         self.num_heads = num_heads  # number of query heads
+         self.head_dim = head_dim  # dimension of each head
+
+     def scale_lvl(self, lvl: int = 0):
+         if lvl == 0:
+             # normal settings [99.312m]
+             self.num_layers = 16
+             self.hidden_size = 768
+             self.num_heads = 16
+             self.head_dim = 128
+             self.intermediate_size = 2048
+         elif lvl == -1:
+             self.num_layers = 8
+             self.hidden_size = 512  # base = 4.662 16h/64d 30
+             self.num_heads = 8  # 2*heads 4.578/20.84
+             self.head_dim = 64  # 2*dim 4.576/22.8
+             self.intermediate_size = 1536
+         elif lvl == -2:
+             self.num_layers = 4
+             self.hidden_size = 512
+             self.num_heads = 8
+             self.head_dim = 64
+             self.intermediate_size = 1024
+         else:
+             raise ValueError(f"Invalid level: {lvl}")
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-6):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim, dtype=torch.float32))
+
+     def _norm(self, x):
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+     def forward(self, x):
+         output = self._norm(x.float())
+         output = output * self.weight.float()
+         return output.type_as(x)
+
+
+ def precompute_freqs_cis(dim: int, end: int = int(32 * 1024), theta: float = 5e4):
+     freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+     t = torch.arange(end, device=freqs.device)
+     freqs = torch.outer(t, freqs).float()
+     freqs_cos = torch.cat([torch.cos(freqs), torch.cos(freqs)], dim=-1)
+     freqs_sin = torch.cat([torch.sin(freqs), torch.sin(freqs)], dim=-1)
+     return freqs_cos, freqs_sin
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=0):
+     def rotate_half(x):
+         return torch.cat((-x[..., x.shape[-1] // 2:], x[..., : x.shape[-1] // 2]), dim=-1)
+
+     q_embed = (q * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(q) * sin.unsqueeze(unsqueeze_dim))
+     k_embed = (k * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(k) * sin.unsqueeze(unsqueeze_dim))
+     return q_embed, k_embed
+
+
+ def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """Equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep) for [b, h, l, ch] tensors."""
+     b, h, l, ch = x.shape
+     if n_rep == 1:
+         return x
+     return (
+         x[:, :, None, :, :]
+         .expand(b, h, n_rep, l, ch)
+         .reshape(b, h * n_rep, l, ch)
+     )
+
+
123
+ def __init__(self, config: YConfig2):
124
+ super().__init__()
125
+ self.hidden_size = config.hidden_size
126
+ self.intermediate_size = config.intermediate_size or int(2.5 * config.hidden_size)
127
+ self.gate_act = ACT2FN[config.hidden_act]
128
+
129
+ self.up = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
130
+ # self.up = nn.Linear(self.hidden_size, self.intermediate_size)
131
+ # self.gate = nn.Linear(self.hidden_size, self.intermediate_size)
132
+ self.down = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
133
+
134
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
135
+ x, g = self.up(x).chunk(2, dim=-1)
136
+ # x, g = self.up(x), self.gate(x)
137
+ x = self.gate_act(g) * x
138
+ x = self.down(x)
139
+ return x
140
+
141
+
142
+ class PEGA2(nn.Module):
143
+ def __init__(self, config: YConfig2):
144
+ super().__init__()
145
+ self.dropout = config.dropout # dropout rate
146
+ self.hidden_size = config.hidden_size # 输入通道大小
147
+ self.num_heads = config.num_heads # 总注意力头数
148
+ self.head_dim = config.head_dim # 每个头的维度
149
+ self.gate_act = ACT2FN[config.hidden_act]
150
+ self.delta_kv_only = False
151
+ self.force_flash_attn = config.force_flash_attn
152
+
153
+ assert self.num_heads % 2 == 0, "num_heads must be even."
154
+ # 2d opt: fused 29.5/4.693 split: 28.7/4.791
155
+ # qpe, q
156
+ self.qkv_list = [
157
+ self.num_heads // 2 * self.head_dim, # qpe
158
+ self.num_heads // 2 * self.head_dim, # qnope
159
+ self.head_dim, # kpe
160
+ self.head_dim, # kv
161
+ ]
162
+ self.qkv = nn.Sequential(
163
+ nn.Linear(self.hidden_size, self.head_dim, bias=False),
164
+ nn.Linear(self.head_dim, sum(self.qkv_list), bias=False)
165
+ )
166
+
167
+ # self.z = nn.Linear(self.hidden_size, self.head_dim, bias=False)
168
+ # self.qpe = nn.Linear(self.head_dim, self.num_heads // 2 * self.head_dim, bias=False)
169
+ # self.qnope = nn.Linear(self.head_dim, self.num_heads // 2 * self.head_dim, bias=False)
170
+ # self.kpe = nn.Linear(self.head_dim, self.head_dim, bias=False)
171
+ # self.kv = nn.Linear(self.head_dim, self.head_dim, bias=False)
172
+
173
+ self.o = nn.Linear(self.head_dim // 2 * self.num_heads, self.hidden_size, bias=False)
174
+ self.rsqrt_dim = 1.0 / math.sqrt(self.head_dim)
175
+ # init 2k 4.693 --> 4.687
176
+ scale_lora = math.sqrt(
177
+ (sum(self.qkv_list) + self.head_dim) * (self.head_dim + self.head_dim) /
178
+ (2 * self.head_dim * (self.hidden_size + sum(self.qkv_list)))
179
+ )
180
+ self.qkv[1].weight.data *= scale_lora
181
+
182
+ def forward(
183
+ self,
184
+ x: torch.Tensor,
185
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
186
+ past_key_value: Optional[torch.Tensor] = None,
187
+ attention_mask: Optional[torch.Tensor] = None,
188
+ use_cache: bool = False,
189
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
190
+
191
+ cos, sin = position_embeddings # [L, head_dim]
192
+ b, l, _ = x.shape
193
+
194
+ # fused
195
+ qkv = self.qkv(x)
196
+ qpe, q, kpe, kv = torch.split(qkv, self.qkv_list, dim=-1)# [b, l, hd * h // 2] [b, l, hd]
197
+
198
+ # z = self.z(x)
199
+ # qpe, q, kpe, kv = (
200
+ # self.qpe(z),
201
+ # self.qnope(z),
202
+ # self.kpe(z),
203
+ # self.kv(z)
204
+ # )
205
+
206
+ # 应用 RoPE
207
+ q = q.view(b, l, self.num_heads // 2, self.head_dim).permute(0, 2, 1, 3) # [b, l, h // 2, hd]
208
+ qpe = qpe.view(b, l, self.num_heads // 2, self.head_dim).permute(0, 2, 1, 3)# [b, l, h // 2, hd]
209
+ kv = kv.unsqueeze(1) # [b, 1, l, hd]
210
+ kpe = kpe.unsqueeze(1) # [b, 1, l, hd]
211
+ qpe, kpe = apply_rotary_pos_emb(qpe, kpe, cos[:l], sin[:l])
212
+ # 拼合
213
+ q = torch.cat([qpe, q], dim=1) # [b, h, l, hd]
214
+ kv = torch.cat([kpe, kv], dim=1) # [b, 2, l, hd]
215
+ deltakv = None
216
+ if self.delta_kv_only:
217
+ # 仅返回 delta kv
218
+ deltakv = kv
219
+
220
+ # kv_cache实现
221
+ if past_key_value is not None:
222
+ kv = torch.cat([past_key_value, kv], dim=2)
223
+ past_kv = kv if use_cache else None
224
+ _, _, l_all, _ = kv.shape
225
+
226
+ dropout_p = self.dropout if self.training else 0.0
227
+ attn_mask = None
228
+ if attention_mask is not None:
229
+ attn_mask = attention_mask.view(b, 1, 1, -1).expand(b, 1, l, -1)
230
+ attn_mask = attn_mask.bool() if attention_mask is not None else None
231
+
232
+ if self.training or self.force_flash_attn:
233
+ o = nn.functional.scaled_dot_product_attention(
234
+ q, repeat_kv(kv, self.num_heads // 2), repeat_kv(kv[:, 1:, :, :], self.num_heads),
235
+ attn_mask=attn_mask, dropout_p=dropout_p if self.training else 0.0, is_causal=True
236
+ )
237
+ else:
238
+ o = self.sdpa_math(
239
+ q, repeat_kv(kv, self.num_heads // 2), repeat_kv(kv[:, 1:, :, :], self.num_heads),
240
+ attn_mask, 0.0
241
+ )
242
+ # o: [b, h, l, hc]
243
+
244
+ # gate 2k4b peg: 5.169 nopeg: 5.179 +gate:5.210(4.622)
245
+ ope, onope = o.permute(0, 2, 1, 3).chunk(2, dim=2) # [b, l, h // 2, hc]
246
+ # o = onope * self.gate_act(ope) # [b, l, h // 2, hc] not stable
247
+ o = ope * self.gate_act(onope) # [b, l, h // 2, hc] testing
248
+ out = o.reshape(b, l, -1)
249
+
250
+ out = self.o(out)
251
+ out = nn.functional.dropout(out, p=self.dropout, training=self.training)
252
+ return out, (deltakv if self.delta_kv_only else past_kv)
253
+
254
+ def sdpa_math(self, q:torch.Tensor, k:torch.Tensor, v:torch.Tensor, attn_mask: Optional[torch.Tensor] = None,
255
+ dropout_p: float = 0.0) -> torch.Tensor:
256
+ b, h, l, c = q.shape
257
+ scores = (q @ k.transpose(-2, -1)) * self.rsqrt_dim
258
+ casual_mask = torch.triu(
259
+ torch.full((l, l), float("-inf"), device=scores.device),
260
+ diagonal=1
261
+ ).unsqueeze(0).unsqueeze(0)# [1, 1, l, l]
262
+ # 在左侧 zero pad 到 scores 的形状 [1, 1, l, l_all]
263
+ casual_mask = nn.functional.pad(casual_mask, (scores.shape[-1] - l, 0), "constant", 0.0)# [1, 1, l, l_all]
264
+ scores += casual_mask
265
+
266
+ if attn_mask is not None:
267
+ attn_mask = (1.0 - attn_mask.type_as(scores)) * -1e9
268
+ scores = scores + attn_mask
269
+
270
+ scores = nn.functional.softmax(scores.float(), dim=-1).type_as(q)
271
+ scores = nn.functional.dropout(scores, p=dropout_p, training=self.training)# [b, h, l, l]
272
+ output = scores @ v
273
+ return output
274
+
275
+ def use_delta_kv_only(self, enable:bool=True):
276
+ # 仅返回 delta kv,减少内存开销
277
+ self.delta_kv_only = enable
278
+
279
+
+ class Attn(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.dropout = config.dropout  # dropout rate
+         self.hidden_size = config.hidden_size  # input hidden size
+         self.num_heads = config.num_heads  # total number of attention heads
+         self.head_dim = config.head_dim  # dimension of each head
+         self.gate_act = ACT2FN[config.hidden_act]
+         self.delta_kv_only = False
+
+         assert self.num_heads % 2 == 0, "num_heads must be even."
+         ##### sparse #####
+         # qpe, q
+         self.qkv_list = [
+             self.num_heads * self.head_dim,  # q
+             2 * self.head_dim,  # k
+             2 * self.head_dim,  # v
+         ]
+         self.qkv = nn.Linear(self.hidden_size, sum(self.qkv_list), bias=False)
+         self.o = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=False)
+         self.rsqrt_dim = 1.0 / math.sqrt(self.head_dim)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         past_key_value: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         use_cache: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+
+         cos, sin = position_embeddings  # [L, head_dim]
+         b, l, _ = x.shape
+
+         # dense
+         qkv = self.qkv(x)
+         q, k, v = torch.split(qkv, self.qkv_list, dim=-1)  # q: [b, l, h * hd], k/v: [b, l, 2 * hd]
+         # qpe, q, kpe, kv = (
+         #     self.qpe(x),
+         #     self.qnope(x),
+         #     self.kpe(x),
+         #     self.kv(x)
+         # )
+
+         # apply RoPE
+         q = q.view(b, l, self.num_heads, self.head_dim).permute(0, 2, 1, 3)  # [b, h, l, hd]
+         k = k.view(b, l, 2, self.head_dim).permute(0, 2, 1, 3)  # [b, 2, l, hd]
+         v = v.view(b, l, 2, self.head_dim).permute(0, 2, 1, 3)  # [b, 2, l, hd]
+         q, k = apply_rotary_pos_emb(q, k, cos[:l], sin[:l])
+         deltakv = None
+         if self.delta_kv_only:
+             # return only the newly computed kv
+             deltakv = (k, v)
+
+         # kv cache
+         if past_key_value is not None:
+             k = torch.cat([past_key_value[0], k], dim=2)
+             v = torch.cat([past_key_value[1], v], dim=2)
+         past_kv = (k, v) if use_cache else None
+         _, _, l_all, _ = k.shape
+
+         dropout_p = self.dropout if self.training else 0.0
+         attn_mask = None
+         if attention_mask is not None:
+             attn_mask = attention_mask.view(b, 1, 1, -1).expand(b, 1, l, -1)
+             attn_mask = attn_mask.bool() if attention_mask is not None else None
+
+         if self.training:
+             o = nn.functional.scaled_dot_product_attention(
+                 q, repeat_kv(k, self.num_heads // 2), repeat_kv(v, self.num_heads // 2),
+                 attn_mask=attn_mask, dropout_p=dropout_p if self.training else 0.0, is_causal=True
+             )
+         else:
+             o = self.sdpa_math(
+                 q, repeat_kv(k, self.num_heads // 2), repeat_kv(v, self.num_heads // 2),
+                 attn_mask, 0.0
+             )
+         # o: [b, h, l, hc]
+         out = o.permute(0, 2, 1, 3).reshape(b, l, -1)
+         out = self.o(out)
+         out = nn.functional.dropout(out, p=self.dropout, training=self.training)
+         return out, (deltakv if self.delta_kv_only else past_kv)
+
+     def sdpa_math(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None,
+                   dropout_p: float = 0.0) -> torch.Tensor:
+         b, h, l, c = q.shape
+         scores = (q @ k.transpose(-2, -1)) * self.rsqrt_dim
+         causal_mask = torch.triu(
+             torch.full((l, l), float("-inf"), device=scores.device),
+             diagonal=1
+         ).unsqueeze(0).unsqueeze(0)  # [1, 1, l, l]
+         # zero-pad on the left to match the shape of scores: [1, 1, l, l_all]
+         causal_mask = nn.functional.pad(causal_mask, (scores.shape[-1] - l, 0), "constant", 0.0)  # [1, 1, l, l_all]
+         scores += causal_mask
+
+         if attn_mask is not None:
+             attn_mask = (1.0 - attn_mask.type_as(scores)) * -1e9
+             scores = scores + attn_mask
+
+         scores = nn.functional.softmax(scores.float(), dim=-1).type_as(q)
+         scores = nn.functional.dropout(scores, p=dropout_p, training=self.training)  # [b, h, l, l]
+         output = scores @ v
+         return output
+
+     def use_delta_kv_only(self, enable: bool = True):
+         # return only the delta kv to reduce memory overhead
+         self.delta_kv_only = enable
+
+
+ class YBlock2(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.attn = PEGA2(config)
+         self.ffn = FFN(config)
+         self.norm1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.norm2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     def forward(self,
+                 x: torch.Tensor,
+                 position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+                 past_key_value: Optional[torch.Tensor] = None,  # ffn_shard * kv cache
+                 use_cache: bool = False,
+                 attention_mask: Optional[torch.Tensor] = None
+                 ):
+         # attention
+         residual = x
+         x = self.norm1(x)
+         attn_out, past_kv = self.attn(
+             x,
+             position_embeddings,
+             past_key_value=past_key_value,
+             attention_mask=attention_mask,
+             use_cache=use_cache,
+         )
+         x = residual + attn_out
+         # ffn
+         residual = x
+         x = self.norm2(x)
+         ffn_out = self.ffn(x)
+         x = residual + ffn_out
+         return x, past_kv
+
+     def use_delta_kv_only(self, enable: bool = True):
+         self.attn.use_delta_kv_only(enable)
+
+
+ class YModel2(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.vocab_size = config.vocab_size
+         self.num_layers = config.num_layers
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.dropout = config.dropout
+         self.use_self_distill = config.self_distill
+
+         self.layers = nn.ModuleList([
+             YBlock2(config) for _ in range(config.num_layers)
+         ])
+
+         self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         freqs_cos, freqs_sin = precompute_freqs_cis(dim=config.head_dim,
+                                                     end=config.max_position_embeddings, theta=config.rope_theta)
+         self.register_buffer("freqs_cos", freqs_cos, persistent=False)
+         self.register_buffer("freqs_sin", freqs_sin, persistent=False)
+
+     def forward(self,
+                 input_ids: Optional[torch.Tensor] = None,
+                 attention_mask: Optional[torch.Tensor] = None,
+                 past_key_values: Optional[List[torch.Tensor]] = None,
+                 use_cache: bool = False,
+                 **kwargs
+                 ):
+         batch_size, seq_length = input_ids.shape
+         past_key_values = past_key_values or [None] * self.num_layers
+         start_pos = past_key_values[0].shape[-2] if past_key_values[0] is not None else 0
+
+         x = self.embed_tokens(input_ids)
+         x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+
+         position_embeddings = (
+             self.freqs_cos[start_pos:start_pos + seq_length],
+             self.freqs_sin[start_pos:start_pos + seq_length]
+         )
+
+         presents = []
+         cos_loss = None
+         for i, layer in enumerate(self.layers):
+             x0 = x
+             x, past_kv = layer(
+                 x=x,
+                 position_embeddings=position_embeddings,
+                 past_key_value=past_key_values[i],
+                 attention_mask=attention_mask,
+                 use_cache=use_cache
+             )
+             if self.training and self.use_self_distill:
+                 xd = x.detach()
+                 # cosine loss
+                 c_loss = 1.0 - nn.functional.cosine_similarity(x0, xd, dim=-1).mean()
+                 cos_loss = c_loss + cos_loss if cos_loss is not None else c_loss
+             presents.append(past_kv)
+         if cos_loss is not None:
+             cos_loss = cos_loss / self.num_layers
+         x = self.norm(x)
+         return x, presents, cos_loss
+
+     def delta_kv_only(self, delta_kv: bool = True):
+         for layer in self.layers:
+             layer.use_delta_kv_only(delta_kv)
+
+ class YForCausalLM2(PreTrainedModel, GenerationMixin):
+     config_class = YConfig2
+
+     def __init__(self, config: YConfig2 = None, **kwargs):
+         self.config = config or YConfig2()
+         super().__init__(self.config)
+         self.model = YModel2(self.config)
+         self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)
+         self.model.embed_tokens.weight = self.lm_head.weight
+         self.OUT = CausalLMOutputWithPast()
+         if kwargs.get('dtype') is not None:
+             dtype = kwargs['dtype']
+             m_dtype = torch.float32
+             if dtype == 'bfloat16':
+                 m_dtype = torch.bfloat16
+             elif dtype == 'float16':
+                 m_dtype = torch.float16
+             self.model.to(m_dtype)
+             self.lm_head.to(m_dtype)
+
+     def forward(self,
+                 input_ids: Optional[torch.Tensor] = None,
+                 attention_mask: Optional[torch.Tensor] = None,
+                 past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+                 use_cache: bool = False,
+                 logits_to_keep: Union[int, torch.Tensor] = 0,
+                 **args):
+         h, past_kvs, cos_loss = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             **args
+         )
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(h[:, slice_indices, :])
+         self.OUT.__setitem__('last_hidden_state', h)
+         self.OUT.__setitem__('logits', logits)
+         self.OUT.__setitem__('past_key_values', past_kvs)
+         if self.config.self_distill:
+             self.OUT.__setitem__('dist_loss', cos_loss)
+         return self.OUT
+
+     def delta_kv_only(self, delta_kv: bool = True):
+         self.model.delta_kv_only(delta_kv)
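
When config.self_distill is enabled, YForCausalLM2.forward additionally returns dist_loss: one minus the mean cosine similarity between each block's input and its detached output, averaged over the layers (computed only in training mode). The commit does not include a training loop; a hedged sketch of how the two terms might be combined, where the cross-entropy shift and the 0.1 weighting are assumptions rather than part of this repository:

import torch.nn.functional as F

def training_step(model, input_ids, attention_mask=None, distill_weight=0.1):
    out = model(input_ids=input_ids, attention_mask=attention_mask)
    # Shift so that position t predicts token t + 1, as in standard causal LM training.
    logits = out.logits[:, :-1, :]
    labels = input_ids[:, 1:]
    lm_loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), labels.reshape(-1))
    dist_loss = out.get("dist_loss", None)  # None when self_distill is off or in eval mode
    return lm_loss if dist_loss is None else lm_loss + distill_weight * dist_loss
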