SnifferCaptain committed
Commit fc37649 · verified · 1 Parent(s): cda4888

upload model

config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "architectures": [
+     "YForCausalLM2"
+   ],
+   "auto_map": {
+     "AutoConfig": "ymodel2.YConfig2",
+     "AutoModelForCausalLM": "ymodel2.YForCausalLM2"
+   },
+   "bos_token_id": 1,
+   "dropout": 0.1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "gelu_pytorch_tanh",
+   "hidden_size": 768,
+   "intermediate_size": 2048,
+   "max_position_embeddings": 4096,
+   "model_type": "ynet2",
+   "num_heads": 16,
+   "num_layers": 16,
+   "rms_norm_eps": 1e-08,
+   "rope_theta": 50000.0,
+   "self_distill": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "vocab_size": 6400
+ }
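The auto_map above routes the Auto classes to the custom code in ymodel2.py, so loading requires trust_remote_code=True. A minimal loading sketch, assuming the files from this commit sit in a local directory; the "./ynet2" path is a placeholder, not part of the repo:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("./ynet2", trust_remote_code=True)            # resolves to ymodel2.YConfig2
model = AutoModelForCausalLM.from_pretrained("./ynet2", trust_remote_code=True)   # resolves to ymodel2.YForCausalLM2
print(type(model).__name__)  # YForCausalLM2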
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.51.3"
+ }
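generation_config.json only pins the bos/eos ids; everything else falls back to defaults. A hedged sketch of a manual greedy decode that stops at eos_token_id=2 (<|im_end|>), reusing the placeholder "./ynet2" path; the prompt string and loop length are illustrative only:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./ynet2")
tokenizer = AutoTokenizer.from_pretrained("./ynet2")
model = AutoModelForCausalLM.from_pretrained("./ynet2", trust_remote_code=True).eval()

ids = tokenizer("<|im_start|>user\nhello<|im_end|>\n<|im_start|>assistant\n", return_tensors="pt").input_ids
with torch.no_grad():
    for _ in range(32):
        next_id = model(input_ids=ids).logits[:, -1, :].argmax(dim=-1, keepdim=True)
        ids = torch.cat([ids, next_id], dim=-1)
        if next_id.item() == gen_cfg.eos_token_id:  # 2 == <|im_end|>
            break
print(tokenizer.decode(ids[0]))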
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59cd2c3419ffa2a461c81598f77a33c5b631e49cf02b823320a6cec0cd27e666
+ size 198661163
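This is a Git LFS pointer rather than the weights themselves; after git lfs pull, the downloaded file can be checked against the sha256 recorded above (local filename assumed):

import hashlib

expected = "59cd2c3419ffa2a461c81598f77a33c5b631e49cf02b823320a6cec0cd27e666"
with open("pytorch_model.bin", "rb") as f:
    assert hashlib.sha256(f.read()).hexdigest() == expected, "file does not match the LFS pointer"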
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|im_start|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
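A quick sanity check that the map above is what the loaded tokenizer reports (placeholder path; requires the tokenizer files from this commit):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./ynet2")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected: <|im_start|> <|im_end|> <|endoftext|> <|endoftext|>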
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% else %}{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "PreTrainedTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
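The chat_template above is ChatML-style and injects a default system prompt when none is supplied; the user branch already appends the assistant header. A small sketch of the rendered prompt (placeholder path):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./ynet2")
text = tok.apply_chat_template([{"role": "user", "content": "Hi"}], tokenize=False)
print(text)
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# Hi<|im_end|>
# <|im_start|>assistant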
ymodel2.py ADDED
@@ -0,0 +1,516 @@
+ import math
+ import torch
+ import torch.nn as nn
+ from typing import Optional, Tuple, Union, List
+ from transformers import PreTrainedModel, GenerationMixin
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ class YConfig2(PretrainedConfig):
+     model_type = "ynet2"
+
+     def __init__(
+         self,
+         dropout: float = 0.1,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         hidden_act: str = 'gelu_pytorch_tanh',  # silu 4.687 / gelu 4.662 / mish 4.695 / relu2 4.755 / laplace
+         hidden_size: int = 768,
+         num_layers: int = 9,
+         max_position_embeddings: int = 8192,
+         vocab_size: int = 6400,
+         rms_norm_eps: float = 1e-8,
+         rope_theta: float = 5e4,  # 5e4
+         self_distill: bool = True,
+         ### FFN ###
+         intermediate_size: Optional[int] = None,  # 512 * 4 (full [4] / 256) = 2048 (2 ** 17)
+         ### attn ###
+         num_heads: int = 4,
+         head_dim: int = 64,
+         **kwargs
+     ):
+         super().__init__(**kwargs)
+         self.dropout = dropout
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.hidden_act = hidden_act
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers  # number of layers
+         self.max_position_embeddings = max_position_embeddings
+         self.vocab_size = vocab_size
+         self.rms_norm_eps = rms_norm_eps
+         self.rope_theta = rope_theta
+         self.self_distill = self_distill
+         ### FFN ###
+         self.intermediate_size = intermediate_size  # FFN intermediate dimension
+         ### attn ###
+         self.num_heads = num_heads  # number of query heads
+         self.head_dim = head_dim  # dimension of each head
+
+     def scale_lvl(self, lvl: int = 0):
+         if lvl == 0:
+             # normal settings [99.312m]
+             self.num_layers = 16
+             self.hidden_size = 768
+             self.num_heads = 16
+             self.head_dim = 128
+             self.intermediate_size = 2048
+         elif lvl == -1:
+             self.num_layers = 8
+             self.hidden_size = 512  # base = 4.662 16h/64d 30
+             self.num_heads = 8  # 2*heads 4.578/20.84
+             self.head_dim = 64  # 2*dim 4.576/22.8
+             self.intermediate_size = 1536
+         else:
+             raise ValueError(f"Invalid level: {lvl}")
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-6):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim, dtype=torch.float32))
+
+     def _norm(self, x):
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+     def forward(self, x):
+         output = self._norm(x.float())
+         output = output * self.weight.float()
+         return output.type_as(x)
+
+
+ def precompute_freqs_cis(dim: int, end: int = int(32 * 1024), theta: float = 5e4):
+     freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+     t = torch.arange(end, device=freqs.device)
+     freqs = torch.outer(t, freqs).float()
+     freqs_cos = torch.cat([torch.cos(freqs), torch.cos(freqs)], dim=-1)
+     freqs_sin = torch.cat([torch.sin(freqs), torch.sin(freqs)], dim=-1)
+     return freqs_cos, freqs_sin
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=0):
+     def rotate_half(x):
+         return torch.cat((-x[..., x.shape[-1] // 2:], x[..., : x.shape[-1] // 2]), dim=-1)
+
+     q_embed = (q * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(q) * sin.unsqueeze(unsqueeze_dim))
+     k_embed = (k * cos.unsqueeze(unsqueeze_dim)) + (rotate_half(k) * sin.unsqueeze(unsqueeze_dim))
+     return q_embed, k_embed
+
+
+ def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """Equivalent to torch.repeat_interleave(x, dim=1, repeats=n_rep) for a [b, h, l, ch] tensor."""
+     b, h, l, ch = x.shape
+     if n_rep == 1:
+         return x
+     return (
+         x[:, :, None, :, :]
+         .expand(b, h, n_rep, l, ch)
+         .reshape(b, h * n_rep, l, ch)
+     )
+
+
+ class FFN(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size or int(2.5 * config.hidden_size)
+         self.gate_act = ACT2FN[config.hidden_act]
+
+         self.up = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
+         # self.up = nn.Linear(self.hidden_size, self.intermediate_size)
+         # self.gate = nn.Linear(self.hidden_size, self.intermediate_size)
+         self.down = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x, g = self.up(x).chunk(2, dim=-1)
+         # x, g = self.up(x), self.gate(x)
+         x = self.gate_act(g) * x
+         x = self.down(x)
+         return x
+
+
+ class PEGA2(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.dropout = config.dropout  # dropout rate
+         self.hidden_size = config.hidden_size  # input channel size
+         self.num_heads = config.num_heads  # total number of attention heads
+         self.head_dim = config.head_dim  # dimension of each head
+         self.gate_act = ACT2FN[config.hidden_act]
+         self.delta_kv_only = False
+
+         assert self.num_heads % 2 == 0, "num_heads must be even."
+         # 2d opt: fused 29.5/4.693 split: 28.7/4.791
+         # qpe, q
+         self.qkv_list = [
+             self.num_heads // 2 * self.head_dim,  # qpe
+             self.num_heads // 2 * self.head_dim,  # qnope
+             self.head_dim,  # kpe
+             self.head_dim,  # kv
+         ]
+         self.qkv = nn.Sequential(
+             nn.Linear(self.hidden_size, self.head_dim, bias=False),
+             nn.Linear(self.head_dim, sum(self.qkv_list), bias=False)
+         )
+
+         # self.z = nn.Linear(self.hidden_size, self.head_dim, bias=False)
+         # self.qpe = nn.Linear(self.head_dim, self.num_heads // 2 * self.head_dim, bias=False)
+         # self.qnope = nn.Linear(self.head_dim, self.num_heads // 2 * self.head_dim, bias=False)
+         # self.kpe = nn.Linear(self.head_dim, self.head_dim, bias=False)
+         # self.kv = nn.Linear(self.head_dim, self.head_dim, bias=False)
+
+         self.o = nn.Linear(self.head_dim // 2 * self.num_heads, self.hidden_size, bias=False)
+         self.rsqrt_dim = 1.0 / math.sqrt(self.head_dim)
+         # init 2k 4.693 --> 4.687
+         scale_lora = math.sqrt(
+             (sum(self.qkv_list) + self.head_dim) * (self.head_dim + self.head_dim) /
+             (2 * self.head_dim * (self.hidden_size + sum(self.qkv_list)))
+         )
+         self.qkv[1].weight.data *= scale_lora
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         past_key_value: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         use_cache: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+
+         cos, sin = position_embeddings  # [L, head_dim]
+         b, l, _ = x.shape
+
+         # fused
+         qkv = self.qkv(x)
+         qpe, q, kpe, kv = torch.split(qkv, self.qkv_list, dim=-1)  # [b, l, hd * h // 2] [b, l, hd]
+
+         # z = self.z(x)
+         # qpe, q, kpe, kv = (
+         #     self.qpe(z),
+         #     self.qnope(z),
+         #     self.kpe(z),
+         #     self.kv(z)
+         # )
+
+         # apply RoPE
+         q = q.view(b, l, self.num_heads // 2, self.head_dim).permute(0, 2, 1, 3)  # [b, h // 2, l, hd]
+         qpe = qpe.view(b, l, self.num_heads // 2, self.head_dim).permute(0, 2, 1, 3)  # [b, h // 2, l, hd]
+         kv = kv.unsqueeze(1)  # [b, 1, l, hd]
+         kpe = kpe.unsqueeze(1)  # [b, 1, l, hd]
+         qpe, kpe = apply_rotary_pos_emb(qpe, kpe, cos[:l], sin[:l])
+         # merge back together
+         q = torch.cat([qpe, q], dim=1)  # [b, h, l, hd]
+         kv = torch.cat([kpe, kv], dim=1)  # [b, 2, l, hd]
+         deltakv = None
+         if self.delta_kv_only:
+             # return only the newly computed kv (delta)
+             deltakv = kv
+
+         # KV cache
+         if past_key_value is not None:
+             kv = torch.cat([past_key_value, kv], dim=2)
+         past_kv = kv if use_cache else None
+         _, _, l_all, _ = kv.shape
+
+         dropout_p = self.dropout if self.training else 0.0
+         attn_mask = None
+         if attention_mask is not None:
+             attn_mask = attention_mask.view(b, 1, 1, -1).expand(b, 1, l, -1)
+             attn_mask = attn_mask.bool()
+
+         if self.training:
+             o = nn.functional.scaled_dot_product_attention(
+                 q, repeat_kv(kv, self.num_heads // 2), repeat_kv(kv[:, 1:, :, :], self.num_heads),
+                 attn_mask=attn_mask, dropout_p=dropout_p, is_causal=True
+             )
+         else:
+             o = self.sdpa_math(
+                 q, repeat_kv(kv, self.num_heads // 2), repeat_kv(kv[:, 1:, :, :], self.num_heads),
+                 attn_mask, 0.0
+             )
+         # o: [b, h, l, hc]
+
+         # gate 2k4b peg: 5.169 nopeg: 5.179 +gate:5.210(4.622)
+         ope, onope = o.permute(0, 2, 1, 3).chunk(2, dim=2)  # [b, l, h // 2, hc]
+         # o = onope * self.gate_act(ope)  # [b, l, h // 2, hc] not stable
+         o = ope * self.gate_act(onope)  # [b, l, h // 2, hc] testing
+         out = o.reshape(b, l, -1)
+
+         out = self.o(out)
+         out = nn.functional.dropout(out, p=self.dropout, training=self.training)
+         return out, (deltakv if self.delta_kv_only else past_kv)
+
+     def sdpa_math(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None,
+                   dropout_p: float = 0.0) -> torch.Tensor:
+         b, h, l, c = q.shape
+         scores = (q @ k.transpose(-2, -1)) * self.rsqrt_dim
+         causal_mask = torch.triu(
+             torch.full((l, l), float("-inf"), device=scores.device),
+             diagonal=1
+         ).unsqueeze(0).unsqueeze(0)  # [1, 1, l, l]
+         # zero-pad on the left to match the shape of scores, [1, 1, l, l_all]
+         causal_mask = nn.functional.pad(causal_mask, (scores.shape[-1] - l, 0), "constant", 0.0)  # [1, 1, l, l_all]
+         scores += causal_mask
+
+         if attn_mask is not None:
+             attn_mask = (1.0 - attn_mask.type_as(scores)) * -1e9
+             scores = scores + attn_mask
+
+         scores = nn.functional.softmax(scores.float(), dim=-1).type_as(q)
+         scores = nn.functional.dropout(scores, p=dropout_p, training=self.training)  # [b, h, l, l]
+         output = scores @ v
+         return output
+
+     def use_delta_kv_only(self, enable: bool = True):
+         # return only the delta kv to reduce memory overhead
+         self.delta_kv_only = enable
+
+
+ class Attn(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.dropout = config.dropout  # dropout rate
+         self.hidden_size = config.hidden_size  # input channel size
+         self.num_heads = config.num_heads  # total number of attention heads
+         self.head_dim = config.head_dim  # dimension of each head
+         self.gate_act = ACT2FN[config.hidden_act]
+         self.delta_kv_only = False
+
+         assert self.num_heads % 2 == 0, "num_heads must be even."
+         ##### sparse #####
+         # qpe, q
+         self.qkv_list = [
+             self.num_heads * self.head_dim,  # q
+             2 * self.head_dim,  # k
+             2 * self.head_dim,  # v
+         ]
+         self.qkv = nn.Linear(self.hidden_size, sum(self.qkv_list), bias=False)
+         self.o = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=False)
+         self.rsqrt_dim = 1.0 / math.sqrt(self.head_dim)  # used by sdpa_math
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         past_key_value: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         use_cache: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+
+         cos, sin = position_embeddings  # [L, head_dim]
+         b, l, _ = x.shape
+
+         # dense
+         qkv = self.qkv(x)
+         q, k, v = torch.split(qkv, self.qkv_list, dim=-1)  # [b, l, h * hd] [b, l, 2 * hd] [b, l, 2 * hd]
+         # qpe, q, kpe, kv = (
+         #     self.qpe(x),
+         #     self.qnope(x),
+         #     self.kpe(x),
+         #     self.kv(x)
+         # )
+
+         # apply RoPE
+         q = q.view(b, l, self.num_heads, self.head_dim).permute(0, 2, 1, 3)  # [b, h, l, hd]
+         k = k.view(b, l, 2, self.head_dim).permute(0, 2, 1, 3)  # [b, 2, l, hd]
+         v = v.view(b, l, 2, self.head_dim).permute(0, 2, 1, 3)  # [b, 2, l, hd]
+         q, k = apply_rotary_pos_emb(q, k, cos[:l], sin[:l])
+         deltakv = None
+         if self.delta_kv_only:
+             # return only the newly computed kv (delta)
+             deltakv = (k, v)
+
+         # KV cache
+         if past_key_value is not None:
+             k = torch.cat([past_key_value[0], k], dim=2)
+             v = torch.cat([past_key_value[1], v], dim=2)
+         past_kv = (k, v) if use_cache else None
+         _, _, l_all, _ = k.shape
+
+         dropout_p = self.dropout if self.training else 0.0
+         attn_mask = None
+         if attention_mask is not None:
+             attn_mask = attention_mask.view(b, 1, 1, -1).expand(b, 1, l, -1)
+             attn_mask = attn_mask.bool()
+
+         if self.training:
+             o = nn.functional.scaled_dot_product_attention(
+                 q, repeat_kv(k, self.num_heads // 2), repeat_kv(v, self.num_heads // 2),
+                 attn_mask=attn_mask, dropout_p=dropout_p, is_causal=True
+             )
+         else:
+             o = self.sdpa_math(
+                 q, repeat_kv(k, self.num_heads // 2), repeat_kv(v, self.num_heads // 2),
+                 attn_mask, 0.0
+             )
+         # o: [b, h, l, hc]
+         out = o.permute(0, 2, 1, 3).reshape(b, l, -1)
+         out = self.o(out)
+         out = nn.functional.dropout(out, p=self.dropout, training=self.training)
+         return out, (deltakv if self.delta_kv_only else past_kv)
+
+     def sdpa_math(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None,
+                   dropout_p: float = 0.0) -> torch.Tensor:
+         b, h, l, c = q.shape
+         scores = (q @ k.transpose(-2, -1)) * self.rsqrt_dim
+         causal_mask = torch.triu(
+             torch.full((l, l), float("-inf"), device=scores.device),
+             diagonal=1
+         ).unsqueeze(0).unsqueeze(0)  # [1, 1, l, l]
+         # zero-pad on the left to match the shape of scores, [1, 1, l, l_all]
+         causal_mask = nn.functional.pad(causal_mask, (scores.shape[-1] - l, 0), "constant", 0.0)  # [1, 1, l, l_all]
+         scores += causal_mask
+
+         if attn_mask is not None:
+             attn_mask = (1.0 - attn_mask.type_as(scores)) * -1e9
+             scores = scores + attn_mask
+
+         scores = nn.functional.softmax(scores.float(), dim=-1).type_as(q)
+         scores = nn.functional.dropout(scores, p=dropout_p, training=self.training)  # [b, h, l, l]
+         output = scores @ v
+         return output
+
+     def use_delta_kv_only(self, enable: bool = True):
+         # return only the delta kv to reduce memory overhead
+         self.delta_kv_only = enable
+
+
+ class YBlock2(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.attn = PEGA2(config)
+         self.ffn = FFN(config)
+         self.norm1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.norm2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     def forward(self,
+                 x: torch.Tensor,
+                 position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+                 past_key_value: Optional[torch.Tensor] = None,  # ffn_shard * kv cache
+                 use_cache: bool = False,
+                 attention_mask: Optional[torch.Tensor] = None
+                 ):
+         # attention
+         residual = x
+         x = self.norm1(x)
+         attn_out, past_kv = self.attn(
+             x,
+             position_embeddings,
+             past_key_value=past_key_value,
+             attention_mask=attention_mask,
+             use_cache=use_cache,
+         )
+         x = residual + attn_out
+         # ffn
+         residual = x
+         x = self.norm2(x)
+         moe_out = self.ffn(x)
+         x = residual + moe_out
+         return x, past_kv
+
+     def use_delta_kv_only(self, enable: bool = True):
+         self.attn.use_delta_kv_only(enable)
+
+
+ class YModel2(nn.Module):
+     def __init__(self, config: YConfig2):
+         super().__init__()
+         self.vocab_size = config.vocab_size
+         self.num_layers = config.num_layers
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.dropout = config.dropout
+         self.use_self_distill = config.self_distill
+
+         self.layers = nn.ModuleList([
+             YBlock2(config) for _ in range(config.num_layers)
+         ])
+
+         self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         freqs_cos, freqs_sin = precompute_freqs_cis(dim=config.head_dim,
+                                                     end=config.max_position_embeddings, theta=config.rope_theta)
+         self.register_buffer("freqs_cos", freqs_cos, persistent=False)
+         self.register_buffer("freqs_sin", freqs_sin, persistent=False)
+
+     def forward(self,
+                 input_ids: Optional[torch.Tensor] = None,
+                 attention_mask: Optional[torch.Tensor] = None,
+                 past_key_values: Optional[List[torch.Tensor]] = None,
+                 use_cache: bool = False,
+                 **kwargs
+                 ):
+         batch_size, seq_length = input_ids.shape
+         past_key_values = past_key_values or [None] * self.num_layers
+         start_pos = past_key_values[0].shape[-2] if past_key_values[0] is not None else 0
+
+         x = self.embed_tokens(input_ids)
+         x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+
+         position_embeddings = (
+             self.freqs_cos[start_pos:start_pos + seq_length],
+             self.freqs_sin[start_pos:start_pos + seq_length]
+         )
+
+         presents = []
+         cos_loss = None
+         for i, layer in enumerate(self.layers):
+             x0 = x
+             x, past_kv = layer(
+                 x=x,
+                 position_embeddings=position_embeddings,
+                 past_key_value=past_key_values[i],
+                 attention_mask=attention_mask,
+                 use_cache=use_cache
+             )
+             if self.training and self.use_self_distill:
+                 xd = x.detach()
+                 # cosine loss
+                 c_loss = 1.0 - nn.functional.cosine_similarity(x0, xd, dim=-1).mean()
+                 cos_loss = c_loss + cos_loss if cos_loss is not None else c_loss
+             presents.append(past_kv)
+         if cos_loss is not None:
+             cos_loss = cos_loss / self.num_layers
+         x = self.norm(x)
+         return x, presents, cos_loss
+
+     def delta_kv_only(self, delta_kv: bool = True):
+         for layer in self.layers:
+             layer.use_delta_kv_only(delta_kv)
+
+ class YForCausalLM2(PreTrainedModel, GenerationMixin):
+     config_class = YConfig2
+
+     def __init__(self, config: YConfig2 = None):
+         self.config = config or YConfig2()
+         super().__init__(self.config)
+         self.model = YModel2(self.config)
+         self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)
+         self.model.embed_tokens.weight = self.lm_head.weight
+         self.OUT = CausalLMOutputWithPast()
+
+     def forward(self,
+                 input_ids: Optional[torch.Tensor] = None,
+                 attention_mask: Optional[torch.Tensor] = None,
+                 past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+                 use_cache: bool = False,
+                 logits_to_keep: Union[int, torch.Tensor] = 0,
+                 **args):
+         h, past_kvs, cos_loss = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             **args
+         )
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(h[:, slice_indices, :])
+         self.OUT.__setitem__('last_hidden_state', h)
+         self.OUT.__setitem__('logits', logits)
+         self.OUT.__setitem__('past_key_values', past_kvs)
+         if self.config.self_distill:
+             self.OUT.__setitem__('dist_loss', cos_loss)
+         return self.OUT
+
+     def delta_kv_only(self, delta_kv: bool = True):
+         self.model.delta_kv_only(delta_kv)
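A standalone smoke-test sketch for ymodel2.py that needs no checkpoint: scale_lvl(0) matches the shipped config.json sizes (16 layers, hidden 768, 16 heads, head_dim 128, intermediate 2048), and with the embedding tied to lm_head the parameter count lands at the ~99.31M noted in the scale_lvl comment. The script itself is illustrative, not part of the upload:

import torch
from ymodel2 import YConfig2, YForCausalLM2

cfg = YConfig2()
cfg.scale_lvl(0)                                      # same sizes as the uploaded config.json
model = YForCausalLM2(cfg).eval()
print(sum(p.numel() for p in model.parameters()))     # 99312384 (tied embedding counted once)

ids = torch.randint(0, cfg.vocab_size, (1, 16))
with torch.no_grad():
    out = model(input_ids=ids, use_cache=True)
print(out.logits.shape)                               # torch.Size([1, 16, 6400])
print(len(out.past_key_values))                       # 16, one [b, 2, l, head_dim] cache tensor per layer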