abcsk123 committed
Commit f98f57f · verified · 1 Parent(s): 7955339

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + message.content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
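Note: the template above injects tool schemas inside <tools>...</tools> in the system turn and opens an assistant turn when add_generation_prompt is set. A minimal rendering sketch (the repo id and the get_weather tool are placeholders, and passing tools= to apply_chat_template assumes a recent transformers release):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("abcsk123/qwen2-hybrid")  # hypothetical repo id
messages = [{"role": "user", "content": "What is the weather in Paris?"}]
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative tool, not part of this repo
        "description": "Look up the current weather for a city.",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
    },
}]
prompt = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, tokenize=False)
print(prompt)  # system turn with <tools>...</tools>, the user turn, then an opened <|im_start|>assistant turn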
config.json ADDED
@@ -0,0 +1,67 @@
+ {
+ "architectures": [
+ "Qwen2HybridForCausalLM"
+ ],
+ "model_type": "qwen2_hybrid",
+ "auto_map": {
+ "AutoConfig": "configuration_qwen2_hybrid.Qwen2HybridConfig",
+ "AutoModelForCausalLM": "modeling_qwen2_hybrid.Qwen2HybridForCausalLM"
+ },
+
+ "gqa_sliding_window": 32768,
+ "soft_sliding_window": 8192,
+ "sink_size": 64,
+
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "dtype": "bfloat16",
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "transformers_version": "4.57.6",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
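Note: since architectures and auto_map point at the custom classes shipped in this repo, loading the checkpoint needs trust_remote_code so the two .py files are imported from the repo. A minimal loading sketch (the repo id is a placeholder):

import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo = "abcsk123/qwen2-hybrid"  # hypothetical repo id
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)  # resolves Qwen2HybridConfig via auto_map
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" above
    trust_remote_code=True,      # imports modeling_qwen2_hybrid.Qwen2HybridForCausalLM from the repo
)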
configuration_qwen2_hybrid.py ADDED
@@ -0,0 +1,32 @@
+ # configuration_qwen2_hybrid.py
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
+ 
+ class Qwen2HybridConfig(Qwen2Config):
+     model_type = "qwen2_hybrid"
+ 
+     def __init__(
+         self,
+         gqa_layers=7,             # layers 0-6 use GQA
+         shared_layer_idx=7,       # layer 7 is the shared MLA layer
+         soft_mid_layers_end=23,   # layers 8-22 are the soft "mid" zone
+         soft_deep_layers_end=28,  # layers 23-27 are the soft "deep" zone
+         gqa_sliding_window=32768,
+         soft_sliding_window=8192,
+         shared_rank=320,
+         soft_rank_mid=192,
+         soft_rank_deep=128,
+         sink_size=64,
+         **kwargs,
+     ):
+         self.gqa_layers = gqa_layers
+         self.shared_layer_idx = shared_layer_idx
+         self.soft_mid_layers_end = soft_mid_layers_end
+         self.soft_deep_layers_end = soft_deep_layers_end
+         self.gqa_sliding_window = gqa_sliding_window
+         self.soft_sliding_window = soft_sliding_window
+         self.shared_rank = shared_rank
+         self.soft_rank_mid = soft_rank_mid
+         self.soft_rank_deep = soft_rank_deep
+         self.sink_size = sink_size
+ 
+         super().__init__(**kwargs)
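Note: the defaults above split the 28 layers into four zones; the helpers in modeling_qwen2_hybrid.py hard-code the same boundaries. A small sketch of how the indices fall out under these defaults (assumes it is run from a local checkout so the configuration file is importable):

from configuration_qwen2_hybrid import Qwen2HybridConfig

cfg = Qwen2HybridConfig()
gqa    = list(range(cfg.gqa_layers))                                     # layers 0-6: stock GQA attention, 32k window
shared = [cfg.shared_layer_idx]                                          # layer 7: shared MLA, rank 320, no window
mid    = list(range(cfg.shared_layer_idx + 1, cfg.soft_mid_layers_end))  # layers 8-22: soft MLA, rank 192, 8k window + sinks
deep   = list(range(cfg.soft_mid_layers_end, cfg.soft_deep_layers_end))  # layers 23-27: soft MLA, rank 128, 8k window + sinks
print(gqa, shared, mid, deep)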
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "transformers_version": "4.57.6"
+ }
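Note: generation_config.json only pins the BOS/EOS ids, so stopping behaviour comes from <|endoftext|> (151643). A short generation sketch (repo id is again a placeholder):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "abcsk123/qwen2-hybrid"  # hypothetical repo id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, trust_remote_code=True)
inputs = tok("def fibonacci(n):", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64, eos_token_id=151643)  # 151643 = <|endoftext|>, as in generation_config.json
print(tok.decode(out[0], skip_special_tokens=True))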
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:240ee62038beb60098490fa1438df7e646f18e5dc6c64640d9488645dd47f0fa
+ size 3089978898
modeling_qwen2_hybrid.py ADDED
@@ -0,0 +1,797 @@
+ from __future__ import annotations
+ from .configuration_qwen2_hybrid import Qwen2HybridConfig
+ from typing import Dict, List, Optional, Tuple, Union
+ 
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ 
+ from transformers.cache_utils import Cache
+ from transformers.generation.utils import GenerationMixin
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.utils import add_start_docstrings, logging
+ 
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
+ from transformers.models.qwen2.modeling_qwen2 import (
+     Qwen2Attention,
+     Qwen2MLP,
+     Qwen2PreTrainedModel,
+     Qwen2RMSNorm,
+     Qwen2RotaryEmbedding,
+     apply_rotary_pos_emb,
+     repeat_kv,
+ )
+ import transformers.models.qwen2.modeling_qwen2 as qwen2_modeling
+ 
+ logger = logging.get_logger(__name__)
+ 
+ _GQA_LAYERS = set(range(0, 7))
+ _SHARED_LAYER = 7
+ _SOFT_MID_LAYERS = set(range(8, 23))
+ _SOFT_DEEP_LAYERS = set(range(23, 28))
+ 
+ _GQA_SLIDING_WINDOW = 32768  # the early layers keep a very large sliding window
+ # _SOFT_SLIDING_WINDOW = 4096
+ _SOFT_SLIDING_WINDOW = 8192
+ 
+ _SHARED_RANK = 320  # hidden_size is 1536
+ _SOFT_RANK_MID = 192
+ _SOFT_RANK_DEEP = 128
+ 
+ def _layer_role(layer_idx: int) -> str:
+     if layer_idx in _GQA_LAYERS: return "gqa"
+     if layer_idx == _SHARED_LAYER: return "shared_mla"
+     return "soft_mla"
+ 
+ def _mla_rank(layer_idx: int) -> int:
+     if layer_idx == _SHARED_LAYER: return _SHARED_RANK
+     if layer_idx in _SOFT_MID_LAYERS: return _SOFT_RANK_MID
+     return _SOFT_RANK_DEEP
+ 
+ def _mla_sliding_window(layer_idx: int) -> Optional[int]:
+     return None if layer_idx == _SHARED_LAYER else _SOFT_SLIDING_WINDOW
+ 
+ def _mla_zone(layer_idx: int) -> str:
+     if layer_idx in _GQA_LAYERS: return "gqa"
+     if layer_idx == _SHARED_LAYER: return "shared"
+     if layer_idx in _SOFT_MID_LAYERS: return "mid"
+     return "deep"
+ 
+ # HybridCache: a dual-mode cache with Attention Sinks support.
+ # Two key pieces here: hybrid cache management (HybridCache) and cross-layer feature sharing (SharedLatentGate).
+ # The hybrid model backbone below is what instantiates HybridCache.
+ class HybridCache(Cache):  # subclasses the HF Cache class
+     def __init__(self, config: Qwen2Config):
+         try:
+             super().__init__(layers=config.num_hidden_layers)  # newer transformers versions require the layer count
+         except TypeError:
+             super().__init__()
+ 
+         self.config = config
+         n = config.num_hidden_layers
+         self._gqa_k: List[Optional[torch.Tensor]] = [None] * n  # shape: typically [batch, num_kv_heads, seq_len, head_dim]
+         self._gqa_v: List[Optional[torch.Tensor]] = [None] * n
+         self._latent: List[Optional[torch.Tensor]] = [None] * n  # layer 7's _latent is also read by SharedLatentGate for cross-layer feature passing
+         self._seen_tokens: int = 0  # total tokens processed so far; the basis for cache_position and RoPE
+ 
+     # Cache-API entry point; simply delegates to update_gqa.
+     def update(self, key_states, value_states, layer_idx, cache_kwargs=None):
+         return self.update_gqa(key_states, value_states, layer_idx)
+ 
+     # Length of the sequence processed so far.
+     def get_seq_length(self, layer_idx: int = 0) -> int:
+         return self._seen_tokens
+ 
+     # No static maximum size: the cache grows (and is trimmed) dynamically.
+     def get_max_cache_shape(self) -> Optional[int]:
+         return None
+ 
+     def update_gqa(self, key, value, layer_idx, sliding_window=_GQA_SLIDING_WINDOW):
+         if self._gqa_k[layer_idx] is None:
+             self._gqa_k[layer_idx] = key
+             self._gqa_v[layer_idx] = value
+         else:
+             self._gqa_k[layer_idx] = torch.cat([self._gqa_k[layer_idx], key], dim=2)
+             self._gqa_v[layer_idx] = torch.cat([self._gqa_v[layer_idx], value], dim=2)
+         T = self._gqa_k[layer_idx].shape[2]  # seq_len of the cached history
+ 
+         # The GQA path keeps only the last sliding_window tokens.
+         if T > sliding_window:
+             self._gqa_k[layer_idx] = self._gqa_k[layer_idx][:, :, -sliding_window:, :]
+             self._gqa_v[layer_idx] = self._gqa_v[layer_idx][:, :, -sliding_window:, :]
+         if layer_idx == 0:
+             self._seen_tokens += key.shape[2]  # advanced once per forward pass (many tokens at prefill, one per decode step)
+         return self._gqa_k[layer_idx], self._gqa_v[layer_idx]  # the KV cache including history
+ 
+     # This method was reworked along the lines of StreamingLLM.
+     # def update_latent(self, c_kv, layer_idx, sliding_window=None):
+     #     if self._latent[layer_idx] is None:
+     #         self._latent[layer_idx] = c_kv
+     #     else:
+     #         self._latent[layer_idx] = torch.cat([self._latent[layer_idx], c_kv], dim=1)
+     #     if sliding_window is not None:
+     #         T = self._latent[layer_idx].shape[1]
+     #         if T > sliding_window:
+     #             self._latent[layer_idx] = self._latent[layer_idx][:, -sliding_window:, :]
+     #     return self._latent[layer_idx]
+ 
+     # Update the latent cache.
+     def update_latent(self, c_kv, layer_idx, sliding_window=None, sink_size=64):  # MLA windows are much smaller than GQA's, hence the sink
+         if self._latent[layer_idx] is None:
+             self._latent[layer_idx] = c_kv
+         else:
+             self._latent[layer_idx] = torch.cat([self._latent[layer_idx], c_kv], dim=1)  # note: the latent cache concatenates on dim=1, unlike the GQA cache (dim=2)
+ 
+         if sliding_window is not None:
+             T = self._latent[layer_idx].shape[1]
+             if T > sliding_window:
+                 # Attention Sinks: keep the first sink_size tokens plus the most recent tokens.
+                 sink_tokens = self._latent[layer_idx][:, :sink_size, :]  # the first sink_size tokens are kept forever; every eviction re-selects the same prefix
+                 recent_tokens = self._latent[layer_idx][:, -(sliding_window - sink_size):, :]  # the recent span shrinks by sink_size so the total stays within the window
+                 self._latent[layer_idx] = torch.cat([sink_tokens, recent_tokens], dim=1)  # (some of this concatenation is arguably redundant work)
+         return self._latent[layer_idx]  # return the updated cache
+ 
+     # Latent cache of the shared layer.
+     def get_shared_latent(self) -> Optional[torch.Tensor]:
+         return self._latent[_SHARED_LAYER]
+ 
+     # Move the cached tensors to a device.
+     def to(self, device):
+         # Model parameters move with model.to(device), but the tensor lists held
+         # by the cache have to be moved manually so computation can proceed.
+         for i in range(len(self._gqa_k)):
+             if self._gqa_k[i] is not None:
+                 self._gqa_k[i] = self._gqa_k[i].to(device)
+                 self._gqa_v[i] = self._gqa_v[i].to(device)
+             if self._latent[i] is not None:
+                 self._latent[i] = self._latent[i].to(device)
+         return self
+ 
+ # Disguises HybridCache as a plain Cache so the stock GQA attention layers can keep their existing logic.
+ # It is handed to the GQA layers in place of the real cache object.
+ class _GQASlotAdapter:
+     def __init__(self, cache: HybridCache, sliding_window: int = _GQA_SLIDING_WINDOW):
+         self._cache = cache
+         self._window = sliding_window
+ 
+     def update(self, key_states, value_states, layer_idx, cache_kwargs=None):
+         return self._cache.update_gqa(key_states, value_states, layer_idx, self._window)
+ 
+     def get_seq_length(self, layer_idx: int = 0) -> int:
+         return self._cache.get_seq_length(layer_idx)
+ 
+     def get_max_cache_shape(self) -> Optional[int]:
+         return None
+ 
+ # Implements cross-layer feature communication with a smooth warm-up.
+ # Essentially a gated residual projector:
+ # it lets deep layers directly reuse features already extracted by the shared layer.
+ class SharedLatentGate(nn.Module):
+     def __init__(self, config: Qwen2Config):
+         super().__init__()
+         H = config.hidden_size
+         self.cross_proj = nn.Linear(_SHARED_RANK, H, bias=False)  # project from _SHARED_RANK back up to H
+         self.gate = nn.Parameter(torch.full((H,), -4.0))  # an (H,) vector: one independent gate per hidden dimension
+         self.warmup_alpha = nn.Parameter(torch.tensor(0.0))  # scales the whole contribution; the global valve
+         self.norm = Qwen2RMSNorm(H, eps=config.rms_norm_eps)
+ 
+     def forward(self, hidden_states, cache=None, explicit_shared=None):
+         # Supports both training/prefill and incremental decoding.
+         # Training (or the first forward pass) uses explicit_shared.
+         if cache is not None and cache.get_shared_latent() is not None:  # get_shared_latent returns the shared layer's cached latent
+             shared = cache.get_shared_latent()  # the shared layer's cache accumulated so far
+         elif explicit_shared is not None:  # training passes the latent explicitly, avoiding frequent cache reads/writes
+             shared = explicit_shared
+         else:  # no shared latent available: pass through unchanged
+             return hidden_states
+ 
+         B, T, _ = hidden_states.shape  # T is the current input length
+         T_full = shared.shape[1]  # sequence length of the shared latent
+ 
+         # Fix: only project the tokens needed for the current step, to avoid mixing in stale history.
+         # Lengths are kept aligned so each token only receives the shallow features of its own position.
+         # (Open question: how much does passing shallow features directly into deep layers actually help?)
+         if T_full != T:
+             shared = shared[:, -T:, :]
+ 
+         # Take the matching c_kv slice and expand it from rank back to H,
+         # since it is added onto the current tokens' hidden states.
+         proj = self.cross_proj(shared)
+         proj = self.norm(proj)
+ 
+         # Build the gate.
+         gate_weight = torch.sigmoid(self.gate) * self.warmup_alpha
+         # hidden_states is [batch, seq_len, dim]
+         return hidden_states + gate_weight.unsqueeze(0).unsqueeze(0) * proj  # unsqueeze adds broadcast dims for batch and sequence
+ 
+ 
+ class Qwen2MLASoftAttention(nn.Module):
+     def __init__(self, config, layer_idx, kv_lora_rank, sliding_window):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.kv_lora_rank = kv_lora_rank
+         self.sliding_window = sliding_window
+ 
+         H = config.hidden_size
+         nh = config.num_attention_heads   # 12 in the config, so head_dim works out to 128
+         nkv = config.num_key_value_heads  # 2 in the config; GQA-style grouping
+         self.head_dim = getattr(config, "head_dim", H // nh)
+         self.num_heads = nh
+         self.num_kv_heads = nkv
+         self.num_kv_groups = nh // nkv  # needed by repeat_kv: 12 heads / 2 kv heads = 6 heads per group
+         self.scaling = self.head_dim ** -0.5  # keeps the pre-softmax variance near 1 so softmax does not saturate and gradients keep flowing
+ 
+         self.q_proj = nn.Linear(H, nh * self.head_dim, bias=True)
+         self.kv_down_proj = nn.Linear(H, kv_lora_rank, bias=False)  # originally 2 * num_kv_heads * head_dim = 512; compressed to kv_lora_rank {L7: 320, L8-22: 192, L23-27: 128}; in practice this compresses a bit too much
+         self.k_up_proj = nn.Linear(kv_lora_rank, nkv * self.head_dim, bias=True)  # (the up-projected keys remain low-rank; is that acceptable?)
+         self.v_up_proj = nn.Linear(kv_lora_rank, nkv * self.head_dim, bias=True)  # (open question: low-rank up-projection vs. plain GQA head replication)
+ 
+         self.o_proj = nn.Linear(nh * self.head_dim, H, bias=False)
+         # Per-head norms applied to k/v after the up-projection.
+         self.k_norm = Qwen2RMSNorm(self.head_dim, eps=config.rms_norm_eps)
+         self.v_norm = Qwen2RMSNorm(self.head_dim, eps=config.rms_norm_eps)
+         # Rotary embedding module.
+         self.rotary_emb = Qwen2RotaryEmbedding(config=config)
+         self.output_alpha = nn.Parameter(torch.tensor(0.0))
+ 
+     # The tricky parts of this forward pass are the KV cache, the mask, and the positions.
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],  # (cos, sin) for the query positions
+         attention_mask: Optional[torch.Tensor],  # 4D causal mask, or None
+         past_key_values: Optional[HybridCache] = None,  # hybrid cache holding the latent history
+         cache_position: Optional[torch.LongTensor] = None,  # absolute positions of the current tokens
+         full_position_ids: Optional[torch.LongTensor] = None,  # per-sample absolute positions (handles left padding)
+         **kwargs,  # remaining arguments are accepted for interface compatibility
+     ) -> Tuple[torch.Tensor, None]:
+         B, T, H = hidden_states.shape
+         cos, sin = position_embeddings  # rotary cos/sin for the query positions
+ 
+         # q is not normalized here; the decoder layer already applied input_layernorm to the input.
+         q = self.q_proj(hidden_states)
+         q = q.view(B, T, self.num_heads, self.head_dim).transpose(1, 2)
+         q, _ = apply_rotary_pos_emb(q, q, cos, sin)  # apply RoPE to the queries
+ 
+         # [batch_size, seq_len, kv_latent_dim]
+         c_kv = self.kv_down_proj(hidden_states)
+ 
+         # Fix for slicing crashes: concatenate and cache independently.
+         # This is the core of the inference path: the latent KV cache.
+         if past_key_values is not None:
+             past_latent = past_key_values._latent[self.layer_idx]  # read the cached latent before updating, so the current chunk is not double-counted
+             if past_latent is not None:
+                 full_c_kv = torch.cat([past_latent, c_kv], dim=1)
+             else:
+                 full_c_kv = c_kv
+             past_key_values.update_latent(c_kv, self.layer_idx, sliding_window=self.sliding_window)
+         else:
+             full_c_kv = c_kv
+ 
+         T_kv = full_c_kv.shape[1]
+ 
+         k = self.k_up_proj(full_c_kv).view(B, T_kv, self.num_kv_heads, self.head_dim)
+         v = self.v_up_proj(full_c_kv).view(B, T_kv, self.num_kv_heads, self.head_dim)
+         # Per-head RMSNorm on k/v after the up-projection (normalizing over head_dim, hence before the transpose).
+         k = self.k_norm(k).transpose(1, 2)
+         v = self.v_norm(v).transpose(1, 2)
+ 
+         # # Absolute-position fix (earlier variant): support left padding with batch size > 1.
+         # if full_position_ids is not None:
+         #     full_pos_ids = full_position_ids[:, -T_kv:]
+ 
+         # Absolute-position fix (earlier variant): support left padding with batch size > 1.
+         # (The three commented lines below were replaced as well.)
+         # if full_position_ids is not None:
+         #     full_pos_ids = full_position_ids[:, -T_kv:].contiguous()
+         # elif cache_position is not None:
+ 
+         # Absolute-position fix: supports Attention Sinks and left padding.
+         S = 64  # sink size; must stay consistent with the cache
+         # full_position_ids covers the whole (possibly left-padded) sequence.
+         if full_position_ids is not None:
+             total_seq_len = full_position_ids.shape[1]
+             # Within the sliding window, or during prefill (T_kv == total_seq_len): just take the tail.
+             if self.sliding_window is None or total_seq_len <= self.sliding_window or T_kv == total_seq_len:
+                 full_pos_ids = full_position_ids[:, -T_kv:].contiguous()
+             else:
+                 # Sink splicing: take the first S positions plus the trailing remainder.
+                 sink_pos = full_position_ids[:, :S]
+                 recent_pos = full_position_ids[:, -(T_kv - S):]
+                 full_pos_ids = torch.cat([sink_pos, recent_pos], dim=1).contiguous()
+         elif cache_position is not None:
+             last_abs_pos_t = cache_position[-1]
+             full_pos_ids = (torch.arange(T_kv, device=hidden_states.device, dtype=torch.long) + (last_abs_pos_t + 1 - T_kv)).unsqueeze(0)
+         else:
+             full_pos_ids = torch.arange(T_kv, device=hidden_states.device, dtype=torch.long).unsqueeze(0)
+ 
+         # Recompute rotary embeddings for the full key positions.
+         cos_k, sin_k = self.rotary_emb(k, full_pos_ids)
+         k, _ = apply_rotary_pos_emb(k, k, cos_k, sin_k)
+ 
+         k = repeat_kv(k, self.num_kv_groups)
+         v = repeat_kv(v, self.num_kv_groups)
+ 
+         # Make the tensors contiguous before scaled_dot_product_attention.
+         q, k, v = q.contiguous(), k.contiguous(), v.contiguous()
+ 
+         # kv_seq_len = k.shape[2]
+         # if attention_mask is not None and attention_mask.shape[-1] > kv_seq_len:
+         #     attention_mask = attention_mask[..., :, -kv_seq_len:]
+         # Revised version (adds .contiguous()):
+         # kv_seq_len = k.shape[2]
+         # if attention_mask is not None and attention_mask.shape[-1] > kv_seq_len:
+         #     attention_mask = attention_mask[..., :, -kv_seq_len:].contiguous()
+         # Latest revision below; it mirrors the sink logic above.
+         kv_seq_len = k.shape[2]
+         if attention_mask is not None and attention_mask.shape[-1] > kv_seq_len:
+             total_mask_len = attention_mask.shape[-1]
+             if self.sliding_window is None or total_mask_len <= self.sliding_window or kv_seq_len == total_mask_len:
+                 attention_mask = attention_mask[..., :, -kv_seq_len:].contiguous()
+             else:
+                 # The mask has to be spliced the same way as the sinks.
+                 sink_mask = attention_mask[..., :, :S]
+                 recent_mask = attention_mask[..., :, -(kv_seq_len - S):]
+                 attention_mask = torch.cat([sink_mask, recent_mask], dim=-1).contiguous()
+ 
+ 
+         is_causal = True if (attention_mask is None and T > 1) else False
+ 
+         out = F.scaled_dot_product_attention(
+             q, k, v,
+             attn_mask=attention_mask,
+             dropout_p=0.0,
+             is_causal=is_causal,
+             scale=self.scaling
+         )
+ 
+         out = out.transpose(1, 2).contiguous().view(B, T, -1)
+         out = self.o_proj(out) * self.output_alpha
+         return out, c_kv
+ 
+ 
+ # The model's self.layers below stacks these decoder layers.
+ class Qwen2HybridDecoderLayer(nn.Module):
+     def __init__(self, config: Qwen2Config, layer_idx: int):
+         super().__init__()
+         self.layer_idx = layer_idx
+         self.layer_role = _layer_role(layer_idx)
+ 
+         if self.layer_role == "gqa":
+             attn_impl = getattr(config, "_attn_implementation", "sdpa")
+             attn_class = getattr(qwen2_modeling, "QWEN2_ATTENTION_CLASSES", {}).get(attn_impl, Qwen2Attention)
+             self.self_attn = attn_class(config=config, layer_idx=layer_idx)
+         else:
+             self.self_attn = Qwen2MLASoftAttention(
+                 config=config, layer_idx=layer_idx,
+                 kv_lora_rank=_mla_rank(layer_idx), sliding_window=_mla_sliding_window(layer_idx)
+             )
+ 
+         self.shared_gate = SharedLatentGate(config) if self.layer_role == "soft_mla" else None
+         self.mlp = Qwen2MLP(config)
+         self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ 
+     def forward(
+         self, hidden_states, attention_mask=None, position_ids=None, past_key_values=None,
+         use_cache=False, cache_position=None, position_embeddings=None, output_attentions=False,
+         shared_latent=None, full_position_ids=None, **kwargs,
+     ):
+         if self.shared_gate is not None:
+             # The GQA layers receive a _GQASlotAdapter instead of the real cache,
+             # so unwrap it here: the shared gate needs the underlying HybridCache.
+             real_cache = past_key_values._cache if isinstance(past_key_values, _GQASlotAdapter) else past_key_values
+             # real_cache is a HybridCache object.
+             hidden_states = self.shared_gate(hidden_states, cache=real_cache, explicit_shared=shared_latent)
+ 
+         # First half of the decoder block: mid_output = x + Attn(Norm(x)).
+         residual = hidden_states  # residual connection
+         normed_input = self.input_layernorm(hidden_states)  # input norm before attention
+ 
+         # GQA layers get the incremental position_ids; MLA layers get full_position_ids.
+         if self.layer_role == "gqa":
+             attn_outputs = self.self_attn(
+                 hidden_states=normed_input, attention_mask=attention_mask, position_ids=position_ids,
+                 past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache,
+                 cache_position=cache_position, position_embeddings=position_embeddings,  # GQA positions are already handled incrementally upstream
+             )
+             if len(attn_outputs) == 3:
+                 attn_out, _, past_key_values = attn_outputs
+             elif len(attn_outputs) == 2:
+                 attn_out, past_key_values = attn_outputs
+             else:
+                 attn_out = attn_outputs[0]; past_key_values = None
+             hidden_states = attn_out
+         else:
+             attn_out, c_kv = self.self_attn(
+                 hidden_states=normed_input, position_embeddings=position_embeddings, attention_mask=attention_mask,
+                 past_key_values=past_key_values, cache_position=cache_position, full_position_ids=full_position_ids,  # MLA re-derives positions for the full key sequence
+             )
+             hidden_states = attn_out
+             if self.layer_role == "shared_mla":
+                 shared_latent = c_kv
+ 
+         hidden_states = residual + hidden_states
+ 
+         # Second half of the standard decoder block: output = x + MLP(Norm(x)).
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+ 
+         return hidden_states, shared_latent  # shared_latent is threaded back so training/prefill can pass the shared layer's latent explicitly
+ 
+ @add_start_docstrings("Qwen2.5-Coder asymmetric hybrid-architecture backbone, v9.")
+ class Qwen2HybridModel(Qwen2PreTrainedModel):
+     config_class = Qwen2HybridConfig  # required so from_pretrained resolves the custom config
+     def __init__(self, config: Qwen2HybridConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([Qwen2HybridDecoderLayer(config, i) for i in range(config.num_hidden_layers)])
+         self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = Qwen2RotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+         self.post_init()
+ 
+     def get_input_embeddings(self): return self.embed_tokens
+     def set_input_embeddings(self, value): self.embed_tokens = value
+ 
+     def forward(
+         self, input_ids=None, attention_mask=None, position_ids=None, past_key_values=None,
+         inputs_embeds=None, use_cache=None, cache_position=None, output_attentions=False,
+         output_hidden_states=False, return_dict=True, **kwargs,
+     ):
+         # Input handling.
+         if (input_ids is None) == (inputs_embeds is None):
+             raise ValueError("Exactly one of input_ids or inputs_embeds must be provided")
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+ 
+         B, T, _ = inputs_embeds.shape
+ 
+         # If caching is enabled and no suitable cache exists yet, create one here.
+         if use_cache:
+             if not isinstance(past_key_values, HybridCache):
+                 past_key_values = HybridCache(config=self.config)
+ 
+         # Assign each incoming token its absolute position index in the full sequence.
+         # cache_position is the running "house number" handed to every new token.
+         if cache_position is None:
+             past_seen = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(past_seen, past_seen + T, device=inputs_embeds.device)
+ 
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+         # # Earlier variant: derive the real position ids to fix RoPE misalignment under left padding.
+         # if getattr(self.config, "_attn_implementation", "sdpa") == "sdpa" and not output_attentions and attention_mask is None:
+         #     causal_4d = None
+         #     full_position_ids = None
+         # else:
+         #     past_kv_len = int(cache_position[0].item()) if T > 0 else 0
+         #     causal_4d = _prepare_4d_causal_attention_mask(
+         #         attention_mask, (B, T), inputs_embeds, past_kv_len, sliding_window=None
+         #     )
+         #     if attention_mask is not None and attention_mask.dim() == 2:
+         #         full_position_ids = attention_mask.long().cumsum(-1) - 1
+         #         full_position_ids = full_position_ids.masked_fill(attention_mask == 0, 1)
+         #     else:
+         #         full_position_ids = None
+ 
+         # Derive the true position ids; fixes the RoPE misalignment caused by left padding.
+         # This branch only runs when a 2D attention_mask is supplied (prefill); it computes each
+         # token's absolute position in the sequence and handles left padding correctly.
+         # (Training follows the same path whenever a 2D attention_mask is provided.)
+         if attention_mask is not None and attention_mask.dim() == 2:  # the mask is 2D only at prefill; later decode steps pass a 4D mask
+             full_position_ids = attention_mask.long().cumsum(-1) - 1  # prefix-sum positions aligned to non-padding tokens
+             full_position_ids = full_position_ids.masked_fill(attention_mask == 0, 1)  # padding positions get a dummy value
+         else:
+             full_position_ids = None
+ 
+         # Interceptor: if a mask exists but is all ones (no padding), force it to None to keep the fast SDPA path.
+         # A 2D attention_mask only marks padding; all ones means there is none.
+         is_all_ones = (attention_mask is None) or (attention_mask.min() == 1)
+         # output_attentions toggles per-layer attention weights (mainly useful for debugging).
+         if getattr(self.config, "_attn_implementation", "sdpa") == "sdpa" and not output_attentions and is_all_ones:
+             causal_4d = None  # no padding: pass None and let SDPA apply its internal causal mask
+         else:  # no fast path, or a custom mask is required: build it explicitly
+             past_kv_len = int(cache_position[0].item()) if T > 0 else 0
+             # Expand the 2D attention_mask into a 4D causal mask tensor.
+             causal_4d = _prepare_4d_causal_attention_mask(
+                 attention_mask, (B, T), inputs_embeds, past_kv_len, sliding_window=None
+             )
+ 
+         hidden_states = inputs_embeds
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+         # Build a GQA adapter for the early layers; from layer 7 on, past_key_values is used directly.
+         # The early layers call the stock Transformers attention, so HybridCache has to be wrapped to look like the usual DynamicCache.
+         gqa_adapter = _GQASlotAdapter(past_key_values) if past_key_values is not None else None
+         all_hidden_states = () if output_hidden_states else None
+         shared_latent = None
+ 
+         # Per-layer loop.
+         for layer in self.layers:
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+             effective_cache = gqa_adapter if layer.layer_role == "gqa" else past_key_values
+ 
+             if self.gradient_checkpointing and self.training:
+                 if cache_position is not None:
+                     assert cache_position.device == inputs_embeds.device
+                 outputs = torch.utils.checkpoint.checkpoint(
+                     layer, hidden_states, causal_4d, position_ids, None, False, cache_position,
+                     position_embeddings, output_attentions, shared_latent, full_position_ids,
+                     use_reentrant=False,
+                 )
+                 hidden_states, shared_latent = outputs[0], outputs[1]
+             else:
+                 outputs = layer(
+                     hidden_states, attention_mask=causal_4d, position_ids=position_ids,
+                     past_key_values=effective_cache, use_cache=use_cache, cache_position=cache_position,
+                     position_embeddings=position_embeddings, output_attentions=output_attentions,
+                     shared_latent=shared_latent, full_position_ids=full_position_ids,
+                 )
+                 hidden_states, shared_latent = outputs[0], outputs[1]
+ 
+         # Final RMSNorm after the last layer.
+         hidden_states = self.norm(hidden_states)
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+ 
+         if not return_dict:
+             return tuple(v for v in [hidden_states, past_key_values if use_cache else None, all_hidden_states, None] if v is not None)
+ 
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None,
+             hidden_states=all_hidden_states, attentions=None,
+         )
+ 
+ class Qwen2HybridForCausalLM(Qwen2PreTrainedModel, GenerationMixin):
+     _tied_weights_keys = ["lm_head.weight"]
+     config_class = Qwen2HybridConfig  # required for AutoModelForCausalLM resolution via auto_map
+     def __init__(self, config: Qwen2HybridConfig):
+         super().__init__(config)
+         self.model = Qwen2HybridModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.post_init()
+ 
+     def _init_weights(self, module: nn.Module):
+         super()._init_weights(module)
+         if isinstance(module, Qwen2MLASoftAttention):
+             nn.init.zeros_(module.output_alpha)
+         elif isinstance(module, SharedLatentGate):
+             nn.init.zeros_(module.warmup_alpha)
+             nn.init.constant_(module.gate, -4.0)
+ 
+     def get_input_embeddings(self): return self.model.embed_tokens
+     def set_input_embeddings(self, value): self.model.embed_tokens = value
+     def get_output_embeddings(self): return self.lm_head
+     def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings
+     def set_decoder(self, decoder): self.model = decoder
+     def get_decoder(self): return self.model
+ 
+     def forward(
+         self, input_ids=None, attention_mask=None, position_ids=None, past_key_values=None,
+         inputs_embeds=None, labels=None, use_cache=None, cache_position=None, output_attentions=False,
+         output_hidden_states=False, return_dict=True, **kwargs,
+     ) -> Union[CausalLMOutputWithPast, Tuple]:
+ 
+         outputs = self.model(
+             input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids,
+             past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache,
+             cache_position=cache_position, output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states, return_dict=True,
+         )
+         hidden_states = outputs.last_hidden_state
+         logits = self.lm_head(hidden_states).float()
+ 
+         loss = None
+         if labels is not None:
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             loss = F.cross_entropy(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1), ignore_index=-100)
+ 
+         if not return_dict:
+             out = (logits,)
+             if use_cache: out = out + (outputs.past_key_values,)
+             if output_hidden_states: out = out + (outputs.hidden_states,)
+             return ((loss,) + out) if loss is not None else out
+ 
+         return CausalLMOutputWithPast(
+             loss=loss, logits=logits, past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states, attentions=outputs.attentions,
+         )
+ 
+ 
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs,
+     ) -> dict:
+         past_len = past_key_values.get_seq_length() if past_key_values is not None else 0
+ 
+         if past_len > 0:
+             if inputs_embeds is not None:
+                 inputs_embeds = inputs_embeds[:, -1:]
+             else:
+                 input_ids = input_ids[:, -1:]
+ 
+         position_ids = kwargs.get("position_ids", None)
+         if attention_mask is not None and position_ids is None:
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids = position_ids.masked_fill(attention_mask == 0, 1)
+             if past_len > 0:
+                 position_ids = position_ids[:, -input_ids.shape[1]:]
+ 
+         # Runs during the decode phase of generation.
+         if cache_position is None:
+             cache_position = torch.arange(past_len, past_len + input_ids.shape[1], device=input_ids.device)
+ 
+         model_inputs = {}
+         if inputs_embeds is not None and past_len == 0:
+             model_inputs["inputs_embeds"] = inputs_embeds
+         else:
+             model_inputs["input_ids"] = input_ids
+ 
+         model_inputs.update({
+             "past_key_values": past_key_values,
+             "use_cache": kwargs.get("use_cache", True),
+             "attention_mask": attention_mask,
+             "position_ids": position_ids,
+             "cache_position": cache_position,
+         })
+         return model_inputs
+ 
+     @staticmethod
+     def _reorder_cache(past_key_values, beam_idx):
+         for i in range(len(past_key_values._gqa_k)):
+             if past_key_values._gqa_k[i] is not None:
+                 past_key_values._gqa_k[i] = past_key_values._gqa_k[i].index_select(0, beam_idx)
+                 past_key_values._gqa_v[i] = past_key_values._gqa_v[i].index_select(0, beam_idx)
+             if past_key_values._latent[i] is not None:
+                 past_key_values._latent[i] = past_key_values._latent[i].index_select(0, beam_idx)
+         return past_key_values
+ 
+ def _svd_project_kv(k_weight, v_weight, kv_rank, k_bias=None, v_bias=None):
+     nkv_d = k_weight.shape[0]
+     orig_dtype = k_weight.dtype
+     M = torch.cat([k_weight, v_weight], dim=0).float()
+     U, S, Vh = torch.linalg.svd(M, full_matrices=False)
+     r = min(kv_rank, S.shape[0])
+     S_sqrt = S[:r].sqrt().unsqueeze(0)
+     down_w = Vh[:r, :].to(orig_dtype)
+     k_up_w = (U[:nkv_d, :r] * S_sqrt).to(orig_dtype)
+     v_up_w = (U[nkv_d:, :r] * S_sqrt).to(orig_dtype)
+     k_up_bias = k_bias.to(orig_dtype) if k_bias is not None else None
+     v_up_bias = v_bias.to(orig_dtype) if v_bias is not None else None
+     return down_w, k_up_w, v_up_w, k_up_bias, v_up_bias
+ 
+ def migrate_weights_from_qwen2(hybrid_model, original_state_dict, svd_verbose=True):
+     hybrid_sd = hybrid_model.state_dict()
+     new_sd, unmapped = {}, []
+     layer_kv = {}
+     for orig_key, orig_val in original_state_dict.items():
+         if not orig_key.startswith("model.layers."): continue
+         parts = orig_key.split(".")
+         layer_idx = int(parts[2])
+         suffix = ".".join(parts[3:])
+         if _layer_role(layer_idx) == "gqa": continue
+         if suffix == "self_attn.k_proj.weight": layer_kv.setdefault(layer_idx, {})["k_w"] = orig_val
+         elif suffix == "self_attn.v_proj.weight": layer_kv.setdefault(layer_idx, {})["v_w"] = orig_val
+         elif suffix == "self_attn.k_proj.bias": layer_kv.setdefault(layer_idx, {})["k_b"] = orig_val
+         elif suffix == "self_attn.v_proj.bias": layer_kv.setdefault(layer_idx, {})["v_b"] = orig_val
+ 
+     for orig_key, orig_val in original_state_dict.items():
+         if not orig_key.startswith("model.layers."):
+             if orig_key in hybrid_sd: new_sd[orig_key] = orig_val
+             else: unmapped.append(orig_key)
+             continue
+         parts = orig_key.split(".")
+         layer_idx = int(parts[2])
+         suffix = ".".join(parts[3:])
+         role = _layer_role(layer_idx)
+         tgt = f"model.layers.{layer_idx}.{suffix}"
+         if role == "gqa":
+             if tgt in hybrid_sd: new_sd[tgt] = orig_val
+             else: unmapped.append(orig_key)
+             continue
+         if suffix in ("self_attn.q_proj.weight", "self_attn.q_proj.bias"):
+             if tgt in hybrid_sd: new_sd[tgt] = orig_val
+         elif suffix in ("self_attn.k_proj.weight", "self_attn.v_proj.weight", "self_attn.k_proj.bias", "self_attn.v_proj.bias"):
+             pass
+         elif suffix == "self_attn.o_proj.weight":
+             if tgt in hybrid_sd and hybrid_sd[tgt].shape == orig_val.shape: new_sd[tgt] = orig_val
+             else: unmapped.append(f"{orig_key} [shape mismatch or missing]")
+         elif "mlp." in suffix or "layernorm" in suffix:
+             if tgt in hybrid_sd: new_sd[tgt] = orig_val
+         else:
+             unmapped.append(orig_key)
+ 
+     svd_done, svd_errors = 0, []
+     for layer_idx in sorted(layer_kv.keys()):
+         kv = layer_kv[layer_idx]
+         k_w, v_w = kv.get("k_w"), kv.get("v_w")
+         if k_w is None or v_w is None:
+             svd_errors.append(f"Layer {layer_idx}: missing k_w or v_w")
+             continue
+         rank = _mla_rank(layer_idx)
+         zone = _mla_zone(layer_idx)
+         k_b, v_b = kv.get("k_b"), kv.get("v_b")
+         if svd_verbose:
+             bias_info = "w/ bias" if k_b is not None else "no bias"
+             print(f"  [SVD] Layer {layer_idx:2d} [{zone:6s}] k{list(k_w.shape)} + v{list(v_w.shape)} → rank={rank:3d} ({bias_info})")
+         try:
+             down_w, k_up_w, v_up_w, k_up_b, v_up_b = _svd_project_kv(k_w, v_w, rank, k_bias=k_b, v_bias=v_b)
+         except Exception as exc:
+             svd_errors.append(f"Layer {layer_idx}: SVD failed: {exc}")
+             continue
+         pfx = f"model.layers.{layer_idx}.self_attn"
+         for key, weight in [(f"{pfx}.kv_down_proj.weight", down_w), (f"{pfx}.k_up_proj.weight", k_up_w), (f"{pfx}.v_up_proj.weight", v_up_w)]:
+             if key in hybrid_sd and hybrid_sd[key].shape == weight.shape: new_sd[key] = weight
+             else: svd_errors.append(f"{key}: shape mismatch")
+         for key, bias_val in [(f"{pfx}.k_up_proj.bias", k_up_b), (f"{pfx}.v_up_proj.bias", v_up_b)]:
+             if bias_val is not None and key in hybrid_sd:
+                 if hybrid_sd[key].shape == bias_val.shape: new_sd[key] = bias_val
+         svd_done += 1
+ 
+     custom_written = 0
+     for key in hybrid_sd:
+         if key.endswith(".self_attn.output_alpha"):
+             new_sd[key] = torch.tensor(0.0)
+             custom_written += 1
+         elif key.endswith(".shared_gate.warmup_alpha"):
+             new_sd[key] = torch.tensor(0.0)
+             custom_written += 1
+         elif key.endswith(".shared_gate.gate"):
+             new_sd[key] = torch.full(hybrid_sd[key].shape, -4.0)
+             custom_written += 1
+ 
+     missing, unexpected = hybrid_model.load_state_dict(new_sd, strict=False)
+     if svd_verbose:
+         sep = "=" * 65
+         print(f"\n{sep}\n[migrate_weights_v9] Qwen2 → Hybrid v9 migration complete\n{sep}")
+         print(f"  Rank: shared(L7)={_SHARED_RANK} | mid(L8-22)={_SOFT_RANK_MID} | deep(L23-27)={_SOFT_RANK_DEEP}")
+         print(f"  SVD warm-started layers : {svd_done}\n  Custom params written   : {custom_written}\n  Total keys written      : {len(new_sd)}")
+         print(f"  Missing (new modules)   : {len(missing):3d}\n  Unexpected (extra)      : {len(unexpected):3d}\n  Unmapped original keys  : {len(unmapped):3d}")
+         if svd_errors:
+             for e in svd_errors: print(f"  ⚠ {e}")
+         print(f"{sep}\n")
+     return unmapped
+ 
+ def get_alpha_param_groups(model, base_lr, alpha_lr_scale=10.0):
+     alpha_params, base_params, alpha_names = [], [], []
+     for name, param in model.named_parameters():
+         if not param.requires_grad: continue
+         if name.endswith(".self_attn.output_alpha") or name.endswith(".shared_gate.warmup_alpha"):
+             alpha_params.append(param)
+             alpha_names.append(name)
+         else: base_params.append(param)
+     print(f"[get_alpha_param_groups]\n  Base params  : {len(base_params):4d}  lr={base_lr:.2e}\n  Alpha params : {len(alpha_params):4d}  lr={base_lr * alpha_lr_scale:.2e}")
+     return [{"params": base_params, "lr": base_lr, "name": "base"}, {"params": alpha_params, "lr": base_lr * alpha_lr_scale, "name": "alpha_gate"}]
+ 
+ def verify_no_nan(model):
+     nan_params = [f"  ✗ NaN in {n} shape={list(p.shape)}" for n, p in model.named_parameters() if p.data.isnan().any()]
+     if nan_params:
+         print("[verify_no_nan] NaN parameters found:\n" + "\n".join(nan_params))
+         return False
+     print(f"[verify_no_nan] ✓ all {sum(1 for _ in model.parameters())} parameters are NaN-free")
+     return True
+ 
+ def verify_alpha_zero(model):
+     problems = []
+     for name, param in model.named_parameters():
+         if name.endswith(".self_attn.output_alpha") or name.endswith(".shared_gate.warmup_alpha"):
+             if abs(param.item()) > 1e-6: problems.append(f"  ✗ {name} = {param.item():.6f} (expected 0.0)")
+     if problems:
+         print("[verify_alpha_zero] alpha initialization problems:\n" + "\n".join(problems))
+         return False
+     print("[verify_alpha_zero] ✓ all output_alpha / warmup_alpha = 0.0")
+     return True
+ 
+ __all__ = [
+     "_SHARED_RANK", "_SOFT_RANK_MID", "_SOFT_RANK_DEEP", "_layer_role", "_mla_rank", "_mla_zone", "_mla_sliding_window",
+     "_svd_project_kv", "HybridCache", "SharedLatentGate", "Qwen2MLASoftAttention", "Qwen2HybridDecoderLayer",
+     "Qwen2HybridModel", "Qwen2HybridForCausalLM", "migrate_weights_from_qwen2", "get_alpha_param_groups",
+     "verify_no_nan", "verify_alpha_zero",
+ ]
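Note: the migration helpers at the end of the file warm-start the hybrid model from a stock Qwen2 checkpoint, SVD-factoring each k_proj/v_proj pair on the MLA layers into kv_down_proj plus k_up_proj/v_up_proj, while output_alpha and warmup_alpha start at zero so the new branches are initially silent. A usage sketch; it assumes the two .py files have been copied into a local package directory (here called qwen2_hybrid/) so the relative import in the modeling file resolves, and the base checkpoint id and hyperparameter reuse are assumptions rather than the author's documented recipe:

import torch
from transformers import AutoModelForCausalLM
from qwen2_hybrid.configuration_qwen2_hybrid import Qwen2HybridConfig   # local package holding the two repo files
from qwen2_hybrid.modeling_qwen2_hybrid import (
    Qwen2HybridForCausalLM, get_alpha_param_groups,
    migrate_weights_from_qwen2, verify_alpha_zero, verify_no_nan,
)

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-1.5B", torch_dtype=torch.bfloat16)  # assumed base checkpoint
base_kwargs = {k: v for k, v in base.config.to_dict().items() if k not in ("model_type", "architectures")}
cfg = Qwen2HybridConfig(**base_kwargs)      # reuse the base hyperparameters (hidden_size 1536, 28 layers, ...)
hybrid = Qwen2HybridForCausalLM(cfg)

unmapped = migrate_weights_from_qwen2(hybrid, base.state_dict())  # SVD warm-start of the MLA layers
assert verify_no_nan(hybrid) and verify_alpha_zero(hybrid)        # gates start closed, so the new branches contribute nothing yet
param_groups = get_alpha_param_groups(hybrid, base_lr=1e-5)       # alpha/gate parameters get a 10x learning rate for warm-up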
special_tokens_map.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
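Note: the tokenizer files are the stock Qwen2 tokenizer, with <|endoftext|> reused as both the EOS and the padding token. A quick sanity-check sketch (the repo id is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("abcsk123/qwen2-hybrid")  # hypothetical repo id
print(tok.eos_token, tok.pad_token)                 # both <|endoftext|>
print(tok.convert_tokens_to_ids("<|endoftext|>"))   # 151643, matching bos/eos_token_id in config.json
print(tok.model_max_length)                         # 32768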
vocab.json ADDED
The diff for this file is too large to render. See raw diff