smy111 committed on
Commit 83ed10f · verified · 1 Parent(s): 27ed951

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ headwise.png filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,87 @@
- ---
- license: apache-2.0
- ---
+ # Qwen3-Coder-30B-A3B-Instruct-RTPurbo
+
+ ## Model Overview
+ - **Model Optimizations:**
+   - **Sliding Window Attention:** 85%
+   - **Full Attention:** 15%
+ - **Version:** 1.0
+
+ <img src="./headwise.png" alt="headwise attention head allocation" width="60%">
+
+ RTPurbo uses hybrid HeadWise Attention to compress the Qwen3-Coder model. Specifically, it splits the attention heads into two groups according to their attention type:
+
+ 1. **Retrieval Heads**: These heads perform **Full Attention** over the entire sequence (or a large chunk), allowing them to capture rich, long-range dependencies and act as a powerful information-retrieval component.
+ 2. **Non-Retrieval Heads**: These heads use **Sink SWA Attention** (a sliding window plus a few fixed attention-sink tokens), processing tokens in a sliding-window, fixed-cache manner. They are highly efficient and ideal for handling very long sequences while maintaining local context. A minimal usage sketch is shown after this list.
+
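+ ## Usage
+
+ A minimal loading sketch. The repository id and generation settings below are illustrative assumptions; `trust_remote_code=True` is required because `config.json` maps `AutoModelForCausalLM` to the bundled `modeling_qwen3_moe.py`, and that file checks `config._attn_implementation == "headwise"` at forward time to enable the sink + sliding-window flex-attention path. Exact behavior may vary with your `transformers` version.
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "smy111/Qwen3-Coder-30B-A3B-Instruct-RTPurbo"  # assumed repo id; use a local path if needed
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype="bfloat16",   # matches "dtype": "bfloat16" in config.json
+     trust_remote_code=True,   # loads the bundled modeling_qwen3_moe.py
+     device_map="auto",
+ )
+
+ # The bundled attention module reads this flag at forward time; "headwise"
+ # switches attention to the sink + sliding-window flex kernel it defines.
+ model.config._attn_implementation = "headwise"
+
+ messages = [{"role": "user", "content": "Write a quicksort function in Python."}]
+ input_ids = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ output = model.generate(input_ids, max_new_tokens=256)
+ print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
+ ```
+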
+ ## Evaluation
+
+ This model was evaluated on LongBench with the [lm_eval](https://github.com/EleutherAI/lm-evaluation-harness) harness, using [Qwen3-Coder-30B-A3B-Instruct](https://www.modelscope.cn/models/Qwen/Qwen3-Coder-30B-A3B-Instruct) as the full-attention baseline.
+
+ **Qwen3-Coder-30B-A3B**
+
+ | LongBench | lcc | repo-p | samsum | trec | lsht | 2wikim | hotpot | multi-en | multi-zh | musique | qasper | vcsum | qmsum | PR-en | PR-zh | Avg. (%) |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | **Full Attn** | 34.34 | 27.14 | 45.80 | 81.00 | 47.50 | 42.08 | 57.64 | 52.89 | 65.99 | 38.30 | 39.25 | 13.55 | 23.77 | 99.00 | 99.75 | 51.20 |
+ | **RTPurbo** | 35.96 | 35.21 | 46.49 | 81.00 | 49.00 | 47.39 | 55.44 | 52.93 | 65.23 | 35.58 | 39.78 | 13.80 | 23.68 | 99.00 | 99.75 | 52.02 |
+
added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
chat_template.jinja ADDED
@@ -0,0 +1,4 @@
1
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '
2
+ ' + message['content'] | trim + '<|im_end|>
3
+ ' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
4
+ ' }}{% endif %}
config.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "architectures": [
3
+ "Qwen3MoeForCausalLM"
4
+ ],
5
+ "auto_map": {
6
+ "AutoModelForCausalLM": "modeling_qwen3_moe.Qwen3MoeForCausalLM"
7
+ },
8
+ "attention_bias": false,
9
+ "attention_dropout": 0.0,
10
+ "decoder_sparse_step": 1,
11
+ "dtype": "bfloat16",
12
+ "eos_token_id": 151645,
13
+ "head_dim": 128,
14
+ "hidden_act": "silu",
15
+ "hidden_size": 2048,
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 5472,
18
+ "max_position_embeddings": 262144,
19
+ "max_window_layers": 28,
20
+ "mlp_only_layers": [],
21
+ "model_type": "qwen3_moe",
22
+ "moe_intermediate_size": 768,
23
+ "norm_topk_prob": true,
24
+ "num_attention_heads": 32,
25
+ "num_experts": 128,
26
+ "num_experts_per_tok": 8,
27
+ "num_hidden_layers": 48,
28
+ "num_key_value_heads": 4,
29
+ "output_router_logits": false,
30
+ "qkv_bias": false,
31
+ "rms_norm_eps": 1e-06,
32
+ "rope_scaling": null,
33
+ "rope_theta": 10000000,
34
+ "router_aux_loss_coef": 0.0,
35
+ "shared_expert_intermediate_size": 0,
36
+ "sliding_window": null,
37
+ "tie_word_embeddings": false,
38
+ "transformers_version": "4.56.2",
39
+ "use_cache": true,
40
+ "use_qk_norm": true,
41
+ "use_sliding_window": false,
42
+ "vocab_size": 151936
43
+ }
headwise.png ADDED

Git LFS Details

  • SHA256: fe46448353ae2914fb970a4f388859b2c0155837f66443345a404ee7137f7c89
  • Pointer size: 131 Bytes
  • Size of remote file: 124 kB
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c67e37ee77641d4b926db6b422ee6e6799be1019eca8175ee6690fb4051ef741
3
+ size 4998060192
model-00002-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18772dc9cb50b4cba314ca9d808c198e74eb00926cafa2ab6f3f6488ae7a002a
3
+ size 4998757312
model-00003-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:801e6b2f5278ab6a039b324c4564bfe9ea625c12e11a62780377d42b6b588498
3
+ size 4998757352
model-00004-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d17c9c614166be1e6784df3ec413204a4e47686934798ea0aae50ebf36b153a
3
+ size 4998758920
model-00005-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10a34cf24b06b5f173b60e974755aadeba1c1c72f5bbe3c27193169888cdf506
3
+ size 4998758928
model-00006-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa3d0f92089d8ae514b0c1a6c98a0fbc767a30d998ae8f5f9b50b4568048c070
3
+ size 4998758944
model-00007-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cac4c45424cde74961934ee4db2a23b8a2be8a4289ae85042a2239cf071bc18a
3
+ size 4998758888
model-00008-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6da8f79cc1d57508966a1dee4166297df90ccd147616026841f1ef4c1997ff7a
3
+ size 4998758832
model-00009-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d732a86c4e3c2ff9bfeb975080959e604563320bac1e46165c7c8a5d377bea71
3
+ size 4998758872
model-00010-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53af218d23528ae6b31beb50a8bbd7e44aebbce1c6c1741ed21e2b8103fd3de2
3
+ size 4998758944
model-00011-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1605272017e7469082664848388452a8e80dd0620207adc7d696d8d3d60516e
3
+ size 4998758960
model-00012-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06fb299e6a6196184b92e61b693f447ca9ef4fe3f47e8b9aa024df8af79a22df
3
+ size 4998758944
model-00013-of-00013.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb573142ef67f1141ddb81d624d0df1c8a60b0a1803f3dcd63bfca73cddea069
3
+ size 1082172576
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_qwen3_moe.py ADDED
@@ -0,0 +1,760 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/qwen3_moe/modular_qwen3_moe.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_qwen3_moe.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from typing import Callable, Optional, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ from torch import nn
27
+
28
+ from transformers.activations import ACT2FN
29
+ from transformers.cache_utils import Cache, DynamicCache
30
+ from transformers.generation import GenerationMixin
31
+ from transformers.integrations import use_kernel_forward_from_hub
32
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
33
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
34
+ from transformers.modeling_layers import (
35
+ GenericForQuestionAnswering,
36
+ GenericForSequenceClassification,
37
+ GenericForTokenClassification,
38
+ GradientCheckpointingLayer,
39
+ )
40
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
41
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
42
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
43
+ from transformers.processing_utils import Unpack
44
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
45
+ from transformers.utils.deprecation import deprecate_kwarg
46
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
47
+ from transformers.models.qwen3_moe.configuration_qwen3_moe import Qwen3MoeConfig
48
+
49
+ from torch.nn.attention.flex_attention import flex_attention, create_block_mask
50
+ flex_attention = torch.compile(flex_attention, dynamic=True)
51
+
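+ # Window sizes used by the sink + sliding-window ("headwise") attention path below.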
52
+ SWA_TOKEN = 8192
53
+ SINK_TOKEN = 4
54
+
55
+ def rotate_half(x):
56
+ """Rotates half the hidden dims of the input."""
57
+ x1 = x[..., : x.shape[-1] // 2]
58
+ x2 = x[..., x.shape[-1] // 2 :]
59
+ return torch.cat((-x2, x1), dim=-1)
60
+
61
+
62
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
63
+ """Applies Rotary Position Embedding to the query and key tensors.
64
+
65
+ Args:
66
+ q (`torch.Tensor`): The query tensor.
67
+ k (`torch.Tensor`): The key tensor.
68
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
69
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
70
+ position_ids (`torch.Tensor`, *optional*):
71
+ Deprecated and unused.
72
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
73
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
74
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
75
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
76
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
77
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
78
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
79
+ Returns:
80
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
81
+ """
82
+ cos = cos.unsqueeze(unsqueeze_dim)
83
+ sin = sin.unsqueeze(unsqueeze_dim)
84
+ q_embed = (q * cos) + (rotate_half(q) * sin)
85
+ k_embed = (k * cos) + (rotate_half(k) * sin)
86
+ return q_embed, k_embed
87
+
88
+
89
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
90
+ """
91
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
92
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
93
+ """
94
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
95
+ if n_rep == 1:
96
+ return hidden_states
97
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
98
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
99
+
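+ # Sink + sliding-window mask (the "Sink SWA" pattern from the model card): a key
+ # position is attendable only if it is causal and either lies within the trailing
+ # SWA_TOKEN window of the query or is one of the first SINK_TOKEN sink tokens.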
100
+ def sink_mask(b, h, q_idx, kv_idx):
101
+ causal_window = q_idx >= kv_idx
102
+ sliding_window = q_idx - kv_idx <= SWA_TOKEN
103
+ sink_window = kv_idx < SINK_TOKEN
104
+ return (sliding_window | sink_window) & causal_window
105
+
106
+ def flex_attention_call(
107
+ query: torch.Tensor,
108
+ key: torch.Tensor,
109
+ value: torch.Tensor,
110
+ ):
111
+ S = query.shape[2]
112
+ block_mask = create_block_mask(sink_mask, 1, 1, S, S, device=query.device)
113
+ attn_output: torch.Tensor = flex_attention(query, key, value, block_mask=block_mask)
114
+
115
+ return attn_output
116
+
117
+ def flex_attention_forward(
118
+ module: nn.Module,
119
+ query: torch.Tensor,
120
+ key: torch.Tensor,
121
+ value: torch.Tensor,
122
+ attention_mask: Optional[torch.Tensor],
123
+ scaling: float,
124
+ dropout: float = 0.0,
125
+ **kwargs: Unpack[TransformersKwargs],
126
+ ):
127
+
128
+ seq_len, q_head_num = query.shape[2], query.shape[1]
129
+ kv_head_num = key.shape[1]
130
+
131
+ n_repeat = q_head_num // kv_head_num
132
+ key = repeat_kv(key, n_repeat)
133
+ value = repeat_kv(value, n_repeat)
134
+
135
+ attn_output = flex_attention_call(query, key, value)
136
+
137
+ # return attn_output, None
138
+ return attn_output.transpose(1, 2), None
139
+
140
+ def eager_attention_forward(
141
+ module: nn.Module,
142
+ query: torch.Tensor,
143
+ key: torch.Tensor,
144
+ value: torch.Tensor,
145
+ attention_mask: Optional[torch.Tensor],
146
+ scaling: float,
147
+ dropout: float = 0.0,
148
+ **kwargs: Unpack[TransformersKwargs],
149
+ ):
150
+ key_states = repeat_kv(key, module.num_key_value_groups)
151
+ value_states = repeat_kv(value, module.num_key_value_groups)
152
+
153
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
154
+ if attention_mask is not None:
155
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
156
+ attn_weights = attn_weights + causal_mask
157
+
158
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
159
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
160
+ attn_output = torch.matmul(attn_weights, value_states)
161
+ attn_output = attn_output.transpose(1, 2).contiguous()
162
+
163
+ return attn_output, attn_weights
164
+
165
+
166
+ class Qwen3MoeAttention(nn.Module):
167
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
168
+
169
+ def __init__(self, config: Qwen3MoeConfig, layer_idx: int):
170
+ super().__init__()
171
+ self.config = config
172
+ self.layer_idx = layer_idx
173
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
174
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
175
+ self.scaling = self.head_dim**-0.5
176
+ self.attention_dropout = config.attention_dropout
177
+ self.is_causal = True
178
+
179
+ self.q_proj = nn.Linear(
180
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
181
+ )
182
+ self.k_proj = nn.Linear(
183
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
184
+ )
185
+ self.v_proj = nn.Linear(
186
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
187
+ )
188
+ self.o_proj = nn.Linear(
189
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
190
+ )
191
+ self.q_norm = Qwen3MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
192
+ self.k_norm = Qwen3MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps) # thus post q_norm does not need reshape
193
+ self.sliding_window = getattr(config, "sliding_window", None)
194
+
195
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
196
+ def forward(
197
+ self,
198
+ hidden_states: torch.Tensor,
199
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
200
+ attention_mask: Optional[torch.Tensor],
201
+ past_key_values: Optional[Cache] = None,
202
+ cache_position: Optional[torch.LongTensor] = None,
203
+ **kwargs: Unpack[FlashAttentionKwargs],
204
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
205
+ input_shape = hidden_states.shape[:-1]
206
+ hidden_shape = (*input_shape, -1, self.head_dim)
207
+
208
+ query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
209
+ key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
210
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
211
+
212
+ cos, sin = position_embeddings
213
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
214
+
215
+ if past_key_values is not None:
216
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
217
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
218
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
221
+
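+ # attn_implementation="headwise" routes this layer through the sink + sliding-window
+ # flex-attention path; any other configured implementation falls back to the stock kernels.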
222
+ attention_interface: Callable = eager_attention_forward
223
+ if self.config._attn_implementation == "headwise":
224
+ attention_interface = flex_attention_forward
225
+ elif self.config._attn_implementation != "eager":
226
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
227
+
228
+ attn_output, attn_weights = attention_interface(
229
+ self,
230
+ query_states,
231
+ key_states,
232
+ value_states,
233
+ attention_mask,
234
+ dropout=0.0 if not self.training else self.attention_dropout,
235
+ scaling=self.scaling,
236
+ sliding_window=self.sliding_window, # diff with Llama
237
+ **kwargs,
238
+ )
239
+
240
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
241
+ attn_output = self.o_proj(attn_output)
242
+ return attn_output, attn_weights
243
+
244
+
245
+ class Qwen3MoeMLP(nn.Module):
246
+ def __init__(self, config, intermediate_size=None):
247
+ super().__init__()
248
+ self.config = config
249
+ self.hidden_size = config.hidden_size
250
+ self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
251
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
252
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
253
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
254
+ self.act_fn = ACT2FN[config.hidden_act]
255
+
256
+ def forward(self, x):
257
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
258
+ return down_proj
259
+
260
+
261
+ class Qwen3MoeSparseMoeBlock(nn.Module):
262
+ def __init__(self, config):
263
+ super().__init__()
264
+ self.num_experts = config.num_experts
265
+ self.top_k = config.num_experts_per_tok
266
+ self.norm_topk_prob = config.norm_topk_prob
267
+
268
+ # gating
269
+ self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
270
+ self.experts = nn.ModuleList(
271
+ [Qwen3MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
272
+ )
273
+
274
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
275
+ """ """
276
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
277
+ hidden_states = hidden_states.view(-1, hidden_dim)
278
+ # router_logits: (batch * sequence_length, n_experts)
279
+ router_logits = self.gate(hidden_states)
280
+
281
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
282
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
283
+ if self.norm_topk_prob: # only diff with mixtral sparse moe block!
284
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
285
+ # we cast back to the input dtype
286
+ routing_weights = routing_weights.to(hidden_states.dtype)
287
+
288
+ final_hidden_states = torch.zeros(
289
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
290
+ )
291
+
292
+ # One hot encode the selected experts to create an expert mask
293
+ # this will be used to easily index which expert is going to be sollicitated
294
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
295
+
296
+ # Loop over all available experts in the model and perform the computation on each expert
297
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
298
+ for expert_idx in expert_hit:
299
+ expert_layer = self.experts[expert_idx]
300
+ idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
301
+
302
+ # Index the correct hidden states and compute the expert hidden state for
303
+ # the current expert. We need to make sure to multiply the output hidden
304
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
305
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
306
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
307
+
308
+ # However `index_add_` only support torch tensors for indexing so we'll use
309
+ # the `top_x` tensor here.
310
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
311
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
312
+ return final_hidden_states, router_logits
313
+
314
+
315
+ @use_kernel_forward_from_hub("RMSNorm")
316
+ class Qwen3MoeRMSNorm(nn.Module):
317
+ def __init__(self, hidden_size, eps=1e-6):
318
+ """
319
+ Qwen3MoeRMSNorm is equivalent to T5LayerNorm
320
+ """
321
+ super().__init__()
322
+ self.weight = nn.Parameter(torch.ones(hidden_size))
323
+ self.variance_epsilon = eps
324
+
325
+ def forward(self, hidden_states):
326
+ input_dtype = hidden_states.dtype
327
+ hidden_states = hidden_states.to(torch.float32)
328
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
329
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
330
+ return self.weight * hidden_states.to(input_dtype)
331
+
332
+ def extra_repr(self):
333
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
334
+
335
+
336
+ class Qwen3MoeDecoderLayer(GradientCheckpointingLayer):
337
+ def __init__(self, config: Qwen3MoeConfig, layer_idx: int):
338
+ super().__init__()
339
+ self.hidden_size = config.hidden_size
340
+
341
+ self.self_attn = Qwen3MoeAttention(config, layer_idx)
342
+
343
+ if (layer_idx not in config.mlp_only_layers) and (
344
+ config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
345
+ ):
346
+ self.mlp = Qwen3MoeSparseMoeBlock(config)
347
+ else:
348
+ self.mlp = Qwen3MoeMLP(config, intermediate_size=config.intermediate_size)
349
+
350
+ self.input_layernorm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
351
+ self.post_attention_layernorm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
352
+
353
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
354
+ def forward(
355
+ self,
356
+ hidden_states: torch.Tensor,
357
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
358
+ attention_mask: Optional[torch.Tensor] = None,
359
+ position_ids: Optional[torch.LongTensor] = None,
360
+ past_key_values: Optional[tuple[torch.Tensor]] = None,
361
+ cache_position: Optional[torch.LongTensor] = None,
362
+ **kwargs: Unpack[FlashAttentionKwargs],
363
+ ) -> torch.FloatTensor:
364
+ """
365
+ Args:
366
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
367
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
368
+ `(batch, sequence_length)` where padding elements are indicated by 0.
369
+ output_attentions (`bool`, *optional*):
370
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
371
+ returned tensors for more detail.
372
+ output_router_logits (`bool`, *optional*):
373
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
374
+ and should not be returned during inference.
375
+ use_cache (`bool`, *optional*):
376
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
377
+ (see `past_key_values`).
378
+ past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
379
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
380
+ Indices depicting the position of the input sequence tokens in the sequence.
381
+ position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
382
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
383
+ with `head_dim` being the embedding dimension of each attention head.
384
+ kwargs (`dict`, *optional*):
385
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
386
+ into the model
387
+ """
388
+ residual = hidden_states
389
+
390
+ hidden_states = self.input_layernorm(hidden_states)
391
+
392
+ # Self Attention
393
+ hidden_states, _ = self.self_attn(
394
+ hidden_states=hidden_states,
395
+ position_embeddings=position_embeddings,
396
+ attention_mask=attention_mask,
397
+ position_ids=position_ids,
398
+ past_key_values=past_key_values,
399
+ cache_position=cache_position,
400
+ **kwargs,
401
+ )
402
+ hidden_states = residual + hidden_states
403
+
404
+ # Fully Connected
405
+ residual = hidden_states
406
+ hidden_states = self.post_attention_layernorm(hidden_states)
407
+ hidden_states = self.mlp(hidden_states)
408
+ # For the MoE layers, we need to unpack
409
+ if isinstance(hidden_states, tuple):
410
+ hidden_states, _ = hidden_states
411
+ hidden_states = residual + hidden_states
412
+
413
+ return hidden_states
414
+
415
+
416
+ class Qwen3MoeRotaryEmbedding(nn.Module):
417
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
418
+
419
+ def __init__(self, config: Qwen3MoeConfig, device=None):
420
+ super().__init__()
421
+ # BC: "rope_type" was originally "type"
422
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
423
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
424
+ else:
425
+ self.rope_type = "default"
426
+ self.max_seq_len_cached = config.max_position_embeddings
427
+ self.original_max_seq_len = config.max_position_embeddings
428
+
429
+ self.config = config
430
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
431
+
432
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
433
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
434
+ self.original_inv_freq = self.inv_freq
435
+
436
+ @torch.no_grad()
437
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
438
+ def forward(self, x, position_ids):
439
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
440
+ position_ids_expanded = position_ids[:, None, :].float()
441
+
442
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
443
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
444
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
445
+ emb = torch.cat((freqs, freqs), dim=-1)
446
+ cos = emb.cos() * self.attention_scaling
447
+ sin = emb.sin() * self.attention_scaling
448
+
449
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
450
+
451
+
452
+ @auto_docstring
453
+ class Qwen3MoePreTrainedModel(PreTrainedModel):
454
+ config: Qwen3MoeConfig
455
+ base_model_prefix = "model"
456
+ supports_gradient_checkpointing = True
457
+ _no_split_modules = ["Qwen3MoeDecoderLayer"]
458
+ _skip_keys_device_placement = ["past_key_values"]
459
+ _supports_flash_attn = True
460
+ _supports_sdpa = True
461
+ _supports_flex_attn = True
462
+ _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
463
+ _supports_attention_backend = True
464
+ _can_record_outputs = {
465
+ "router_logits": OutputRecorder(Qwen3MoeSparseMoeBlock, index=1),
466
+ "hidden_states": Qwen3MoeDecoderLayer,
467
+ "attentions": Qwen3MoeAttention,
468
+ }
469
+
470
+
471
+ @auto_docstring
472
+ class Qwen3MoeModel(Qwen3MoePreTrainedModel):
473
+ def __init__(self, config: Qwen3MoeConfig):
474
+ super().__init__(config)
475
+ self.padding_idx = config.pad_token_id
476
+ self.vocab_size = config.vocab_size
477
+
478
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
479
+ self.layers = nn.ModuleList(
480
+ [Qwen3MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
481
+ )
482
+ self.norm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
483
+ self.rotary_emb = Qwen3MoeRotaryEmbedding(config=config)
484
+ self.gradient_checkpointing = False
485
+
486
+ # Initialize weights and apply final processing
487
+ self.post_init()
488
+
489
+ @check_model_inputs
490
+ @auto_docstring
491
+ def forward(
492
+ self,
493
+ input_ids: Optional[torch.LongTensor] = None,
494
+ attention_mask: Optional[torch.Tensor] = None,
495
+ position_ids: Optional[torch.LongTensor] = None,
496
+ past_key_values: Optional[Cache] = None,
497
+ inputs_embeds: Optional[torch.FloatTensor] = None,
498
+ use_cache: Optional[bool] = None,
499
+ cache_position: Optional[torch.LongTensor] = None,
500
+ **kwargs: Unpack[TransformersKwargs],
501
+ ) -> MoeModelOutputWithPast:
502
+ if (input_ids is None) ^ (inputs_embeds is not None):
503
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
504
+
505
+ if use_cache and past_key_values is None:
506
+ past_key_values = DynamicCache(config=self.config)
507
+
508
+ if inputs_embeds is None:
509
+ inputs_embeds = self.embed_tokens(input_ids)
510
+
511
+ if cache_position is None:
512
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
513
+ cache_position = torch.arange(
514
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
515
+ )
516
+ if position_ids is None:
517
+ position_ids = cache_position.unsqueeze(0)
518
+
519
+ mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
520
+ causal_mask = mask_function(
521
+ config=self.config,
522
+ input_embeds=inputs_embeds,
523
+ attention_mask=attention_mask,
524
+ cache_position=cache_position,
525
+ past_key_values=past_key_values,
526
+ position_ids=position_ids,
527
+ )
528
+
529
+ hidden_states = inputs_embeds
530
+
531
+ # create position embeddings to be shared across the decoder layers
532
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
533
+
534
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
535
+ hidden_states = decoder_layer(
536
+ hidden_states,
537
+ position_embeddings=position_embeddings,
538
+ attention_mask=causal_mask,
539
+ position_ids=position_ids,
540
+ past_key_values=past_key_values,
541
+ use_cache=use_cache,
542
+ cache_position=cache_position,
543
+ **kwargs,
544
+ )
545
+
546
+ hidden_states = self.norm(hidden_states)
547
+
548
+ return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
549
+ last_hidden_state=hidden_states,
550
+ past_key_values=past_key_values,
551
+ )
552
+
553
+
554
+ def load_balancing_loss_func(
555
+ gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
556
+ num_experts: Optional[int] = None,
557
+ top_k=2,
558
+ attention_mask: Optional[torch.Tensor] = None,
559
+ ) -> Union[torch.Tensor, int]:
560
+ r"""
561
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
562
+
563
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
564
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
565
+ experts is too unbalanced.
566
+
567
+ Args:
568
+ gate_logits:
569
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
570
+ shape [batch_size X sequence_length, num_experts].
571
+ num_experts:
572
+ Number of experts
573
+ top_k:
574
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
575
+ parameter.
576
+ attention_mask (`torch.Tensor`, *optional*):
577
+ The attention_mask used in forward function
578
+ shape [batch_size X sequence_length] if not None.
579
+
580
+ Returns:
581
+ The auxiliary loss.
582
+ """
583
+ if gate_logits is None or not isinstance(gate_logits, tuple):
584
+ return 0
585
+
586
+ if isinstance(gate_logits, tuple):
587
+ compute_device = gate_logits[0].device
588
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
589
+
590
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
591
+
592
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
593
+
594
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
595
+
596
+ if attention_mask is None:
597
+ # Compute the percentage of tokens routed to each experts
598
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
599
+
600
+ # Compute the average probability of routing to these experts
601
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
602
+ else:
603
+ batch_size, sequence_length = attention_mask.shape
604
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
605
+
606
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
607
+ expert_attention_mask = (
608
+ attention_mask[None, :, :, None, None]
609
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
610
+ .reshape(-1, top_k, num_experts)
611
+ .to(compute_device)
612
+ )
613
+
614
+ # Compute the percentage of tokens routed to each experts
615
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
616
+ expert_attention_mask, dim=0
617
+ )
618
+
619
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
620
+ router_per_expert_attention_mask = (
621
+ attention_mask[None, :, :, None]
622
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
623
+ .reshape(-1, num_experts)
624
+ .to(compute_device)
625
+ )
626
+
627
+ # Compute the average probability of routing to these experts
628
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
629
+ router_per_expert_attention_mask, dim=0
630
+ )
631
+
632
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
633
+ return overall_loss * num_experts
634
+
635
+
636
+ @auto_docstring
637
+ class Qwen3MoeForCausalLM(Qwen3MoePreTrainedModel, GenerationMixin):
638
+ _tied_weights_keys = ["lm_head.weight"]
639
+ _tp_plan = {"lm_head": "colwise_rep"}
640
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
641
+
642
+ def __init__(self, config):
643
+ super().__init__(config)
644
+ self.model = Qwen3MoeModel(config)
645
+ self.vocab_size = config.vocab_size
646
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
647
+ self.router_aux_loss_coef = config.router_aux_loss_coef
648
+ self.num_experts = config.num_experts
649
+ self.num_experts_per_tok = config.num_experts_per_tok
650
+
651
+ # Initialize weights and apply final processing
652
+ self.post_init()
653
+
654
+ @can_return_tuple
655
+ @auto_docstring
656
+ def forward(
657
+ self,
658
+ input_ids: Optional[torch.LongTensor] = None,
659
+ attention_mask: Optional[torch.Tensor] = None,
660
+ position_ids: Optional[torch.LongTensor] = None,
661
+ past_key_values: Optional[Cache] = None,
662
+ inputs_embeds: Optional[torch.FloatTensor] = None,
663
+ labels: Optional[torch.LongTensor] = None,
664
+ use_cache: Optional[bool] = None,
665
+ output_router_logits: Optional[bool] = None,
666
+ cache_position: Optional[torch.LongTensor] = None,
667
+ logits_to_keep: Union[int, torch.Tensor] = 0,
668
+ **kwargs: Unpack[TransformersKwargs],
669
+ ) -> MoeCausalLMOutputWithPast:
670
+ r"""
671
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
672
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
673
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
674
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
675
+
676
+ Example:
677
+
678
+ ```python
679
+ >>> from transformers import AutoTokenizer, Qwen3MoeForCausalLM
680
+
681
+ >>> model = Qwen3MoeForCausalLM.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
682
+ >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
683
+
684
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
685
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
686
+
687
+ >>> # Generate
688
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
689
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
690
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
691
+ ```"""
692
+
693
+ output_router_logits = (
694
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
695
+ )
696
+
697
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
698
+ outputs: MoeModelOutputWithPast = self.model(
699
+ input_ids=input_ids,
700
+ attention_mask=attention_mask,
701
+ position_ids=position_ids,
702
+ past_key_values=past_key_values,
703
+ inputs_embeds=inputs_embeds,
704
+ use_cache=use_cache,
705
+ output_router_logits=output_router_logits,
706
+ cache_position=cache_position,
707
+ **kwargs,
708
+ )
709
+
710
+ hidden_states = outputs.last_hidden_state
711
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
712
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
713
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
714
+
715
+ loss = None
716
+ if labels is not None:
717
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
718
+
719
+ aux_loss = None
720
+ if output_router_logits:
721
+ aux_loss = load_balancing_loss_func(
722
+ outputs.router_logits,
723
+ self.num_experts,
724
+ self.num_experts_per_tok,
725
+ attention_mask,
726
+ )
727
+ if labels is not None:
728
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
729
+
730
+ return MoeCausalLMOutputWithPast(
731
+ loss=loss,
732
+ aux_loss=aux_loss,
733
+ logits=logits,
734
+ past_key_values=outputs.past_key_values,
735
+ hidden_states=outputs.hidden_states,
736
+ attentions=outputs.attentions,
737
+ router_logits=outputs.router_logits,
738
+ )
739
+
740
+
741
+ class Qwen3MoeForSequenceClassification(GenericForSequenceClassification, Qwen3MoePreTrainedModel):
742
+ pass
743
+
744
+
745
+ class Qwen3MoeForTokenClassification(GenericForTokenClassification, Qwen3MoePreTrainedModel):
746
+ pass
747
+
748
+
749
+ class Qwen3MoeForQuestionAnswering(GenericForQuestionAnswering, Qwen3MoePreTrainedModel):
750
+ base_model_prefix = "transformer" # For BC, where `transformer` was used instead of `model`
751
+
752
+
753
+ __all__ = [
754
+ "Qwen3MoeForCausalLM",
755
+ "Qwen3MoeForQuestionAnswering",
756
+ "Qwen3MoeModel",
757
+ "Qwen3MoePreTrainedModel",
758
+ "Qwen3MoeForSequenceClassification",
759
+ "Qwen3MoeForTokenClassification",
760
+ ]
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 1048576,
235
+ "pad_token": "<|endoftext|>",
236
+ "padding_side": "right",
237
+ "split_special_tokens": false,
238
+ "tokenizer_class": "Qwen2Tokenizer",
239
+ "unk_token": null
240
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff