Commit c31cf57
Parent(s): Duplicate from sarvamai/sarvam-30b
Co-authored-by: Rahul <rahular@users.noreply.huggingface.co>
- .gitattributes +36 -0
- README.md +296 -0
- chat_template.jinja +97 -0
- config.json +53 -0
- configuration_sarvam_moe.py +103 -0
- generation_config.json +6 -0
- hotpatch_vllm.py +114 -0
- model-00001-of-00026.safetensors +3 -0
- model-00002-of-00026.safetensors +3 -0
- model-00003-of-00026.safetensors +3 -0
- model-00004-of-00026.safetensors +3 -0
- model-00005-of-00026.safetensors +3 -0
- model-00006-of-00026.safetensors +3 -0
- model-00007-of-00026.safetensors +3 -0
- model-00008-of-00026.safetensors +3 -0
- model-00009-of-00026.safetensors +3 -0
- model-00010-of-00026.safetensors +3 -0
- model-00011-of-00026.safetensors +3 -0
- model-00012-of-00026.safetensors +3 -0
- model-00013-of-00026.safetensors +3 -0
- model-00014-of-00026.safetensors +3 -0
- model-00015-of-00026.safetensors +3 -0
- model-00016-of-00026.safetensors +3 -0
- model-00017-of-00026.safetensors +3 -0
- model-00018-of-00026.safetensors +3 -0
- model-00019-of-00026.safetensors +3 -0
- model-00020-of-00026.safetensors +3 -0
- model-00021-of-00026.safetensors +3 -0
- model-00022-of-00026.safetensors +3 -0
- model-00023-of-00026.safetensors +3 -0
- model-00024-of-00026.safetensors +3 -0
- model-00025-of-00026.safetensors +3 -0
- model-00026-of-00026.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_sarvam_moe.py +1025 -0
- sarvam.py +788 -0
- special_tokens_map.json +33 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,296 @@
---
language:
- en
- hi
- bn
- ta
- te
- mr
- gu
- kn
- ml
- pa
- or
- as
- ur
- sa
- ne
- sd
- kok
- mai
- doi
- mni
- sat
- ks
- bo
library_name: transformers
license: apache-2.0
pipeline_tag: text-generation
---

![]()

Want a bigger model? Download [Sarvam-105B](https://huggingface.co/sarvamai/sarvam-105b)!

## Index

1. [Introduction](#introduction)
2. [Architecture](#architecture)
3. [Benchmarks](#benchmarks)
   - Knowledge & Coding
   - Reasoning & Math
   - Agentic
4. [Inference](#inference)
   - Hugging Face
   - [vLLM](https://github.com/vllm-project/vllm)
   - [SGLang](https://github.com/sgl-project/sglang)
5. [Footnote](#footnote)
6. [Citation](#citation)

## Introduction

**Sarvam-30B** is an advanced Mixture-of-Experts (MoE) model with 2.4B non-embedding active parameters, designed primarily for practical deployment. It combines strong reasoning, reliable coding ability, and best-in-class conversational quality across Indian languages. Sarvam-30B is built to run reliably in resource-constrained environments and can handle multilingual voice calls while performing tool calls.

A major focus during training was the Indian context and languages, resulting in **state-of-the-art performance across 22 Indian languages** for its model size.

Sarvam-30B is open-sourced under the **Apache License**. For more details, see our [blog](https://www.sarvam.ai/blogs/sarvam-30b-105b).

## Architecture

The 30B MoE model is designed for throughput and memory efficiency, which it achieves through fewer layers, grouped KV attention, and small experts. It uses 19 layers, a dense FFN `intermediate_size` of 8192, a `moe_intermediate_size` of 1024, top-6 routing, grouped KV heads (`num_key_value_heads=4`), and an unusually high `rope_theta` (`8e6`) for long-context stability without RoPE scaling. It has 128 routed experts plus one shared expert, a routed scaling factor of 2.5, and auxiliary-loss-free router balancing.
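As a sanity check, the quoted active-parameter figure can be roughly reproduced from the values in `config.json`. The sketch below is illustrative only; it ignores norms, QK-norm weights, the router bias, and other small tensors:

```python
# Back-of-the-envelope estimate of active non-embedding parameters per token,
# using values from config.json (layer 0 is dense per first_k_dense_replace=1).
hidden, layers = 4096, 19
heads, head_dim, kv_heads = 64, 64, 4
dense_ffn, moe_ffn = 8192, 1024
top_k, shared, num_experts = 6, 1, 128
first_k_dense = 1

attn = layers * (hidden * heads * head_dim           # q_proj
                 + 2 * hidden * kv_heads * head_dim  # k_proj, v_proj
                 + heads * head_dim * hidden)        # o_proj
dense = first_k_dense * 3 * hidden * dense_ffn       # gate/up/down of the dense FFN
moe = (layers - first_k_dense) * (
    (top_k + shared) * 3 * hidden * moe_ffn          # active experts per token
    + num_experts * hidden)                          # router weight
print(f"~{(attn + dense + moe) / 1e9:.2f}B active non-embedding params")
# ~2.37B, consistent with the quoted 2.4B figure
```
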
## Benchmarks

<details>
<summary>Knowledge & Coding</summary>

| Benchmark | Sarvam-30B | Gemma 27B It | Mistral-3.2-24B | OLMo 3.1 32B Think | Nemotron-3-Nano-30B-A3B | Qwen3-30B-Thinking-2507 | GLM 4.7 Flash | GPT-OSS-20B |
|---|---|---|---|---|---|---|---|---|
| Math500 | 97.0 | 87.4 | 69.4 | 96.2 | 98.0 | 97.6 | 97.0 | 94.2 |
| HumanEval | 92.1 | 88.4 | 92.9 | 95.1 | 97.6 | 95.7 | 96.3 | 95.7 |
| MBPP | 92.7 | 81.8 | 78.3 | 58.7 | 91.9 | 94.3 | 91.8 | 95.3 |
| Live Code Bench v6 | 70.0 | 28.0 | 26.0 | 73.0 | 68.3 | 66.0 | 64.0 | 61.0 |
| MMLU | 85.1 | 81.2 | 80.5 | 86.4 | 84.0 | 88.4 | 86.9 | 85.3 |
| MMLU Pro | 80.0 | 68.1 | 69.1 | 72.0 | 78.3 | 80.9 | 73.6 | 75.0 |
| MILU | 76.8 | 69.2 | 67.9 | 69.9 | 64.8 | 82.6 | 75.6 | 73.7 |
| Arena Hard v2 | 49.0 | 50.1 | 43.1 | 42.0 | 67.7 | 72.1 | 58.1 | 62.9 |
| Writing Bench | 78.7 | 71.4 | 70.3 | 75.7 | 83.7 | 85.0 | 79.2 | 79.1 |

</details>

<details>
<summary>Reasoning & Math</summary>

| Benchmark | Sarvam-30B | OLMo 3.1 32B | Nemotron-3-Nano-30B | Qwen3-30B-Thinking-2507 | GLM 4.7 Flash | GPT-OSS-20B |
|---|---|---|---|---|---|---|
| GPQA Diamond | 66.5 | 57.5 | 73.0 | 73.4 | 75.2 | 71.5 |
| AIME 25 (w/ Tools) | 88.3 (96.7) | 78.1 (81.7) | 89.1 (99.2) | 85.0 (-) | 91.6 (-) | 91.7 (98.7) |
| HMMT (Feb 25) | 73.3 | 51.7 | 85.0 | 71.4 | 85.0 | 76.7 |
| HMMT (Nov 25) | 74.2 | 58.3 | 75.0 | 73.3 | 81.7 | 68.3 |
| Beyond AIME | 58.3 | 48.5 | 64.0 | 61.0 | 60.0 | 46.0 |

</details>

<details>
<summary>Agentic</summary>

| Benchmark | Sarvam-30B | Nemotron-3-Nano-30B | Qwen3-30B-Thinking-2507 | GLM 4.7 Flash | GPT-OSS-20B |
|---|---|---|---|---|---|
| BrowseComp | 35.5 | 23.8 | 2.9 | 42.8 | 28.3 |
| SWE Bench Verified | 34.0 | 38.8 | 22.0 | 59.2 | 34.0 |
| τ² Bench (avg.) | 45.7 | 49.0 | 47.7 | 79.5 | 48.7 |

> See the [footnote](#footnote) for evaluation details.

</details>

## Inference

<details>
<summary>Hugging Face</summary>

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model_name = "sarvamai/sarvam-30b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map="auto")

def generate_text(
    prompt: str,
    max_new_tokens: int = 2048,
    temperature: float = 0.8,
    top_p: float = 0.95,
    repetition_penalty: float = 1.0,
) -> str:
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    generation_config = GenerationConfig(
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )

    with torch.no_grad():
        output_ids = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            generation_config=generation_config,
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

prompts = [
    "What is the capital city of New Zealand?",
]

for prompt in prompts:
    templated_prompt = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True,
    )
    output = generate_text(templated_prompt, max_new_tokens=512)
    print("Prompt: ", prompt)
    print("Generated text: ", output)
    print("=" * 100)
```
</details>

<details>
<summary>SGLang</summary>

**Install the latest SGLang from source**

```bash
git clone https://github.com/sgl-project/sglang.git
cd sglang
pip install -e "python[all]"
```

**Instantiate the model and run**

```python
import sglang as sgl
from transformers import AutoTokenizer

model_path = "sarvamai/sarvam-30b"
tokenizer = AutoTokenizer.from_pretrained(model_path)  # needed for chat templating below
engine = sgl.Engine(
    model_path=model_path,
    tp_size=2,
    mem_fraction_static=0.8,
    trust_remote_code=True,
    dtype="bfloat16",
    prefill_attention_backend="fa3",
    decode_attention_backend="fa3",
)

sampling_params = {
    "temperature": 0.8,
    "max_new_tokens": 2048,
    "repetition_penalty": 1.0,
}

prompts = [
    "Which treaty formally ended World War I and imposed heavy reparations on Germany?",
]

outputs = engine.generate(
    [
        tokenizer.apply_chat_template(
            [{"role": "user", "content": prompt}],
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=True,
        )
        for prompt in prompts
    ],
    sampling_params,
)
for p, o in zip(prompts, outputs):
    print("Prompt: ", p)
    print("Generated text: ", o["text"])
    print("=" * 100)
```
</details>

<details>
<summary>vLLM</summary>

Note: a PR adding native support for the Sarvam models to vLLM is currently open ([link](https://github.com/vllm-project/vllm/pull/33942)). Until it is merged, there are two options.

#### Option 1: install from source (hard)

* Use the custom fork here: [link](https://github.com/rahul-sarvam/vllm)
* Follow the instructions here to install from source: [link](https://docs.vllm.ai/en/latest/getting_started/installation/gpu/index.html#build-wheel-from-source)

#### Option 2: hot-patch (easy)

* Run [hotpatch_vllm.py](./hotpatch_vllm.py)
* This will do the following:
  * install vllm==0.15.0
  * add 2 model entries to `registry.py`
  * download the model executors for `sarvam-105b` and `sarvam-30b`

Once this is done, you can run vLLM as usual:

```python
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

model_path = "sarvamai/sarvam-30b"
tokenizer = AutoTokenizer.from_pretrained(model_path)
llm = LLM(
    model=model_path,
    trust_remote_code=True,
    max_model_len=2048,
    tensor_parallel_size=8,
    max_num_seqs=16,
)
sampling_params = SamplingParams(
    temperature=0.8,
    max_tokens=2048,
    repetition_penalty=1.0,
    spaces_between_special_tokens=True,
)

prompts = [
    "Who wrote The Picture of Dorian Gray?",
]

outputs = llm.generate(
    [
        tokenizer.apply_chat_template(
            [{"role": "user", "content": prompt}],
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=True,
        )
        for prompt in prompts
    ],
    sampling_params,
)
for p, o in zip(prompts, outputs):
    print("Prompt: ", p)
    print("Generated text: ", o.outputs[0].text)
    print("=" * 100)
```
</details>

## Footnote

* **General settings**: All benchmarks are evaluated with a maximum context length of 65,536 tokens.
* **Reasoning & Math benchmarks** (Math500, MMLU, MMLU Pro, GPQA Diamond, AIME 25, Beyond AIME, HMMT, HumanEval, MBPP): Evaluated with `temperature=1.0, top_p=1.0, max_new_tokens=65536`.
* **Coding & Knowledge benchmarks** (Live Code Bench v6, Arena Hard v2, IF Eval): Evaluated with `temperature=1.0, top_p=1.0, max_new_tokens=65536`.
* **Writing Bench**: Responses generated using the official Writing-Bench parameters: `temperature=0.7, top_p=0.8, top_k=20, max_length=16000`. Scoring performed using the official Writing-Bench critic model with `temperature=1.0, top_p=0.95, max_length=2048`.
* **Agentic benchmarks** (BrowseComp, SWE Bench Verified, τ² Bench): Evaluated with `temperature=0.5, top_p=1.0, max_new_tokens=32768`.

## Citation

```
@misc{sarvam_sovereign_models,
  title        = {Introducing Sarvam's Sovereign Models},
  author       = {{Sarvam Foundation Models Team}},
  year         = {2026},
  howpublished = {\url{https://www.sarvam.ai/blogs/sarvam-30b-105b}},
  note         = {Accessed: 2026-03-03}
}
```
chat_template.jinja
ADDED
@@ -0,0 +1,97 @@
{{- '[@BOS@]\n' }}
{%- if tools -%}
<|start_of_turn|><|tool_declare|>
<tools>
{% for tool in tools %}
{{ tool | tojson(ensure_ascii=False) }}
{% endfor %}
</tools>
{{- '<|end_of_turn|>\n' }}{%- endif -%}
{%- macro visible_text(content) -%}
    {%- if content is string -%}
        {{- content }}
    {%- elif content is iterable and content is not mapping -%}
        {%- for item in content -%}
            {%- if item is mapping and item.type == 'text' -%}
                {{- item.text }}
            {%- elif item is string -%}
                {{- item }}
            {%- endif -%}
        {%- endfor -%}
    {%- elif content is none -%}
        {{- '' }}
    {%- else -%}
        {{- content }}
    {%- endif -%}
{%- endmacro -%}
{%- set ns = namespace(last_user_index=-1) %}
{%- for m in messages %}
    {%- if m.role == 'user' %}
        {% set ns.last_user_index = loop.index0 -%}
    {%- endif %}
{%- endfor %}
{% for m in messages %}
{%- if m.role == 'user' -%}<|start_of_turn|><|user|>
{{ visible_text(m.content) }}
{{- '<|nothink|>' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("<|nothink|>")) else '' -}}
{{- '<|end_of_turn|>\n' }}
{%- elif m.role == 'assistant' -%}
{{- '<|start_of_turn|><|assistant|>\n' }}
{%- set reasoning_content = '' %}
{%- set content = visible_text(m.content) %}
{%- if m.reasoning_content is string %}
    {%- set reasoning_content = m.reasoning_content %}
{%- else %}
    {%- if '</think>' in content %}
        {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
        {%- set content = content.split('</think>')[-1].lstrip('\n') %}
    {%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_user_index and reasoning_content -%}
{{ '<think>' + reasoning_content.strip() + '</think>'}}
{%- else -%}
{{ '<think></think>' }}
{%- endif -%}
{%- if content.strip() -%}
{{ '\n' + content.strip() }}
{%- endif -%}
{% if m.tool_calls %}
{% for tc in m.tool_calls %}
{%- if tc.function %}
    {%- set tc = tc.function %}
{%- endif %}
{{ '\n<tool_call>' + tc.name }}
{% set _args = tc.arguments %}
{% for k, v in _args.items() %}
<arg_key>{{ k }}</arg_key>
<arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
{% endfor %}
</tool_call>{% endfor %}
{% endif %}
{{- '<|end_of_turn|>\n' }}
{%- elif m.role == 'tool' -%}
{%- if m.content is string -%}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|start_of_turn|><|observation|>' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- m.content }}
{{- '\n</tool_response>' }}
{%- else -%}
<|start_of_turn|><|observation|>{% for tr in m.content %}

<tool_response>
{{ tr.output if tr.output is defined else tr }}
</tool_response>{% endfor -%}
{% endif -%}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|end_of_turn|>\n' }}{%- endif -%}
{%- elif m.role == 'system' -%}
<|start_of_turn|><|system|>
{{ visible_text(m.content) }}
{{- '<|end_of_turn|>\n' }}
{%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{- '<|start_of_turn|><|assistant|>\n' }}
{%- endif -%}
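To see what this template actually renders for a given conversation, you can print the untokenized prompt through the tokenizer. A minimal sketch (assumes Hub access to `sarvamai/sarvam-30b`; the special strings such as `<|start_of_turn|>` and `<|nothink|>` come from the template above):

```python
# Render the chat template without tokenizing, to inspect the prompt format.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sarvamai/sarvam-30b")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Namaste! Who are you?"},
]
print(tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,   # appends <|start_of_turn|><|assistant|>
    enable_thinking=False,        # appends <|nothink|> to user turns
))
```
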
config.json
ADDED
@@ -0,0 +1,53 @@
{
  "architectures": [
    "SarvamMoEForCausalLM"
  ],
  "attention_dropout": 0.0,
  "attn_implementation": null,
  "auto_map": {
    "AutoConfig": "configuration_sarvam_moe.SarvamMoEConfig",
    "AutoModel": "modeling_sarvam_moe.SarvamMoEModel",
    "AutoModelForCausalLM": "modeling_sarvam_moe.SarvamMoEForCausalLM"
  },
  "dtype": "float32",
  "embedding_dropout": 0.0,
  "eos_token_id": 1,
  "first_k_dense_replace": 1,
  "head_dim": 64,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.006,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "max_window_layers": 19,
  "model_type": "sarvam_moe",
  "moe_intermediate_size": 1024,
  "moe_router_enable_expert_bias": true,
  "moe_shared_expert_intermediate_size": 1024,
  "n_group": 1,
  "norm_topk_prob": true,
  "num_attention_heads": 64,
  "num_experts": 128,
  "num_experts_per_tok": 6,
  "num_hidden_layers": 19,
  "num_key_value_heads": 4,
  "num_shared_experts": 1,
  "output_dropout": 0.0,
  "output_router_logits": false,
  "pad_token_id": 0,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 8000000,
  "routed_scaling_factor": 2.5,
  "router_dtype": "fp32",
  "score_function": "sigmoid",
  "tie_word_embeddings": false,
  "topk_group": 1,
  "transformers_version": "4.57.2",
  "use_bias": false,
  "use_cache": true,
  "use_qk_norm": true,
  "use_qkv_bias": false,
  "use_rmsnorm": true,
  "vocab_size": 262144
}
configuration_sarvam_moe.py
ADDED
@@ -0,0 +1,103 @@
from transformers.configuration_utils import PretrainedConfig


class SarvamMoEConfig(PretrainedConfig):
    model_type = "sarvam_moe"

    def __init__(
        self,
        vocab_size=262144,
        hidden_size=4096,
        intermediate_size=8192,
        num_hidden_layers=19,
        num_attention_heads=16,
        num_key_value_heads=4,
        hidden_act="silu",
        use_qkv_bias=False,
        use_bias=False,
        rms_norm_eps=1e-06,
        tie_word_embeddings=False,
        embedding_dropout=0.0,
        attention_dropout=0.0,
        output_dropout=0.0,
        initializer_range=0.006,
        max_position_embeddings=4096,
        rope_theta=10000.0,
        use_cache=True,
        max_window_layers=19,
        rope_scaling=None,
        pad_token_id=0,
        eos_token_id=1,
        num_experts=128,
        num_shared_experts=1,
        num_experts_per_tok=6,
        n_group=1,
        topk_group=1,
        moe_intermediate_size=1024,
        first_k_dense_replace=1,
        head_dim=256,
        output_router_logits=False,
        use_qk_norm=True,
        moe_router_enable_expert_bias=True,
        routed_scaling_factor=2.5,
        attn_implementation: str = "eager",
        **kwargs,
    ):
        self.num_hidden_layers = num_hidden_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.use_qkv_bias = use_qkv_bias
        self.use_bias = use_bias
        self.rms_norm_eps = rms_norm_eps
        self.embedding_dropout = embedding_dropout
        self.attention_dropout = attention_dropout
        self.output_dropout = output_dropout
        self.initializer_range = initializer_range
        self.max_position_embeddings = max_position_embeddings
        self.rope_theta = rope_theta
        self.use_cache = use_cache
        self.max_window_layers = max_window_layers
        self.head_dim = head_dim or hidden_size // num_attention_heads
        self.rope_scaling = rope_scaling
        self.use_qk_norm = use_qk_norm
        self.moe_router_enable_expert_bias = moe_router_enable_expert_bias
        self.routed_scaling_factor = routed_scaling_factor
        self.num_experts = num_experts
        self.num_shared_experts = num_shared_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.n_group = n_group
        self.topk_group = topk_group
        self.moe_intermediate_size = moe_intermediate_size
        self.first_k_dense_replace = first_k_dense_replace
        self.output_router_logits = output_router_logits
        self.attn_implementation = attn_implementation
        self._attn_implementation = attn_implementation

        # Sharding plans consumed by HF's tensor-parallel / pipeline-parallel utilities.
        self.base_model_tp_plan = {
            "layers.*.attention.query_key_value": "colwise",
            "layers.*.attention.dense": "rowwise",
            "layers.*.mlp.gate_proj": "colwise",
            "layers.*.mlp.up_proj": "colwise",
            "layers.*.mlp.down_proj": "rowwise",
            "layers.*.mlp.experts.*.gate_proj": "colwise",
            "layers.*.mlp.experts.*.up_proj": "colwise",
            "layers.*.mlp.experts.*.down_proj": "rowwise",
            "layers.*.mlp.shared_experts.gate_proj": "colwise",
            "layers.*.mlp.shared_experts.up_proj": "colwise",
            "layers.*.mlp.shared_experts.down_proj": "rowwise",
        }
        self.base_model_pp_plan = {
            "word_embeddings": (["input_ids"], ["inputs_embeds"]),
            "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
            "norm": (["hidden_states"], ["hidden_states"]),
        }

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
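In practice this class is loaded through the `auto_map` entries in `config.json` rather than imported directly. A minimal sketch of that path (the printed values reflect `config.json`, which overrides the class defaults above):

```python
# trust_remote_code pulls configuration_sarvam_moe.py from the repo and
# instantiates SarvamMoEConfig with the values stored in config.json.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("sarvamai/sarvam-30b", trust_remote_code=True)
print(cfg.model_type, cfg.num_experts, cfg.num_experts_per_tok)  # sarvam_moe 128 6
```
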
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "eos_token_id": 26,
  "pad_token_id": 0,
  "transformers_version": "4.57.2"
}
hotpatch_vllm.py
ADDED
@@ -0,0 +1,114 @@
#!/usr/bin/env python3
from __future__ import annotations

import sys
import subprocess
from pathlib import Path
from urllib.request import urlopen, Request


HF_BLOB_URL = "https://huggingface.co/sarvamai/sarvam-30b/blob/main/sarvam.py"

NEW_LINES = [
    '    "SarvamMoEForCausalLM": ("sarvam", "SarvamMoEForCausalLM"),\n',
    '    "SarvamMLAForCausalLM": ("sarvam", "SarvamMLAForCausalLM"),\n',
]


def run(cmd: list[str]) -> None:
    print(f"+ {' '.join(cmd)}")
    subprocess.check_call(cmd)


def pip_install_vllm() -> None:
    run([sys.executable, "-m", "pip", "install", "vllm==0.15.0"])


def find_vllm_dir() -> Path:
    import vllm  # type: ignore

    vllm_dir = Path(vllm.__file__).resolve().parent
    print(f"Detected vLLM package dir: {vllm_dir}")
    return vllm_dir


def patch_text_generation_models(registry_path: Path) -> None:
    if not registry_path.exists():
        raise FileNotFoundError(f"registry.py not found at: {registry_path}")

    text = registry_path.read_text(encoding="utf-8")
    lines = text.splitlines(keepends=True)

    # Idempotency: if both keys already present, do nothing
    if (
        any('"SarvamMoEForCausalLM"' in l for l in lines)
        and any('"SarvamMLAForCausalLM"' in l for l in lines)
    ):
        print("registry.py already contains Sarvam entries. Skipping patch.")
        return

    # Find the start of the _TEXT_GENERATION_MODELS dict
    start_idx = None
    for i, line in enumerate(lines):
        if line.strip() == "_TEXT_GENERATION_MODELS = {":
            start_idx = i
            break

    if start_idx is None:
        raise RuntimeError(
            "Could not find '_TEXT_GENERATION_MODELS = {' in registry.py. "
            "vLLM version/layout may differ."
        )

    # Find the matching closing brace for that dict using brace depth
    depth = 0
    end_idx = None
    for j in range(start_idx, len(lines)):
        depth += lines[j].count("{")
        depth -= lines[j].count("}")
        if j > start_idx and depth == 0:
            end_idx = j
            break

    if end_idx is None:
        raise RuntimeError("Failed to find end of _TEXT_GENERATION_MODELS dict (brace matching).")

    # Insert new entries just before the closing brace line
    insert_at = end_idx
    lines[insert_at:insert_at] = NEW_LINES

    registry_path.write_text("".join(lines), encoding="utf-8")
    print(f"Patched _TEXT_GENERATION_MODELS in: {registry_path}")


def download_sarvam_py(dst: Path) -> None:
    # Use /raw/ to download file contents, not HTML
    raw_url = HF_BLOB_URL.replace("/blob/", "/raw/")
    print(f"Downloading sarvam.py from: {raw_url}")

    req = Request(raw_url, headers={"User-Agent": "vllm-hotpatch-script"})
    with urlopen(req) as resp:
        data = resp.read()

    dst.parent.mkdir(parents=True, exist_ok=True)
    dst.write_bytes(data)
    print(f"Wrote: {dst}")


def main() -> None:
    pip_install_vllm()

    vllm_dir = find_vllm_dir()
    registry_path = vllm_dir / "model_executor" / "models" / "registry.py"
    sarvam_path = vllm_dir / "model_executor" / "models" / "sarvam.py"

    patch_text_generation_models(registry_path)
    download_sarvam_py(sarvam_path)

    print("\nDone.")
    print(f"- Registry patched: {registry_path}")
    print(f"- Sarvam module installed: {sarvam_path}")


if __name__ == "__main__":
    main()
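A quick smoke test after running the script: the new keys should appear in the dict the script patches. This sketch only relies on the `_TEXT_GENERATION_MODELS` dict targeted above and assumes the vllm==0.15.0 module layout:

```python
# Run the hot-patch, then confirm the Sarvam entry landed in the registry dict.
import subprocess, sys
from importlib import import_module

subprocess.check_call([sys.executable, "hotpatch_vllm.py"])
registry = import_module("vllm.model_executor.models.registry")
print("SarvamMoEForCausalLM" in registry._TEXT_GENERATION_MODELS)  # True if patched
```
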
model-00001-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:efeac3cd0e9eb5a276aea37056e3abfd7cd0583b3e4f83ec871bf6fb62ead7ed
size 4999645904

model-00002-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8fd3a943ae19559d1e9e0f5c3e6289843404bcfd7a699caf2f70bcc97fda2ba2
size 4999647000

model-00003-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ef5e541b4732507466cecfad0f32810f677127d4739cbd23b74c23acdf5bde4
size 4993389224

model-00004-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e9928a30813e2de87e91b06b56a2ffe18f2e46049a434eb585b351a52e8c3dbb
size 4993389224

model-00005-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:718de0adb892541452bd3f9abbe11f108789b58c41d5bc776dbc0963f715ee6d
size 4993389248

model-00006-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a475d430246ce0f64689ed324f98283e27d6e469e35c8c3050a5b3a819b500c
size 4999647008

model-00007-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a612e55a2ce14c6df0a369564099635883235e80d93783e965742e61b645eda4
size 4993389224

model-00008-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01f625b20f0716e7eaf2b2bb2bc5ffdcb590ffcd91e790dfdd5850a080c1174a
size 4993389224

model-00009-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c900c904bf8e6c1c4269bcd5669039be7afc6b62ad55675a8baedce82657aad7
size 4993389248

model-00010-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a17cf9a02bfab152109d0894d8299e247c690d59a5dac705415d9c148dab3e8a
size 4999647016

model-00011-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69d7ef47df1b3ec6fcb6b0be099abdb8d832db13f3dad323ad49395b8fa38ccf
size 4993389216

model-00012-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ea7b63b62612fe78d9dd2e4e144d4f5a31c84dc8882cb4ec63f9c082dd86cc9
size 4993389224

model-00013-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c138ca20e37dc040af76b10232bc9dffe496da28aff969a42816044f1efc4d1f
size 4993389280

model-00014-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a22c6fb81a8f357eaa545a99da1fed0df6452e91253013c4f7d24a974e6cc724
size 4999647320

model-00015-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b36d14b6ec4bd108331d91ecd405bebbba3d857c4c21b87a6371972c3e828cd0
size 4993389512

model-00016-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f35318a600e102dbcb06473c6c8d3f15ec880b56bd42c2026cd1f9227ccaeb1
size 4993389528

model-00017-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14515810ae889da143c4f17c273f992ce577e8f8545c426093c5d763f5ce4663
size 4993389536

model-00018-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae4825d8232c200053b0b571937d299f58a7563b43c4e927e44dbdf51ac4b3e9
size 4999647328

model-00019-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06e433bb2c02ca76a1e6f702a57f89d4d25b3fab5b832a4dbdea4aa888409316
size 4993389512

model-00020-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0c6c0d47e3005836553859a42e0699235cc4a856c6954fe6d7f07ad59a5b6083
size 4993389520

model-00021-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9a8df1a1b2f740927500ac34b30ccea5faa4158b99550f533a51ae7505fcb8a
size 4993389536

model-00022-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b63c24103409fc251ac09b281d607a9ced0b7e25da5e61d8142e2204bb2e405
size 4999647336

model-00023-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88dbc5536a7bfcc89bf1c7f060c58f7c072012129ba60f64a173f52f75aa6244
size 4993389504

model-00024-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f65c99212588029b979610c0f4b94dd18b22788284e9cbfbcf9ec4b70681dbf6
size 4993389528

model-00025-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e359c0b5b2096d800a6923279a3dbaa15389b6b118b8a0fb907f9c31dc34a6af
size 4431365192

model-00026-of-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f39a62c958f34788e01d2c45715a75c2a1238286994ebfb57335775beb2b5bf
size 4294967424
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
modeling_sarvam_moe.py
ADDED
|
@@ -0,0 +1,1025 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""PyTorch Sarvam MoE model."""
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
from typing import List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
from torch import nn
|
| 9 |
+
|
| 10 |
+
from transformers.activations import ACT2FN
|
| 11 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 12 |
+
from transformers.modeling_attn_mask_utils import (
|
| 13 |
+
AttentionMaskConverter,
|
| 14 |
+
_prepare_4d_attention_mask,
|
| 15 |
+
_prepare_4d_causal_attention_mask,
|
| 16 |
+
_prepare_4d_causal_attention_mask_for_sdpa,
|
| 17 |
+
)
|
| 18 |
+
from transformers.modeling_outputs import MoeModelOutputWithPast
|
| 19 |
+
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
|
| 20 |
+
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
|
| 21 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 22 |
+
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
|
| 23 |
+
from transformers.utils import (
|
| 24 |
+
is_flash_attn_2_available,
|
| 25 |
+
is_flash_attn_greater_or_equal_2_10,
|
| 26 |
+
logging,
|
| 27 |
+
)
|
| 28 |
+
from transformers.generation.utils import GenerationMixin
|
| 29 |
+
from dataclasses import dataclass
|
| 30 |
+
from transformers.utils import ModelOutput
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
if is_flash_attn_2_available():
|
| 34 |
+
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
| 35 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
|
| 36 |
+
|
| 37 |
+
from .configuration_sarvam_moe import SarvamMoEConfig
|
| 38 |
+
|
| 39 |
+
logger = logging.get_logger(__name__)
|
| 40 |
+
|
| 41 |
+
_CONFIG_FOR_DOC = "SarvamMoEConfig"
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@dataclass
|
| 45 |
+
class SarvamMoECausalLMOutputWithPast(ModelOutput):
|
| 46 |
+
loss: Optional[torch.FloatTensor] = None
|
| 47 |
+
logits: Optional[torch.FloatTensor] = None
|
| 48 |
+
past_key_values: Optional[Cache] = None
|
| 49 |
+
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
| 50 |
+
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
| 51 |
+
z_loss: Optional[torch.FloatTensor] = None
|
| 52 |
+
aux_loss: Optional[torch.FloatTensor] = None
|
| 53 |
+
router_logits: Optional[tuple[torch.FloatTensor]] = None
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class SarvamMoEModelOutputWithPast(MoeModelOutputWithPast):
|
| 57 |
+
pass
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _get_unpad_data(attention_mask):
|
| 61 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
| 62 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
| 63 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
| 64 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
|
| 65 |
+
return indices, cu_seqlens, max_seqlen_in_batch
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
| 69 |
+
return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _make_causal_mask(
|
| 73 |
+
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
| 74 |
+
):
|
| 75 |
+
return AttentionMaskConverter._make_causal_mask(
|
| 76 |
+
input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class SarvamMoERMSNorm(nn.Module):
|
| 81 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 82 |
+
super().__init__()
|
| 83 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 84 |
+
self.variance_epsilon = eps
|
| 85 |
+
|
| 86 |
+
def forward(self, hidden_states):
|
| 87 |
+
input_dtype = hidden_states.dtype
|
| 88 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 89 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 90 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 91 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
ALL_LAYERNORM_LAYERS.append(SarvamMoERMSNorm)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class SarvamMoERotaryEmbedding(nn.Module):
|
| 98 |
+
def __init__(self, config: SarvamMoEConfig, device=None):
|
| 99 |
+
super().__init__()
|
| 100 |
+
self.config = config
|
| 101 |
+
self.max_seq_len_cached = config.max_position_embeddings
|
| 102 |
+
self.original_max_seq_len = config.max_position_embeddings
|
| 103 |
+
rope_scaling = getattr(config, "rope_scaling", None)
|
| 104 |
+
if rope_scaling is None:
|
| 105 |
+
self.rope_type = "default"
|
| 106 |
+
inv_freq, self.attention_scaling = self.compute_default_rope_parameters(
|
| 107 |
+
config, device
|
| 108 |
+
)
|
| 109 |
+
else:
|
| 110 |
+
self.rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", "default"))
|
| 111 |
+
if self.rope_type == "default":
|
| 112 |
+
inv_freq, self.attention_scaling = self.compute_default_rope_parameters(
|
| 113 |
+
config, device
|
| 114 |
+
)
|
| 115 |
+
else:
|
| 116 |
+
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
|
| 117 |
+
inv_freq, self.attention_scaling = rope_init_fn(config, device)
|
| 118 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 119 |
+
self.original_inv_freq = self.inv_freq
|
| 120 |
+
|
| 121 |
+
@staticmethod
|
| 122 |
+
def compute_default_rope_parameters(
|
| 123 |
+
config: SarvamMoEConfig,
|
| 124 |
+
device: Optional[torch.device] = None,
|
| 125 |
+
seq_len: Optional[int] = None,
|
| 126 |
+
) -> Tuple[torch.Tensor, float]:
|
| 127 |
+
"""
|
| 128 |
+
Default RoPE parameters (classic rotary embedding).
|
| 129 |
+
|
| 130 |
+
Mirrors HF's default implementation: use `rope_theta`, head_dim and
|
| 131 |
+
return (inv_freq, attention_scaling).
|
| 132 |
+
"""
|
| 133 |
+
base = config.rope_theta
|
| 134 |
+
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
|
| 135 |
+
inv_freq = 1.0 / (
|
| 136 |
+
base
|
| 137 |
+
** (
|
| 138 |
+
torch.arange(0, dim, 2, dtype=torch.int64, device=device)
|
| 139 |
+
.to(dtype=torch.float32)
|
| 140 |
+
/ dim
|
| 141 |
+
)
|
| 142 |
+
)
|
| 143 |
+
attention_factor = 1.0
|
| 144 |
+
return inv_freq, attention_factor
|
| 145 |
+
|
| 146 |
+
@torch.no_grad()
|
| 147 |
+
@dynamic_rope_update
|
| 148 |
+
def forward(self, x, position_ids):
|
| 149 |
+
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
|
| 150 |
+
position_ids_expanded = position_ids[:, None, :].float()
|
| 151 |
+
|
| 152 |
+
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
|
| 153 |
+
with torch.autocast(device_type=device_type, enabled=False):
|
| 154 |
+
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
|
| 155 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 156 |
+
cos = emb.cos() * self.attention_scaling
|
| 157 |
+
sin = emb.sin() * self.attention_scaling
|
| 158 |
+
|
| 159 |
+
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def rotate_half(x):
|
| 163 |
+
x1 = x[..., : x.shape[-1] // 2]
|
| 164 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
| 165 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
|
| 169 |
+
cos = cos.unsqueeze(unsqueeze_dim)
|
| 170 |
+
sin = sin.unsqueeze(unsqueeze_dim)
|
| 171 |
+
rotary_dim = cos.shape[-1]
|
| 172 |
+
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
|
| 173 |
+
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
|
| 174 |
+
q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
|
| 175 |
+
k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
|
| 176 |
+
q_embed = torch.cat([q_embed, q_pass], dim=-1)
|
| 177 |
+
k_embed = torch.cat([k_embed, k_pass], dim=-1)
|
| 178 |
+
return q_embed, k_embed
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class SarvamMoEMLP(nn.Module):
|
| 182 |
+
def __init__(self, config: SarvamMoEConfig, intermediate_size: int):
|
| 183 |
+
super().__init__()
|
| 184 |
+
self.config = config
|
| 185 |
+
self.hidden_size = config.hidden_size
|
| 186 |
+
self.intermediate_size = intermediate_size
|
| 187 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 188 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 189 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
| 190 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
| 191 |
+
|
| 192 |
+
def forward(self, x):
|
| 193 |
+
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class SarvamMoEGate(nn.Module):
|
| 197 |
+
def __init__(self, config):
|
| 198 |
+
super().__init__()
|
| 199 |
+
self.config = config
|
| 200 |
+
self.top_k = config.num_experts_per_tok
|
| 201 |
+
self.num_experts = config.num_experts
|
| 202 |
+
self.n_group = config.n_group
|
| 203 |
+
self.topk_group = config.topk_group
|
| 204 |
+
self.gating_dim = config.hidden_size
|
| 205 |
+
self.weight = nn.Parameter(torch.empty((self.num_experts, self.gating_dim)))
|
| 206 |
+
self.routed_scaling_factor = config.routed_scaling_factor
|
| 207 |
+
self.score_function = config.score_function
|
| 208 |
+
# Ideally, we should register the expert_bias as a buffer, but vllm complains about it.
|
| 209 |
+
# self.register_buffer("expert_bias", torch.zeros((self.num_experts)))
|
| 210 |
+
self.expert_bias = nn.Parameter(
|
| 211 |
+
torch.zeros((self.num_experts)),
|
| 212 |
+
requires_grad=False,
|
| 213 |
+
)
|
| 214 |
+
self.reset_parameters()
|
| 215 |
+
|
| 216 |
+
def reset_parameters(self) -> None:
|
| 217 |
+
import torch.nn.init as init
|
| 218 |
+
|
| 219 |
+
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
|
| 220 |
+
|
| 221 |
+
def group_limited_topk(self, scores: torch.Tensor):
|
| 222 |
+
num_tokens, _ = scores.size()
|
| 223 |
+
group_scores = scores.view(num_tokens, self.n_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
|
| 224 |
+
group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
|
| 225 |
+
group_mask = torch.zeros_like(group_scores)
|
| 226 |
+
group_mask.scatter_(1, group_idx, 1)
|
| 227 |
+
score_mask = (
|
| 228 |
+
group_mask.unsqueeze(-1)
|
| 229 |
+
.expand(num_tokens, self.n_group, self.num_experts // self.n_group)
|
| 230 |
+
.reshape(num_tokens, -1)
|
| 231 |
+
)
|
| 232 |
+
masked_scores = scores.masked_fill(~score_mask.bool(), float("-inf"))
|
| 233 |
+
probs, top_indices = torch.topk(masked_scores, k=self.top_k, dim=-1)
|
| 234 |
+
return probs, top_indices
|
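# Worked toy example (editorial, not part of the original source): with
# num_experts=4, n_group=2, topk_group=1, top_k=2 and scores = [[0.9, 0.1, 0.5, 0.4]]:
#   group_scores = [[0.9 + 0.1, 0.5 + 0.4]] = [[1.0, 0.9]]   (top-2 sum per group)
#   group_idx    = [[0]]  ->  score_mask = [[1, 1, 0, 0]]
#   masked       = [[0.9, 0.1, -inf, -inf]]
#   top_k        ->  probs [[0.9, 0.1]], indices [[0, 1]]
# so only experts inside the winning group(s) can be selected.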
| 235 |
+
|
| 236 |
+
def forward(self, hidden_states):
|
| 237 |
+
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
|
| 238 |
+
logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
|
| 239 |
+
scores = torch.sigmoid(logits.float()).type_as(logits)
|
| 240 |
+
scores_for_routing = scores + self.expert_bias
|
| 241 |
+
_, topk_idx = self.group_limited_topk(scores_for_routing)
|
| 242 |
+
scores = torch.gather(scores, dim=1, index=topk_idx).type_as(logits)
|
| 243 |
+
topk_weight = scores / (scores.sum(dim=-1, keepdim=True) + 1e-20) if self.top_k > 1 else scores
|
| 244 |
+
topk_weight = topk_weight * self.routed_scaling_factor
|
| 245 |
+
return topk_idx, topk_weight, logits
|
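# Note (editorial, hedged): expert_bias is added to the scores only for
# *selecting* experts; the combine weights are gathered from the unbiased
# sigmoid scores, then renormalized and scaled by routed_scaling_factor.
# This appears to mirror the auxiliary-loss-free load-balancing style
# popularized by DeepSeek-V3, where a bias steers routing without changing
# the mixture weights.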
| 246 |
+
|
| 247 |
+
|
| 248 |
+
class SarvamMoEExperts(nn.ModuleList):
|
| 249 |
+
def __init__(self, config: SarvamMoEConfig):
|
| 250 |
+
# one MLP per expert
|
| 251 |
+
experts = [
|
| 252 |
+
SarvamMoEMLP(config=config, intermediate_size=config.moe_intermediate_size)
|
| 253 |
+
for _ in range(config.num_experts)
|
| 254 |
+
]
|
| 255 |
+
super().__init__(experts)
|
| 256 |
+
self.config = config
|
| 257 |
+
self.num_experts_per_tok = config.num_experts_per_tok
|
| 258 |
+
|
| 259 |
+
def forward(
|
| 260 |
+
self,
|
| 261 |
+
hidden_states: torch.Tensor,
|
| 262 |
+
top_k_index: torch.LongTensor,
|
| 263 |
+
top_k_weights: torch.Tensor,
|
| 264 |
+
) -> torch.Tensor:
|
| 265 |
+
"""
|
| 266 |
+
hidden_states: (tokens, hidden_size) or (batch * seq, hidden_size)
|
| 267 |
+
top_k_index: (tokens, top_k)
|
| 268 |
+
top_k_weights: (tokens, top_k)
|
| 269 |
+
"""
|
| 270 |
+
tokens, hidden_dim = hidden_states.shape
|
| 271 |
+
flat_topk_idx = top_k_index.view(-1)
|
| 272 |
+
|
| 273 |
+
if self.training:
|
| 274 |
+
# training path: route every (token, expert) copy through its selected expert
|
| 275 |
+
x = hidden_states.repeat_interleave(self.num_experts_per_tok, dim=0)
|
| 276 |
+
y = torch.empty_like(x)
|
| 277 |
+
for i, expert in enumerate(self):
|
| 278 |
+
mask = flat_topk_idx == i
|
| 279 |
+
if mask.any():
|
| 280 |
+
y[mask] = expert(x[mask])
|
| 281 |
+
y = (y.view(*top_k_weights.shape, -1) * top_k_weights.unsqueeze(-1)).sum(dim=1)
|
| 282 |
+
return y.to(hidden_states.dtype)
|
| 283 |
+
|
| 284 |
+
# inference path: sort token copies by expert so each expert runs once on a contiguous slice
|
| 285 |
+
num_experts = len(self)
|
| 286 |
+
cnts = top_k_index.new_zeros((tokens, num_experts))
|
| 287 |
+
cnts.scatter_(1, top_k_index, 1)
|
| 288 |
+
tokens_per_expert = cnts.sum(dim=0)
|
| 289 |
+
|
| 290 |
+
idxs = top_k_index.view(-1).argsort()
|
| 291 |
+
sorted_tokens = hidden_states[idxs // top_k_index.shape[1]]
|
| 292 |
+
|
| 293 |
+
tokens_per_expert = tokens_per_expert.cpu().numpy().tolist()
|
| 294 |
+
outputs = []
|
| 295 |
+
start_idx = 0
|
| 296 |
+
for i, num_tokens in enumerate(tokens_per_expert):
|
| 297 |
+
end_idx = start_idx + num_tokens
|
| 298 |
+
if num_tokens == 0:
|
| 299 |
+
continue
|
| 300 |
+
expert = self[i]
|
| 301 |
+
tokens_for_expert = sorted_tokens[start_idx:end_idx]
|
| 302 |
+
expert_out = expert(tokens_for_expert)
|
| 303 |
+
outputs.append(expert_out.to(hidden_states.device))
|
| 304 |
+
start_idx = end_idx
|
| 305 |
+
|
| 306 |
+
outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
|
| 307 |
+
new_x = torch.empty_like(outs)
|
| 308 |
+
new_x[idxs] = outs
|
| 309 |
+
|
| 310 |
+
final_out = (
|
| 311 |
+
new_x.view(*top_k_index.shape, -1)
|
| 312 |
+
.type(top_k_weights.dtype)
|
| 313 |
+
.mul_(top_k_weights.unsqueeze(dim=-1))
|
| 314 |
+
.sum(dim=1)
|
| 315 |
+
.type(new_x.dtype)
|
| 316 |
+
)
|
| 317 |
+
return final_out
|
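# Worked toy example (editorial, not part of the original source) for the
# inference path: with tokens=2, top_k=2 and top_k_index = [[0, 1], [1, 0]]:
#   flat index = [0, 1, 1, 0], idxs = argsort -> [0, 3, 1, 2]
#   idxs // 2  = [0, 1, 0, 1] -> sorted_tokens = hidden_states[[0, 1, 0, 1]]
# so each expert sees a contiguous slice (expert 0: slots 0-1, expert 1: 2-3),
# runs once, and new_x[idxs] = outs scatters results back to token order.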
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class SarvamMoESparseMoeBlock(nn.Module):
|
| 321 |
+
def __init__(self, config: SarvamMoEConfig):
|
| 322 |
+
super().__init__()
|
| 323 |
+
self.config = config
|
| 324 |
+
self.num_experts_per_tok = config.num_experts_per_tok
|
| 325 |
+
|
| 326 |
+
# container holding the routed experts
|
| 327 |
+
self.experts = SarvamMoEExperts(config)
|
| 328 |
+
self.gate = SarvamMoEGate(config)
|
| 329 |
+
|
| 330 |
+
if config.num_shared_experts is not None:
|
| 331 |
+
self.shared_experts = SarvamMoEMLP(
|
| 332 |
+
config=config,
|
| 333 |
+
intermediate_size=config.moe_intermediate_size * config.num_shared_experts,
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
# expert parameters live directly in self.experts (SarvamMoEExperts)
|
| 337 |
+
|
| 338 |
+
def forward(self, hidden_states):
|
| 339 |
+
identity = hidden_states
|
| 340 |
+
bsz, seq_len, h = hidden_states.shape
|
| 341 |
+
|
| 342 |
+
topk_idx, topk_weight, router_logits = self.gate(hidden_states)
|
| 343 |
+
|
| 344 |
+
# flatten batch+seq for experts
|
| 345 |
+
flat_hidden = hidden_states.view(-1, h)
|
| 346 |
+
flat_topk_idx = topk_idx.view(-1, topk_idx.shape[-1])
|
| 347 |
+
flat_topk_weight = topk_weight.view(-1, topk_weight.shape[-1])
|
| 348 |
+
|
| 349 |
+
y = self.experts(flat_hidden, flat_topk_idx, flat_topk_weight)
|
| 350 |
+
y = y.view(bsz, seq_len, h)
|
| 351 |
+
|
| 352 |
+
if self.config.num_shared_experts is not None:
|
| 353 |
+
y = y + self.shared_experts(identity)
|
| 354 |
+
|
| 355 |
+
# router logits shape: (bsz, seq_len, num_experts)
|
| 356 |
+
router_info = (
|
| 357 |
+
router_logits.view(bsz, seq_len, -1),
|
| 358 |
+
topk_idx.view(bsz, seq_len, -1),
|
| 359 |
+
)
|
| 360 |
+
return y, router_info
|
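# Note (editorial, hedged): the shared experts form an always-on dense path
# added to the routed output for every token (DeepSeek-style shared experts);
# only the routed experts above are gated per token.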
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| 364 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
| 365 |
+
if n_rep == 1:
|
| 366 |
+
return hidden_states
|
| 367 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
| 368 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
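# Illustrative note (editorial, not part of the original source): this is the
# usual grouped-query-attention expansion. E.g. with num_key_value_heads=2 and
# n_rep=4 (toy values), a (B, 2, L, D) key/value tensor becomes (B, 8, L, D),
# each KV head repeated 4 times to line up with 8 query heads.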
| 369 |
+
|
| 370 |
+
|
| 371 |
+
class SarvamMoEAttention(nn.Module):
|
| 372 |
+
is_causal = True # vLLM / Transformers backend critical flag
|
| 373 |
+
def __init__(self, config: SarvamMoEConfig, layer_idx: Optional[int] = None):
|
| 374 |
+
super().__init__()
|
| 375 |
+
self.config = config
|
| 376 |
+
self.layer_idx = layer_idx
|
| 377 |
+
if layer_idx is None:
|
| 378 |
+
logger.warning_once(
|
| 379 |
+
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
|
| 380 |
+
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
|
| 381 |
+
"when creating this class."
|
| 382 |
+
)
|
| 383 |
+
self.attention_dropout = config.attention_dropout
|
| 384 |
+
self.hidden_size = config.hidden_size
|
| 385 |
+
self.num_heads = config.num_attention_heads
|
| 386 |
+
self.head_dim = config.head_dim or self.hidden_size // self.num_heads
|
| 387 |
+
partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
|
| 388 |
+
self.rope_dim = int(self.head_dim * partial_rotary_factor)
|
| 389 |
+
self.num_key_value_heads = config.num_key_value_heads
|
| 390 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
| 391 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 392 |
+
self.rope_theta = config.rope_theta
|
| 393 |
+
self.query_key_value = nn.Linear(
|
| 394 |
+
self.hidden_size,
|
| 395 |
+
(self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
|
| 396 |
+
bias=config.use_qkv_bias,
|
| 397 |
+
)
|
| 398 |
+
if self.config.use_qk_norm:
|
| 399 |
+
self.query_layernorm = SarvamMoERMSNorm(self.head_dim, eps=config.rms_norm_eps)
|
| 400 |
+
self.key_layernorm = SarvamMoERMSNorm(self.head_dim, eps=config.rms_norm_eps)
|
| 401 |
+
self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.use_bias)
|
| 402 |
+
self.scaling = self.head_dim**-0.5
|
| 403 |
+
|
| 404 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
| 405 |
+
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
| 406 |
+
|
| 407 |
+
def forward(
|
| 408 |
+
self,
|
| 409 |
+
hidden_states: torch.Tensor,
|
| 410 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 411 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 412 |
+
past_key_value: Optional[Cache] = None,
|
| 413 |
+
output_attentions: bool = False,
|
| 414 |
+
use_cache: bool = False,
|
| 415 |
+
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 416 |
+
**kwargs,
|
| 417 |
+
):
|
| 418 |
+
bsz, q_len, _ = hidden_states.size()
|
| 419 |
+
qkv = self.query_key_value(hidden_states)
|
| 420 |
+
qkv = qkv.view(
|
| 421 |
+
bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim
|
| 422 |
+
)
|
| 423 |
+
query_states, key_states, value_states = qkv.split(
|
| 424 |
+
[self.num_heads, self.num_key_value_heads, self.num_key_value_heads],
|
| 425 |
+
dim=-2,
|
| 426 |
+
)
|
| 427 |
+
query_states = query_states.transpose(1, 2).contiguous()
|
| 428 |
+
key_states = key_states.transpose(1, 2).contiguous()
|
| 429 |
+
value_states = value_states.transpose(1, 2).contiguous()
|
| 430 |
+
if self.config.use_qk_norm:
|
| 431 |
+
query_states = self.query_layernorm(query_states)
|
| 432 |
+
key_states = self.key_layernorm(key_states)
|
| 433 |
+
cos, sin = position_embeddings
|
| 434 |
+
query_states, key_states = apply_rotary_pos_emb(
|
| 435 |
+
query_states, key_states, cos, sin
|
| 436 |
+
)
|
| 437 |
+
if past_key_value is not None:
|
| 438 |
+
if self.layer_idx is None:
|
| 439 |
+
raise ValueError(
|
| 440 |
+
"When using cache, SarvamMoEAttention must be initialized with layer_idx."
|
| 441 |
+
)
|
| 442 |
+
cache_kwargs = {"sin": sin, "cos": cos}
|
| 443 |
+
key_states, value_states = past_key_value.update(
|
| 444 |
+
key_states, value_states, self.layer_idx, cache_kwargs
|
| 445 |
+
)
|
| 446 |
+
# NOTE: vLLM will set config._attn_implementation = "vllm"
|
| 447 |
+
if self.config._attn_implementation == "vllm":
|
| 448 |
+
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
|
| 449 |
+
attn_output, attn_weights = attention_interface(
|
| 450 |
+
self,
|
| 451 |
+
query_states,
|
| 452 |
+
key_states,
|
| 453 |
+
value_states,
|
| 454 |
+
attention_mask,
|
| 455 |
+
dropout=0.0 if not self.training else self.attention_dropout,
|
| 456 |
+
scaling=self.scaling,
|
| 457 |
+
**kwargs,
|
| 458 |
+
)
|
| 459 |
+
# vLLM backend may return [B, H, L, Dh], [B, L, hidden], or [B*L, hidden]
|
| 460 |
+
if attn_output.dim() == 4:
|
| 461 |
+
# [B, H, L, Dh] -> [B, L, hidden]
|
| 462 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 463 |
+
attn_output = attn_output.view(bsz, q_len, -1)
|
| 464 |
+
elif attn_output.dim() == 3:
|
| 465 |
+
if attn_output.shape[0] != bsz or attn_output.shape[1] != q_len:
|
| 466 |
+
raise ValueError(
|
| 467 |
+
f"Unexpected vLLM attention output shape {attn_output.shape}, "
|
| 468 |
+
f"expected (bsz={bsz}, q_len={q_len}, hidden=*)"
|
| 469 |
+
)
|
| 470 |
+
elif attn_output.dim() == 2:
|
| 471 |
+
attn_output = attn_output.view(bsz, q_len, -1)
|
| 472 |
+
else:
|
| 473 |
+
raise ValueError(
|
| 474 |
+
f"Unsupported vLLM attention output rank {attn_output.dim()} "
|
| 475 |
+
f"with shape {attn_output.shape}"
|
| 476 |
+
)
|
| 477 |
+
attn_output = self.dense(attn_output)
|
| 478 |
+
if not output_attentions:
|
| 479 |
+
attn_weights = None
|
| 480 |
+
return attn_output, attn_weights, past_key_value
|
| 481 |
+
|
| 482 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 483 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 484 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
| 485 |
+
kv_seq_len = key_states.shape[-2]
|
| 486 |
+
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
| 487 |
+
raise ValueError(
|
| 488 |
+
f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
|
| 489 |
+
f" {attn_weights.size()}"
|
| 490 |
+
)
|
| 491 |
+
if attention_mask is not None:
|
| 492 |
+
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
| 493 |
+
raise ValueError(
|
| 494 |
+
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
| 495 |
+
)
|
| 496 |
+
attn_weights = attn_weights + attention_mask
|
| 497 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| 498 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
| 499 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 500 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
| 501 |
+
raise ValueError(
|
| 502 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
| 503 |
+
f" {attn_output.size()}"
|
| 504 |
+
)
|
| 505 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 506 |
+
attn_output = attn_output.reshape(bsz, q_len, -1)
|
| 507 |
+
attn_output = self.dense(attn_output)
|
| 508 |
+
if not output_attentions:
|
| 509 |
+
attn_weights = None
|
| 510 |
+
return attn_output, attn_weights, past_key_value
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
class SarvamMoEFlashAttention2(SarvamMoEAttention):
|
| 514 |
+
def __init__(self, *args, **kwargs):
|
| 515 |
+
super().__init__(*args, **kwargs)
|
| 516 |
+
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
| 517 |
+
|
| 518 |
+
def forward(
|
| 519 |
+
self,
|
| 520 |
+
hidden_states: torch.Tensor,
|
| 521 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 522 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 523 |
+
past_key_value: Optional[Cache] = None,
|
| 524 |
+
output_attentions: bool = False,
|
| 525 |
+
use_cache: bool = False,
|
| 526 |
+
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 527 |
+
**kwargs,
|
| 528 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 529 |
+
output_attentions = False
|
| 530 |
+
bsz, q_len, _ = hidden_states.size()
|
| 531 |
+
qkv = self.query_key_value(hidden_states)
|
| 532 |
+
qkv = qkv.view(bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim)
|
| 533 |
+
query_states, key_states, value_states = qkv.split(
|
| 534 |
+
[self.num_heads, self.num_key_value_heads, self.num_key_value_heads], dim=-2
|
| 535 |
+
)
|
| 536 |
+
query_states = query_states.transpose(1, 2)
|
| 537 |
+
key_states = key_states.transpose(1, 2)
|
| 538 |
+
value_states = value_states.transpose(1, 2)
|
| 539 |
+
if self.config.use_qk_norm:
|
| 540 |
+
query_states = self.query_layernorm(query_states)
|
| 541 |
+
key_states = self.key_layernorm(key_states)
|
| 542 |
+
cos, sin = position_embeddings
|
| 543 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
| 544 |
+
if past_key_value is not None:
|
| 545 |
+
cache_kwargs = {"sin": sin, "cos": cos}
|
| 546 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 547 |
+
query_states = query_states.transpose(1, 2)
|
| 548 |
+
key_states = key_states.transpose(1, 2)
|
| 549 |
+
value_states = value_states.transpose(1, 2)
|
| 550 |
+
dropout_rate = self.attention_dropout if self.training else 0.0
|
| 551 |
+
input_dtype = query_states.dtype
|
| 552 |
+
if input_dtype == torch.float32:
|
| 553 |
+
if hasattr(self.config, "_pre_quantization_dtype"):
|
| 554 |
+
target_dtype = self.config._pre_quantization_dtype
|
| 555 |
+
elif torch.is_autocast_enabled():
|
| 556 |
+
target_dtype = torch.get_autocast_gpu_dtype()
|
| 557 |
+
else:
|
| 558 |
+
target_dtype = self.query_key_value.weight.dtype
|
| 559 |
+
logger.warning_once(
|
| 560 |
+
f"The input hidden states seems to be silently casted in float32, this might be related to"
|
| 561 |
+
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
| 562 |
+
f" {target_dtype}."
|
| 563 |
+
)
|
| 564 |
+
query_states = query_states.to(target_dtype)
|
| 565 |
+
key_states = key_states.to(target_dtype)
|
| 566 |
+
value_states = value_states.to(target_dtype)
|
| 567 |
+
attn_output = self._flash_attention_forward(
|
| 568 |
+
query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
|
| 569 |
+
)
|
| 570 |
+
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
|
| 571 |
+
attn_output = self.dense(attn_output)
|
| 572 |
+
if not output_attentions:
|
| 573 |
+
attn_weights = None
|
| 574 |
+
return attn_output, attn_weights, past_key_value
|
| 575 |
+
|
| 576 |
+
def _flash_attention_forward(
|
| 577 |
+
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
|
| 578 |
+
):
|
| 579 |
+
if not self._flash_attn_uses_top_left_mask:
|
| 580 |
+
causal = self.is_causal
|
| 581 |
+
else:
|
| 582 |
+
causal = self.is_causal and query_length != 1
|
| 583 |
+
if attention_mask is not None:
|
| 584 |
+
batch_size = query_states.shape[0]
|
| 585 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
| 586 |
+
query_states, key_states, value_states, attention_mask, query_length
|
| 587 |
+
)
|
| 588 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
| 589 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
| 590 |
+
attn_output_unpad = flash_attn_varlen_func(
|
| 591 |
+
query_states,
|
| 592 |
+
key_states,
|
| 593 |
+
value_states,
|
| 594 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 595 |
+
cu_seqlens_k=cu_seqlens_k,
|
| 596 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
| 597 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
| 598 |
+
dropout_p=dropout,
|
| 599 |
+
softmax_scale=softmax_scale,
|
| 600 |
+
causal=causal,
|
| 601 |
+
)
|
| 602 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
| 603 |
+
else:
|
| 604 |
+
attn_output = flash_attn_func(
|
| 605 |
+
query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
|
| 606 |
+
)
|
| 607 |
+
return attn_output
|
| 608 |
+
|
| 609 |
+
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
|
| 610 |
+
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
|
| 611 |
+
batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
|
| 612 |
+
key_layer = index_first_axis(
|
| 613 |
+
key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
|
| 614 |
+
)
|
| 615 |
+
value_layer = index_first_axis(
|
| 616 |
+
value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
|
| 617 |
+
)
|
| 618 |
+
if query_length == kv_seq_len:
|
| 619 |
+
query_layer = index_first_axis(
|
| 620 |
+
query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
|
| 621 |
+
)
|
| 622 |
+
cu_seqlens_q = cu_seqlens_k
|
| 623 |
+
max_seqlen_in_batch_q = max_seqlen_in_batch_k
|
| 624 |
+
indices_q = indices_k
|
| 625 |
+
elif query_length == 1:
|
| 626 |
+
max_seqlen_in_batch_q = 1
|
| 627 |
+
cu_seqlens_q = torch.arange(
|
| 628 |
+
batch_size + 1, dtype=torch.int32, device=query_layer.device
|
| 629 |
+
)
|
| 630 |
+
indices_q = cu_seqlens_q[:-1]
|
| 631 |
+
query_layer = query_layer.squeeze(1)
|
| 632 |
+
else:
|
| 633 |
+
attention_mask = attention_mask[:, -query_length:]
|
| 634 |
+
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
|
| 635 |
+
return (
|
| 636 |
+
query_layer,
|
| 637 |
+
key_layer,
|
| 638 |
+
value_layer,
|
| 639 |
+
indices_q,
|
| 640 |
+
(cu_seqlens_q, cu_seqlens_k),
|
| 641 |
+
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
| 642 |
+
)
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
class SarvamMoESdpaAttention(SarvamMoEAttention):
|
| 646 |
+
def forward(
|
| 647 |
+
self,
|
| 648 |
+
hidden_states: torch.Tensor,
|
| 649 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 650 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 651 |
+
past_key_value: Optional[Cache] = None,
|
| 652 |
+
output_attentions: Optional[bool] = False,
|
| 653 |
+
use_cache: Optional[bool] = False,
|
| 654 |
+
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 655 |
+
**kwargs,
|
| 656 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 657 |
+
if output_attentions:
|
| 658 |
+
return super().forward(
|
| 659 |
+
hidden_states=hidden_states,
|
| 660 |
+
attention_mask=attention_mask,
|
| 661 |
+
position_ids=position_ids,
|
| 662 |
+
past_key_value=past_key_value,
|
| 663 |
+
output_attentions=output_attentions,
|
| 664 |
+
use_cache=use_cache,
|
| 665 |
+
**kwargs,
|
| 666 |
+
)
|
| 667 |
+
bsz, q_len, _ = hidden_states.size()
|
| 668 |
+
qkv = self.query_key_value(hidden_states)
|
| 669 |
+
qkv = qkv.view(bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim)
|
| 670 |
+
query_states, key_states, value_states = qkv.split(
|
| 671 |
+
[self.num_heads, self.num_key_value_heads, self.num_key_value_heads], dim=-2
|
| 672 |
+
)
|
| 673 |
+
query_states = query_states.transpose(1, 2)
|
| 674 |
+
key_states = key_states.transpose(1, 2)
|
| 675 |
+
value_states = value_states.transpose(1, 2)
|
| 676 |
+
if self.config.use_qk_norm:
|
| 677 |
+
query_states = self.query_layernorm(query_states)
|
| 678 |
+
key_states = self.key_layernorm(key_states)
|
| 679 |
+
cos, sin = position_embeddings
|
| 680 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
| 681 |
+
if past_key_value is not None:
|
| 682 |
+
cache_kwargs = {"sin": sin, "cos": cos}
|
| 683 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 684 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 685 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 686 |
+
if attention_mask is not None:
|
| 687 |
+
kv_seq_len = key_states.shape[-2]
|
| 688 |
+
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
| 689 |
+
raise ValueError(
|
| 690 |
+
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
| 691 |
+
)
|
| 692 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
| 693 |
+
query_states = query_states.contiguous()
|
| 694 |
+
key_states = key_states.contiguous()
|
| 695 |
+
value_states = value_states.contiguous()
|
| 696 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
| 697 |
+
query_states,
|
| 698 |
+
key_states,
|
| 699 |
+
value_states,
|
| 700 |
+
attn_mask=attention_mask,
|
| 701 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
| 702 |
+
is_causal=self.is_causal and attention_mask is None and q_len > 1,
|
| 703 |
+
)
|
| 704 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 705 |
+
attn_output = attn_output.reshape(bsz, q_len, -1)
|
| 706 |
+
attn_output = self.dense(attn_output)
|
| 707 |
+
return attn_output, None, past_key_value
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
ATTENTION_CLASSES = {
|
| 711 |
+
"eager": SarvamMoEAttention,
|
| 712 |
+
"flash_attention_2": SarvamMoEFlashAttention2,
|
| 713 |
+
"sdpa": SarvamMoESdpaAttention,
|
| 714 |
+
"vllm": SarvamMoEAttention,
|
| 715 |
+
}
|
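# Note (editorial, hedged): config._attn_implementation selects the class at
# layer construction time. "vllm" intentionally maps to the base eager class:
# its forward detects the "vllm" implementation and dispatches through
# ALL_ATTENTION_FUNCTIONS["vllm"], which vLLM integration code is expected to
# register outside this file.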
| 716 |
+
|
| 717 |
+
|
| 718 |
+
class SarvamMoEDecoderLayer(nn.Module):
|
| 719 |
+
def __init__(self, config: SarvamMoEConfig, layer_idx: int):
|
| 720 |
+
super().__init__()
|
| 721 |
+
self.hidden_size = config.hidden_size
|
| 722 |
+
self.attention = ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
|
| 723 |
+
self.mlp = (
|
| 724 |
+
SarvamMoESparseMoeBlock(config)
|
| 725 |
+
if (config.num_experts is not None and layer_idx >= config.first_k_dense_replace)
|
| 726 |
+
else SarvamMoEMLP(config=config, intermediate_size=config.intermediate_size)
|
| 727 |
+
)
|
| 728 |
+
self.input_layernorm = SarvamMoERMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 729 |
+
self.post_attention_layernorm = SarvamMoERMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 730 |
+
|
| 731 |
+
def forward(
|
| 732 |
+
self,
|
| 733 |
+
hidden_states: torch.Tensor,
|
| 734 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 735 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 736 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 737 |
+
output_attentions: Optional[bool] = False,
|
| 738 |
+
output_router_logits: Optional[bool] = False,
|
| 739 |
+
use_cache: Optional[bool] = False,
|
| 740 |
+
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 741 |
+
**kwargs,
|
| 742 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 743 |
+
residual = hidden_states
|
| 744 |
+
hidden_states = self.input_layernorm(hidden_states)
|
| 745 |
+
hidden_states, self_attn_weights, present_key_value = self.attention(
|
| 746 |
+
hidden_states=hidden_states,
|
| 747 |
+
attention_mask=attention_mask,
|
| 748 |
+
position_ids=position_ids,
|
| 749 |
+
past_key_value=past_key_value,
|
| 750 |
+
output_attentions=output_attentions,
|
| 751 |
+
position_embeddings=position_embeddings,
|
| 752 |
+
use_cache=use_cache,
|
| 753 |
+
**kwargs,
|
| 754 |
+
)
|
| 755 |
+
hidden_states = residual + hidden_states
|
| 756 |
+
residual = hidden_states
|
| 757 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
| 758 |
+
hidden_states = self.mlp(hidden_states)
|
| 759 |
+
if isinstance(hidden_states, tuple):
|
| 760 |
+
hidden_states, router_logits = hidden_states
|
| 761 |
+
else:
|
| 762 |
+
router_logits = None
|
| 763 |
+
hidden_states = residual + hidden_states.to(residual.device)
|
| 764 |
+
outputs = (hidden_states,)
|
| 765 |
+
if output_attentions:
|
| 766 |
+
outputs += (self_attn_weights,)
|
| 767 |
+
if use_cache:
|
| 768 |
+
outputs += (present_key_value,)
|
| 769 |
+
if output_router_logits:
|
| 770 |
+
outputs += (router_logits,)
|
| 771 |
+
return outputs
|
| 772 |
+
|
| 773 |
+
class SarvamMoEPreTrainedModel(PreTrainedModel):
|
| 774 |
+
config_class = SarvamMoEConfig
|
| 775 |
+
base_model_prefix = "model"
|
| 776 |
+
supports_gradient_checkpointing = True
|
| 777 |
+
_no_split_modules = ["SarvamMoEDecoderLayer"]
|
| 778 |
+
_skip_keys_device_placement = "past_key_values"
|
| 779 |
+
_supports_flash_attn_2 = True
|
| 780 |
+
_supports_sdpa = True
|
| 781 |
+
_supports_cache_class = True
|
| 782 |
+
|
| 783 |
+
def _init_weights(self, module):
|
| 784 |
+
std = self.config.initializer_range
|
| 785 |
+
if isinstance(module, nn.Linear):
|
| 786 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 787 |
+
if module.bias is not None:
|
| 788 |
+
module.bias.data.zero_()
|
| 789 |
+
elif isinstance(module, nn.Embedding):
|
| 790 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 791 |
+
if module.padding_idx is not None:
|
| 792 |
+
module.weight.data[module.padding_idx].zero_()
|
| 793 |
+
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
class SarvamMoEModel(SarvamMoEPreTrainedModel):
|
| 797 |
+
_supports_attention_backend = True
|
| 798 |
+
def __init__(self, config: SarvamMoEConfig):
|
| 799 |
+
super().__init__(config)
|
| 800 |
+
self.padding_idx = config.pad_token_id
|
| 801 |
+
self.vocab_size = config.vocab_size
|
| 802 |
+
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 803 |
+
self.layers = []
|
| 804 |
+
for layer_idx in range(config.num_hidden_layers):
|
| 805 |
+
self.layers.append(SarvamMoEDecoderLayer(config, layer_idx))
|
| 806 |
+
self.layers = nn.ModuleList(self.layers)
|
| 807 |
+
self._use_sdpa = config._attn_implementation == "sdpa"
|
| 808 |
+
self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
|
| 809 |
+
self.norm = SarvamMoERMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 810 |
+
self.rotary_emb = SarvamMoERotaryEmbedding(config=config)
|
| 811 |
+
self.gradient_checkpointing = False
|
| 812 |
+
self.post_init()
|
| 813 |
+
|
| 814 |
+
def get_input_embeddings(self):
|
| 815 |
+
return self.word_embeddings
|
| 816 |
+
|
| 817 |
+
def set_input_embeddings(self, value):
|
| 818 |
+
self.word_embeddings = value
|
| 819 |
+
|
| 820 |
+
def forward(
|
| 821 |
+
self,
|
| 822 |
+
input_ids: torch.LongTensor = None,
|
| 823 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 824 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 825 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 826 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 827 |
+
use_cache: Optional[bool] = None,
|
| 828 |
+
output_attentions: Optional[bool] = None,
|
| 829 |
+
output_hidden_states: Optional[bool] = None,
|
| 830 |
+
output_router_logits: Optional[bool] = None,
|
| 831 |
+
return_dict: Optional[bool] = None,
|
| 832 |
+
**kwargs,
|
| 833 |
+
) -> Union[Tuple, SarvamMoEModelOutputWithPast]:
|
| 834 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 835 |
+
output_hidden_states = (
|
| 836 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 837 |
+
)
|
| 838 |
+
output_router_logits = (
|
| 839 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
| 840 |
+
)
|
| 841 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 842 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 843 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 844 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
| 845 |
+
elif input_ids is not None:
|
| 846 |
+
batch_size, seq_length = input_ids.shape[:2]
|
| 847 |
+
elif inputs_embeds is not None:
|
| 848 |
+
batch_size, seq_length = inputs_embeds.shape[:2]
|
| 849 |
+
else:
|
| 850 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
| 851 |
+
if self.gradient_checkpointing and self.training:
|
| 852 |
+
if use_cache:
|
| 853 |
+
logger.warning_once(
|
| 854 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`transformers."
|
| 855 |
+
)
|
| 856 |
+
use_cache = False
|
| 857 |
+
if use_cache and past_key_values is None:
|
| 858 |
+
past_key_values = DynamicCache()
|
| 859 |
+
if inputs_embeds is None:
|
| 860 |
+
inputs_embeds = self.word_embeddings(input_ids)
|
| 861 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 862 |
+
if position_ids is None:
|
| 863 |
+
position_ids = torch.arange(
|
| 864 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
| 865 |
+
)
|
| 866 |
+
position_ids = position_ids.unsqueeze(0)
|
| 867 |
+
if self._use_flash_attention_2:
|
| 868 |
+
attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
|
| 869 |
+
elif self._use_sdpa and not output_attentions:
|
| 870 |
+
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
|
| 871 |
+
attention_mask,
|
| 872 |
+
(batch_size, seq_length),
|
| 873 |
+
inputs_embeds,
|
| 874 |
+
past_seen_tokens,
|
| 875 |
+
)
|
| 876 |
+
else:
|
| 877 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
| 878 |
+
attention_mask, (batch_size, seq_length), inputs_embeds, past_seen_tokens
|
| 879 |
+
)
|
| 880 |
+
hidden_states = inputs_embeds
|
| 881 |
+
position_embeddings = self.rotary_emb(hidden_states, position_ids)
|
| 882 |
+
all_hidden_states = () if output_hidden_states else None
|
| 883 |
+
all_self_attns = () if output_attentions else None
|
| 884 |
+
all_router_logits = () if output_router_logits else None
|
| 885 |
+
next_decoder_cache = None
|
| 886 |
+
layers = self.layers
|
| 887 |
+
for decoder_layer in layers:
|
| 888 |
+
if output_hidden_states:
|
| 889 |
+
all_hidden_states += (hidden_states,)
|
| 890 |
+
if self.gradient_checkpointing and self.training:
|
| 891 |
+
layer_outputs = self._gradient_checkpointing_func(
|
| 892 |
+
decoder_layer.__call__,
|
| 893 |
+
hidden_states,
|
| 894 |
+
attention_mask,
|
| 895 |
+
position_ids,
|
| 896 |
+
past_key_values,
|
| 897 |
+
output_attentions,
|
| 898 |
+
output_router_logits,
|
| 899 |
+
use_cache,
|
| 900 |
+
position_embeddings,
|
| 901 |
+
**kwargs,
|
| 902 |
+
)
|
| 903 |
+
else:
|
| 904 |
+
layer_outputs = decoder_layer(
|
| 905 |
+
hidden_states,
|
| 906 |
+
attention_mask=attention_mask,
|
| 907 |
+
position_ids=position_ids,
|
| 908 |
+
past_key_value=past_key_values,
|
| 909 |
+
output_attentions=output_attentions,
|
| 910 |
+
output_router_logits=output_router_logits,
|
| 911 |
+
use_cache=use_cache,
|
| 912 |
+
position_embeddings=position_embeddings,
|
| 913 |
+
**kwargs,
|
| 914 |
+
)
|
| 915 |
+
hidden_states = layer_outputs[0]
|
| 916 |
+
if use_cache:
|
| 917 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
| 918 |
+
if output_attentions:
|
| 919 |
+
all_self_attns += (layer_outputs[1],)
|
| 920 |
+
if output_router_logits and layer_outputs[-1] is not None:
|
| 921 |
+
all_router_logits += (layer_outputs[-1],)
|
| 922 |
+
hidden_states = self.norm(hidden_states)
|
| 923 |
+
if output_hidden_states:
|
| 924 |
+
all_hidden_states += (hidden_states,)
|
| 925 |
+
next_cache = None
|
| 926 |
+
if use_cache:
|
| 927 |
+
next_cache = next_decoder_cache
|
| 928 |
+
if not return_dict:
|
| 929 |
+
return tuple(
|
| 930 |
+
v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] if v is not None
|
| 931 |
+
)
|
| 932 |
+
return SarvamMoEModelOutputWithPast(
|
| 933 |
+
last_hidden_state=hidden_states,
|
| 934 |
+
past_key_values=next_cache,
|
| 935 |
+
hidden_states=all_hidden_states,
|
| 936 |
+
attentions=all_self_attns,
|
| 937 |
+
router_logits=all_router_logits,
|
| 938 |
+
)
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
class SarvamMoEForCausalLM(SarvamMoEPreTrainedModel, GenerationMixin):
|
| 942 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 943 |
+
|
| 944 |
+
def __init__(self, config: SarvamMoEConfig):
|
| 945 |
+
super().__init__(config)
|
| 946 |
+
self.model = SarvamMoEModel(config)
|
| 947 |
+
self.vocab_size = config.vocab_size
|
| 948 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 949 |
+
self.post_init()
|
| 950 |
+
|
| 951 |
+
def get_input_embeddings(self):
|
| 952 |
+
return self.model.word_embeddings
|
| 953 |
+
|
| 954 |
+
def set_input_embeddings(self, value):
|
| 955 |
+
self.model.word_embeddings = value
|
| 956 |
+
|
| 957 |
+
def get_output_embeddings(self):
|
| 958 |
+
return self.lm_head
|
| 959 |
+
|
| 960 |
+
def set_output_embeddings(self, new_embeddings):
|
| 961 |
+
self.lm_head = new_embeddings
|
| 962 |
+
|
| 963 |
+
def set_decoder(self, decoder):
|
| 964 |
+
self.model = decoder
|
| 965 |
+
|
| 966 |
+
def get_decoder(self):
|
| 967 |
+
return self.model
|
| 968 |
+
|
| 969 |
+
def forward(
|
| 970 |
+
self,
|
| 971 |
+
input_ids: torch.LongTensor = None,
|
| 972 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 973 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 974 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 975 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 976 |
+
labels: Optional[torch.LongTensor] = None,
|
| 977 |
+
use_cache: Optional[bool] = None,
|
| 978 |
+
output_attentions: Optional[bool] = None,
|
| 979 |
+
output_hidden_states: Optional[bool] = None,
|
| 980 |
+
output_router_logits: Optional[bool] = None,
|
| 981 |
+
return_dict: Optional[bool] = None,
|
| 982 |
+
**kwargs,
|
| 983 |
+
) -> Union[Tuple, SarvamMoEModelOutputWithPast]:
|
| 984 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 985 |
+
output_hidden_states = (
|
| 986 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 987 |
+
)
|
| 988 |
+
output_router_logits = (
|
| 989 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
| 990 |
+
)
|
| 991 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 992 |
+
outputs = self.model(
|
| 993 |
+
input_ids=input_ids,
|
| 994 |
+
attention_mask=attention_mask,
|
| 995 |
+
position_ids=position_ids,
|
| 996 |
+
past_key_values=past_key_values,
|
| 997 |
+
inputs_embeds=inputs_embeds,
|
| 998 |
+
use_cache=use_cache,
|
| 999 |
+
output_attentions=output_attentions,
|
| 1000 |
+
output_hidden_states=output_hidden_states,
|
| 1001 |
+
output_router_logits=output_router_logits,
|
| 1002 |
+
return_dict=return_dict,
|
| 1003 |
+
**kwargs,
|
| 1004 |
+
)
|
| 1005 |
+
loss = None
|
| 1006 |
+
aux_loss = None
|
| 1007 |
+
hidden_states = outputs[0]
|
| 1008 |
+
logits = self.lm_head(hidden_states)
|
| 1009 |
+
logits = logits.float()
|
| 1010 |
+
if labels is not None:
|
| 1011 |
+
loss = self.loss_function(logits, labels, self.config.vocab_size, **kwargs)
|
| 1012 |
+
if not return_dict:
|
| 1013 |
+
output = (logits,) + outputs[1:]
|
| 1014 |
+
if output_router_logits:
|
| 1015 |
+
output = (aux_loss,) + output
|
| 1016 |
+
return (loss,) + output if loss is not None else output
|
| 1017 |
+
return SarvamMoECausalLMOutputWithPast(
|
| 1018 |
+
loss=loss,
|
| 1019 |
+
logits=logits,
|
| 1020 |
+
past_key_values=outputs.past_key_values,
|
| 1021 |
+
hidden_states=outputs.hidden_states,
|
| 1022 |
+
attentions=outputs.attentions,
|
| 1023 |
+
aux_loss=aux_loss,
|
| 1024 |
+
router_logits=outputs.router_logits,
|
| 1025 |
+
)
|
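A minimal usage sketch for the modeling code above (editorial addition; the repo id, dtype, and prompt are illustrative assumptions, not taken from this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# repo id assumed for illustration; custom code requires trust_remote_code
tok = AutoTokenizer.from_pretrained("sarvamai/sarvam-30b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "sarvamai/sarvam-30b",
    trust_remote_code=True,  # SarvamMoEForCausalLM lives in this repo, not in transformers
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
inputs = tok("Hello", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))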
sarvam.py
ADDED
|
@@ -0,0 +1,788 @@
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
| 3 |
+
#
|
| 4 |
+
# Copyright 2026 Sarvam AI team. All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This code is based on Llama, Deepseek, and Bailing MoE implementations
|
| 7 |
+
# in this library. It has been modified from its original forms to
|
| 8 |
+
# accommodate Sarvam's MoE architectures.
|
| 9 |
+
#
|
| 10 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 11 |
+
# you may not use this file except in compliance with the License.
|
| 12 |
+
# You may obtain a copy of the License at
|
| 13 |
+
#
|
| 14 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 15 |
+
#
|
| 16 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 17 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 18 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 19 |
+
# See the License for the specific language governing permissions and
|
| 20 |
+
# limitations under the License.
|
| 21 |
+
|
| 22 |
+
from __future__ import annotations
|
| 23 |
+
|
| 24 |
+
import math
|
| 25 |
+
from collections.abc import Iterable, Iterator
|
| 26 |
+
from itertools import islice
|
| 27 |
+
|
| 28 |
+
import torch
|
| 29 |
+
from torch import nn
|
| 30 |
+
|
| 31 |
+
from vllm.config import CacheConfig, ParallelConfig, VllmConfig
|
| 32 |
+
from vllm.distributed import (
|
| 33 |
+
get_pp_group,
|
| 34 |
+
get_tensor_model_parallel_rank,
|
| 35 |
+
get_tensor_model_parallel_world_size,
|
| 36 |
+
)
|
| 37 |
+
from vllm.model_executor.layers.activation import SiluAndMul
|
| 38 |
+
from vllm.model_executor.layers.fused_moe import SharedFusedMoE
|
| 39 |
+
from vllm.model_executor.layers.layernorm import RMSNorm
|
| 40 |
+
from vllm.model_executor.layers.linear import (
|
| 41 |
+
ColumnParallelLinear,
|
| 42 |
+
MergedColumnParallelLinear,
|
| 43 |
+
ReplicatedLinear,
|
| 44 |
+
RowParallelLinear,
|
| 45 |
+
)
|
| 46 |
+
from vllm.model_executor.layers.logits_processor import LogitsProcessor
|
| 47 |
+
from vllm.model_executor.layers.mla import MLAModules, MultiHeadLatentAttentionWrapper
|
| 48 |
+
from vllm.model_executor.layers.quantization import QuantizationConfig
|
| 49 |
+
from vllm.model_executor.layers.rotary_embedding import get_rope
|
| 50 |
+
from vllm.model_executor.layers.vocab_parallel_embedding import (
|
| 51 |
+
ParallelLMHead,
|
| 52 |
+
VocabParallelEmbedding,
|
| 53 |
+
)
|
| 54 |
+
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
|
| 55 |
+
from vllm.sequence import IntermediateTensors
|
| 56 |
+
|
| 57 |
+
from .bailing_moe import BailingMoeForCausalLM
|
| 58 |
+
from .interfaces import MixtureOfExperts, SupportsLoRA, SupportsPP
|
| 59 |
+
from .utils import (
|
| 60 |
+
AutoWeightsLoader,
|
| 61 |
+
PPMissingLayer,
|
| 62 |
+
is_pp_missing_parameter,
|
| 63 |
+
make_empty_intermediate_tensors_factory,
|
| 64 |
+
make_layers,
|
| 65 |
+
maybe_prefix,
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def yarn_get_mscale(scale: float = 1, mscale: float = 1) -> float:
|
| 70 |
+
if scale <= 1:
|
| 71 |
+
return 1.0
|
| 72 |
+
return 0.1 * mscale * math.log(scale) + 1.0
|
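# Illustrative note (editorial, not part of the original source): for a YaRN
# scaling factor of 40 with mscale=1 (toy values),
#   yarn_get_mscale(40, 1) = 0.1 * ln(40) + 1 ~= 1.369,
# and the attention scaling further below is multiplied by mscale**2.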
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _is_gate_expert_bias_name(name: str) -> bool:
|
| 76 |
+
return name.endswith(".mlp.gate.e_score_correction_bias") or name.endswith(
|
| 77 |
+
".gate.e_score_correction_bias"
|
| 78 |
+
)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _zero_mean_tensor(t: torch.Tensor) -> torch.Tensor:
|
| 82 |
+
if t.numel() == 0:
|
| 83 |
+
return t
|
| 84 |
+
return t - t.mean()
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _normalized_weights(
|
| 88 |
+
weights: Iterable[tuple[str, torch.Tensor]],
|
| 89 |
+
) -> Iterator[tuple[str, torch.Tensor]]:
|
| 90 |
+
for name, w in weights:
|
| 91 |
+
if _is_gate_expert_bias_name(name):
|
| 92 |
+
yield name, _zero_mean_tensor(w)
|
| 93 |
+
else:
|
| 94 |
+
yield name, w
|
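# Note (editorial, hedged): zero-meaning e_score_correction_bias at load time
# preserves the *relative* expert preferences that drive routing while
# removing a constant offset; grouped top-k selection is invariant to adding
# the same constant to every expert's score.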
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class SarvamMLAAttention(nn.Module):
|
| 98 |
+
def __init__(
|
| 99 |
+
self,
|
| 100 |
+
vllm_config: VllmConfig,
|
| 101 |
+
config,
|
| 102 |
+
cache_config: CacheConfig | None = None,
|
| 103 |
+
quant_config: QuantizationConfig | None = None,
|
| 104 |
+
prefix: str = "",
|
| 105 |
+
) -> None:
|
| 106 |
+
super().__init__()
|
| 107 |
+
|
| 108 |
+
self.config = config
|
| 109 |
+
self.hidden_size = config.hidden_size
|
| 110 |
+
self.qk_nope_head_dim = config.qk_nope_head_dim
|
| 111 |
+
self.qk_rope_head_dim = config.qk_rope_head_dim
|
| 112 |
+
self.qk_head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim
|
| 113 |
+
self.v_head_dim = config.v_head_dim
|
| 114 |
+
|
| 115 |
+
self.q_lora_rank = getattr(config, "q_lora_rank", None)
|
| 116 |
+
self.kv_lora_rank = config.kv_lora_rank
|
| 117 |
+
|
| 118 |
+
self.total_num_heads = config.num_attention_heads
|
| 119 |
+
tp_size = get_tensor_model_parallel_world_size()
|
| 120 |
+
assert self.total_num_heads % tp_size == 0
|
| 121 |
+
self.num_local_heads = self.total_num_heads // tp_size
|
| 122 |
+
|
| 123 |
+
self.scaling = self.qk_head_dim**-0.5
|
| 124 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 125 |
+
|
| 126 |
+
if self.q_lora_rank is not None:
|
| 127 |
+
self.q_a_proj = ReplicatedLinear(
|
| 128 |
+
self.hidden_size,
|
| 129 |
+
self.q_lora_rank,
|
| 130 |
+
bias=False,
|
| 131 |
+
quant_config=quant_config,
|
| 132 |
+
prefix=f"{prefix}.q_a_proj",
|
| 133 |
+
)
|
| 134 |
+
self.q_a_layernorm = RMSNorm(self.q_lora_rank, eps=config.rms_norm_eps)
|
| 135 |
+
self.q_b_proj = ColumnParallelLinear(
|
| 136 |
+
self.q_lora_rank,
|
| 137 |
+
self.total_num_heads * self.qk_head_dim,
|
| 138 |
+
bias=False,
|
| 139 |
+
quant_config=quant_config,
|
| 140 |
+
prefix=f"{prefix}.q_b_proj",
|
| 141 |
+
)
|
| 142 |
+
self.q_proj = None # type: ignore
|
| 143 |
+
else:
|
| 144 |
+
self.q_proj = ColumnParallelLinear(
|
| 145 |
+
self.hidden_size,
|
| 146 |
+
self.total_num_heads * self.qk_head_dim,
|
| 147 |
+
bias=False,
|
| 148 |
+
quant_config=quant_config,
|
| 149 |
+
prefix=f"{prefix}.q_proj",
|
| 150 |
+
)
|
| 151 |
+
self.q_a_proj = None # type: ignore
|
| 152 |
+
self.q_a_layernorm = None # type: ignore
|
| 153 |
+
self.q_b_proj = None # type: ignore
|
| 154 |
+
|
| 155 |
+
# KV latent (MQA-style) A-proj
|
| 156 |
+
self.kv_a_proj_with_mqa = ReplicatedLinear(
|
| 157 |
+
self.hidden_size,
|
| 158 |
+
self.kv_lora_rank + self.qk_rope_head_dim,
|
| 159 |
+
bias=False,
|
| 160 |
+
quant_config=quant_config,
|
| 161 |
+
prefix=f"{prefix}.kv_a_proj_with_mqa",
|
| 162 |
+
)
|
| 163 |
+
self.kv_a_layernorm = RMSNorm(self.kv_lora_rank, eps=config.rms_norm_eps)
|
| 164 |
+
|
| 165 |
+
# KV B-proj produces per-head K_nope and V
|
| 166 |
+
self.kv_b_proj = ColumnParallelLinear(
|
| 167 |
+
self.kv_lora_rank,
|
| 168 |
+
self.total_num_heads * (self.qk_nope_head_dim + self.v_head_dim),
|
| 169 |
+
bias=False,
|
| 170 |
+
quant_config=quant_config,
|
| 171 |
+
prefix=f"{prefix}.kv_b_proj",
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
self.o_proj = RowParallelLinear(
|
| 175 |
+
self.total_num_heads * self.v_head_dim,
|
| 176 |
+
self.hidden_size,
|
| 177 |
+
bias=False,
|
| 178 |
+
quant_config=quant_config,
|
| 179 |
+
prefix=f"{prefix}.o_proj",
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
self.rotary_emb = get_rope(
|
| 183 |
+
self.qk_rope_head_dim,
|
| 184 |
+
# rotary_dim=self.qk_rope_head_dim,
|
| 185 |
+
max_position=config.max_position_embeddings,
|
| 186 |
+
rope_parameters=config.rope_parameters,
|
| 187 |
+
is_neox_style=False,
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
if config.rope_parameters.get("rope_type", None) == "deepseek_yarn":
|
| 191 |
+
mscale_all_dim = config.rope_parameters.get("mscale_all_dim", False)
|
| 192 |
+
scaling_factor = config.rope_parameters["factor"]
|
| 193 |
+
mscale = yarn_get_mscale(scaling_factor, float(mscale_all_dim))
|
| 194 |
+
self.scaling = self.scaling * mscale * mscale
|
| 195 |
+
|
| 196 |
+
mla_modules = MLAModules(
|
| 197 |
+
kv_a_layernorm=self.kv_a_layernorm,
|
| 198 |
+
kv_b_proj=self.kv_b_proj,
|
| 199 |
+
rotary_emb=self.rotary_emb,
|
| 200 |
+
o_proj=self.o_proj,
|
| 201 |
+
fused_qkv_a_proj=None,
|
| 202 |
+
kv_a_proj_with_mqa=self.kv_a_proj_with_mqa,
|
| 203 |
+
q_a_layernorm=self.q_a_layernorm if self.q_lora_rank is not None else None,
|
| 204 |
+
q_b_proj=self.q_b_proj if self.q_lora_rank is not None else None,
|
| 205 |
+
q_proj=self.q_proj if self.q_lora_rank is None else None,
|
| 206 |
+
indexer=None,
|
| 207 |
+
indexer_rotary_emb=None,
|
| 208 |
+
is_sparse=False,
|
| 209 |
+
topk_indices_buffer=None,
|
| 210 |
+
)
|
| 211 |
+
|
| 212 |
+
self.mla_attn = MultiHeadLatentAttentionWrapper(
|
| 213 |
+
self.hidden_size,
|
| 214 |
+
self.num_local_heads,
|
| 215 |
+
self.scaling,
|
| 216 |
+
self.qk_nope_head_dim,
|
| 217 |
+
self.qk_rope_head_dim,
|
| 218 |
+
self.v_head_dim,
|
| 219 |
+
self.q_lora_rank,
|
| 220 |
+
self.kv_lora_rank,
|
| 221 |
+
mla_modules,
|
| 222 |
+
cache_config=cache_config,
|
| 223 |
+
quant_config=quant_config,
|
| 224 |
+
prefix=prefix,
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
def forward(
|
| 228 |
+
self,
|
| 229 |
+
positions: torch.Tensor,
|
| 230 |
+
hidden_states: torch.Tensor,
|
| 231 |
+
) -> torch.Tensor:
|
| 232 |
+
return self.mla_attn(positions, hidden_states, llama_4_scaling=None)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
class SarvamMLAMLP(nn.Module):
|
| 236 |
+
def __init__(
|
| 237 |
+
self,
|
| 238 |
+
intermediate_size: int,
|
| 239 |
+
config,
|
| 240 |
+
quant_config: QuantizationConfig | None = None,
|
| 241 |
+
reduce_results: bool = True,
|
| 242 |
+
prefix: str = "",
|
| 243 |
+
) -> None:
|
| 244 |
+
super().__init__()
|
| 245 |
+
|
| 246 |
+
self.gate_up_proj = MergedColumnParallelLinear(
|
| 247 |
+
config.hidden_size,
|
| 248 |
+
[intermediate_size] * 2,
|
| 249 |
+
bias=False,
|
| 250 |
+
quant_config=quant_config,
|
| 251 |
+
prefix=f"{prefix}.gate_up_proj",
|
| 252 |
+
)
|
| 253 |
+
self.down_proj = RowParallelLinear(
|
| 254 |
+
intermediate_size,
|
| 255 |
+
config.hidden_size,
|
| 256 |
+
bias=False,
|
| 257 |
+
quant_config=quant_config,
|
| 258 |
+
reduce_results=reduce_results,
|
| 259 |
+
prefix=f"{prefix}.down_proj",
|
| 260 |
+
)
|
| 261 |
+
self.act_fn = SiluAndMul()
|
| 262 |
+
|
| 263 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 264 |
+
gate_up, _ = self.gate_up_proj(x)
|
| 265 |
+
x = self.act_fn(gate_up)
|
| 266 |
+
x, _ = self.down_proj(x)
|
| 267 |
+
return x
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class SarvamMLAMoE(nn.Module):
    def __init__(
        self,
        config,
        parallel_config: ParallelConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()

        self.config = config
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()
        self.hidden_size = config.hidden_size

        self.num_experts = config.num_experts
        self.top_k = config.num_experts_per_tok
        self.routed_scaling_factor = getattr(config, "routed_scaling_factor", 2.5)

        self.n_group = getattr(config, "n_group", None)
        self.topk_group = getattr(config, "topk_group", None)
        self.use_grouped_topk = self.n_group is not None and self.topk_group is not None

        self.norm_expert_prob = getattr(config, "norm_topk_prob", True)

        router_dtype_cfg = getattr(config, "router_dtype", "fp32")
        if router_dtype_cfg is None:
            self.router_dtype = None
        elif router_dtype_cfg == "fp32":
            self.router_dtype = torch.float32
        else:
            self.router_dtype = torch.bfloat16

        self.gate = nn.Linear(
            self.hidden_size,
            self.num_experts,
            bias=False,
            dtype=self.router_dtype,
        )

        if getattr(config, "moe_router_enable_expert_bias", True):
            self.gate.e_score_correction_bias = nn.Parameter(
                torch.empty(
                    (self.num_experts,),
                    dtype=torch.float32,
                )
            )
        else:
            self.gate.e_score_correction_bias = None

        self.score_function = getattr(config, "score_function", "sigmoid")
        self.num_shared_experts = getattr(config, "num_shared_experts", 1)
        if self.num_shared_experts > 0:
            if hasattr(config, "moe_shared_expert_intermediate_size"):
                shared_int = config.moe_shared_expert_intermediate_size
            else:
                shared_int = config.moe_intermediate_size
            shared_int *= self.num_shared_experts
            self.shared_experts = SarvamMLAMLP(
                intermediate_size=shared_int,
                config=config,
                quant_config=quant_config,
                reduce_results=False,
                prefix=f"{prefix}.shared_experts",
            )
        else:
            self.shared_experts = None

        self.experts = SharedFusedMoE(
            shared_experts=self.shared_experts,
            num_experts=self.num_experts,
            top_k=self.top_k,
            hidden_size=self.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=False,
            renormalize=self.norm_expert_prob,
            quant_config=quant_config,
            prefix=f"{prefix}.experts",
            scoring_func=self.score_function,
            e_score_correction_bias=self.gate.e_score_correction_bias,
            num_expert_group=self.n_group,
            topk_group=self.topk_group,
            use_grouped_topk=self.use_grouped_topk,
            routed_scaling_factor=self.routed_scaling_factor,
        )

    def maybe_get_fused_moe(self) -> SharedFusedMoE:
        return self.experts

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        router_logits = self.gate(
            hidden_states.to(self.router_dtype)
            if self.router_dtype is not None
            else hidden_states
        )
        router_logits = router_logits.to(hidden_states.dtype)
        final_hidden = self.experts(
            hidden_states=hidden_states,
            router_logits=router_logits,
        )

        if self.shared_experts is not None:
            shared_output, expert_output = final_hidden
        else:
            shared_output, expert_output = None, final_hidden

        # expert_output *= self.routed_scaling_factor

        if shared_output is not None:
            expert_output = expert_output + shared_output

        if self.tp_size > 1:
            expert_output = self.experts.maybe_all_reduce_tensor_model_parallel(
                expert_output
            )

        return expert_output.view(num_tokens, hidden_dim)

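The routing itself is delegated to SharedFusedMoE. Conceptually it follows the DeepSeek-V3-style scheme suggested by the constructor arguments: sigmoid expert scores, an additive per-expert correction bias (e_score_correction_bias) that influences which experts are selected but not their combine weights, top-k renormalization, and a routed scaling factor. A hedged sketch of that selection math, assuming this scheme (the exact kernels live inside vLLM):

import torch

def route(router_logits, bias, top_k, scaling):
    # Sigmoid scores per expert; the additive bias affects *selection*
    # only, while combine weights come from the unbiased scores.
    scores = router_logits.sigmoid()                    # [tokens, experts]
    topk_ids = (scores + bias).topk(top_k, dim=-1).indices
    topk_w = scores.gather(-1, topk_ids)
    topk_w = topk_w / topk_w.sum(-1, keepdim=True)      # norm_topk_prob
    return topk_ids, topk_w * scaling                   # routed_scaling_factor

ids, w = route(torch.randn(3, 8), torch.zeros(8), top_k=2, scaling=2.5)
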
class SarvamMLABlock(nn.Module):
    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        parallel_config = vllm_config.parallel_config
        layer_idx = int(prefix.split(".")[-1])
        hidden_size = config.hidden_size
        dense_intermediate = getattr(config, "intermediate_size", 16384)

        self.input_layernorm = RMSNorm(hidden_size, eps=config.rms_norm_eps)
        self.self_attn = SarvamMLAAttention(
            vllm_config=vllm_config,
            config=config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.post_attention_layernorm = RMSNorm(hidden_size, eps=config.rms_norm_eps)
        use_moe = hasattr(config, "num_experts") and config.num_experts is not None
        first_k_dense = getattr(config, "first_k_dense_replace", 1)
        moe_layer_freq = getattr(config, "moe_layer_freq", 1)
        if use_moe:
            is_moe_layer = layer_idx >= first_k_dense and (
                (layer_idx - first_k_dense) % moe_layer_freq == 0
            )
        else:
            is_moe_layer = False

        if is_moe_layer:
            self.mlp = SarvamMLAMoE(
                config=config,
                parallel_config=parallel_config,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        else:
            self.mlp = SarvamMLAMLP(
                intermediate_size=dense_intermediate,
                config=config,
                quant_config=quant_config,
                reduce_results=True,
                prefix=f"{prefix}.mlp",
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        positions: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)

        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual

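The block's MoE-placement predicate is worth isolating: the first first_k_dense_replace layers stay dense, and after that a layer is MoE whenever its offset is a multiple of moe_layer_freq. The same logic as a standalone helper:

def is_moe(layer_idx: int, first_k_dense: int = 1, moe_layer_freq: int = 1) -> bool:
    return layer_idx >= first_k_dense and (layer_idx - first_k_dense) % moe_layer_freq == 0

# With the defaults above, layer 0 stays dense and every later layer is MoE:
print([is_moe(i) for i in range(4)])  # [False, True, True, True]
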
class SarvamMLAModel(nn.Module):
    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()

        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config

        self.config = config
        self.vocab_size = config.vocab_size
        self.embed_dim = config.hidden_size
        self.tie_word_embeddings = getattr(config, "tie_word_embeddings", False)
        if get_pp_group().is_first_rank or (
            self.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                self.vocab_size,
                self.embed_dim,
                quant_config=quant_config,
                prefix=f"{prefix}.embed_tokens",
            )
        else:
            self.embed_tokens = PPMissingLayer()

        self.embedding_dropout = torch.nn.Dropout(
            getattr(config, "embedding_dropout", 0.0)
        )
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: SarvamMLABlock(
                vllm_config=vllm_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(self.embed_dim, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            hidden_states = self.embedding_dropout(hidden_states)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]

        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states, residual = layer(
                hidden_states,
                positions,
                residual,
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        if residual is None:
            hidden_states = self.norm(hidden_states)
        else:
            hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        return SharedFusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.num_experts,
        )

    def load_weights(
        self,
        weights: Iterable[tuple[str, torch.Tensor]],
    ) -> set[str]:
        """Load weights with stacked gate+up and MoE expert remapping."""
        weights = _normalized_weights(weights)
        stacked_params_mapping = [
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]

        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        expert_params_mapping = self.get_expert_mapping()

        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                if "mlp.experts" in name:
                    continue
                new_name = name.replace(weight_name, param_name)
                if new_name.endswith(".bias") and new_name not in params_dict:
                    continue
                if new_name not in params_dict:
                    continue
                if is_pp_missing_parameter(new_name, self):
                    continue

                param = params_dict[new_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight, shard_id)
                loaded_params.add(new_name)
                break
            else:
                mapped = False
                for (
                    param_name,
                    weight_name,
                    expert_id,
                    shard_id,
                ) in expert_params_mapping:
                    if weight_name not in name:
                        continue

                    new_name = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(new_name, self):
                        continue
                    if new_name not in params_dict:
                        continue

                    param = params_dict[new_name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(
                        param,
                        loaded_weight,
                        name,
                        shard_id=shard_id,
                        expert_id=expert_id,
                    )
                    loaded_params.add(new_name)
                    mapped = True
                    break

                if mapped:
                    continue

                if name.endswith(".bias") and name not in params_dict:
                    continue
                if name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue

                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
                loaded_params.add(name)

        return loaded_params

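The stacked_params_mapping in load_weights is just a name rewrite: each (param_name, weight_name, shard_id) entry sends a checkpoint tensor into one shard of the fused parameter. A small illustration of the renaming (the names are made up for the example):

stacked = [("gate_up_proj", "gate_proj", 0), ("gate_up_proj", "up_proj", 1)]

for name in ["layers.0.mlp.gate_proj.weight", "layers.0.mlp.up_proj.weight"]:
    for param_name, weight_name, shard_id in stacked:
        if weight_name in name:
            print(name.replace(weight_name, param_name), "-> shard", shard_id)
            break
# layers.0.mlp.gate_up_proj.weight -> shard 0
# layers.0.mlp.gate_up_proj.weight -> shard 1
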
class SarvamMixtureOfExperts(MixtureOfExperts):
    def extract_moe_parameters(self, example_moe: SarvamMLAMoE | None) -> None:
        if example_moe is None:
            raise RuntimeError("No SarvamMLAMoE layer found in model.layers.")

        self.num_logical_experts = example_moe.num_experts
        self.num_routed_experts = example_moe.num_experts  # routed pool size
        self.num_shared_experts = getattr(example_moe.config, "num_shared_experts", 1)

        self.num_physical_experts = self.num_logical_experts
        self.num_local_physical_experts = self.num_logical_experts
        self.num_redundant_experts = 0

    def update_physical_experts_metadata(
        self,
        num_physical_experts: int,
        num_local_physical_experts: int,
    ) -> None:
        self.num_physical_experts = num_physical_experts
        self.num_local_physical_experts = num_local_physical_experts
        self.num_redundant_experts = num_physical_experts - self.num_logical_experts

        for moe in self.moe_mlp_layers:
            moe.n_physical_experts = num_physical_experts
            moe.n_local_physical_experts = num_local_physical_experts
            moe.n_redundant_experts = self.num_redundant_experts

            fused = moe.experts
            if hasattr(fused, "n_local_physical_experts"):
                fused.n_local_physical_experts = num_local_physical_experts
            if hasattr(fused, "n_physical_experts"):
                fused.n_physical_experts = num_physical_experts
            if hasattr(fused, "n_redundant_experts"):
                fused.n_redundant_experts = self.num_redundant_experts
            if hasattr(fused, "update_expert_map"):
                fused.update_expert_map()

    def set_eplb_state(self, eplb_state) -> None:
        self.eplb_state = eplb_state
        for moe in self.moe_layers:
            if hasattr(moe, "set_eplb_state"):
                moe.set_eplb_state(eplb_state)

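The expert-parallel bookkeeping above reduces to simple arithmetic: expert-parallel load balancing (EPLB) can replicate hot experts, so the physical expert count may exceed the logical pool, and the difference is tracked as redundant experts. Illustrative numbers only, none taken from this model's config:

num_logical_experts = 64                                      # assumed pool size
num_physical_experts = 72                                     # after adding 8 replicas
num_redundant = num_physical_experts - num_logical_experts    # 8
num_local = num_physical_experts // 8                         # 9 per rank with EP size 8
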
class SarvamMLAForCausalLM(nn.Module, SupportsPP, SupportsLoRA, SarvamMixtureOfExperts):
    packed_modules_mapping = {
        "q_proj": ["q_proj"],
        "q_a_proj": ["q_a_proj"],
        "q_b_proj": ["q_b_proj"],
        "kv_a_proj_with_mqa": ["kv_a_proj_with_mqa"],
        "kv_b_proj": ["kv_b_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config

        self.model = SarvamMLAModel(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "model"),
        )

        self.tie_word_embeddings = getattr(config, "tie_word_embeddings", False)
        if get_pp_group().is_last_rank:
            if self.tie_word_embeddings:
                self.lm_head = self.model.embed_tokens
            else:
                self.lm_head = ParallelLMHead(
                    config.vocab_size,
                    config.hidden_size,
                    quant_config=quant_config,
                    prefix=maybe_prefix(prefix, "lm_head"),
                )
            self.logits_processor = LogitsProcessor(config.vocab_size)
        else:
            self.lm_head = PPMissingLayer()
            self.logits_processor = None  # type: ignore

        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

        self.expert_weights = []
        self.num_moe_layers = 0

        self.moe_layers = []
        self.moe_mlp_layers = []

        example_moe = None
        for layer in self.model.layers:
            if isinstance(layer, PPMissingLayer):
                continue
            if isinstance(layer.mlp, SarvamMLAMoE):
                example_moe = layer.mlp
                self.moe_mlp_layers.append(layer.mlp)
                self.moe_layers.append(layer.mlp.experts)
                self.num_moe_layers += 1

        self.extract_moe_parameters(example_moe)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        return self.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        if not get_pp_group().is_last_rank:
            return None
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(
        self,
        weights: Iterable[tuple[str, torch.Tensor]],
    ) -> set[str]:
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=(["lm_head."] if self.tie_word_embeddings else None),
        )
        return loader.load_weights(weights)

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        return self.model.get_expert_mapping()


class SarvamMoEForCausalLM(BailingMoeForCausalLM):
    """Same as BailingMoeForCausalLM, but normalizes gate expert_bias pre-load."""

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        return super().load_weights(_normalized_weights(weights))

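A hedged end-to-end usage sketch, not part of the diff: since the repository bundles remote-code modeling and configuration files plus a chat template, a transformers load along these lines should work; the local path is a placeholder, and serving through vLLM would additionally rely on the repo's hotpatch_vllm.py to register the classes above.

from transformers import AutoModelForCausalLM, AutoTokenizer

path = "."  # placeholder: a local clone of this repository
tok = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path, trust_remote_code=True, torch_dtype="auto", device_map="auto"
)
msgs = [{"role": "user", "content": "Hello!"}]
ids = tok.apply_chat_template(msgs, add_generation_prompt=True, return_tensors="pt")
out = model.generate(ids.to(model.device), max_new_tokens=64)
print(tok.decode(out[0][ids.shape[-1]:], skip_special_tokens=True))
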
special_tokens_map.json
ADDED

@@ -0,0 +1,33 @@

{
  "boi_token": "<|start_of_image|>",
  "bos_token": {
    "content": "[@BOS@]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eoi_token": "<|end_of_image|>",
  "eos_token": {
    "content": "<|end_of_turn|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "image_token": "<|image_soft_token|>",
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

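These entries surface as attributes on the loaded tokenizer; for instance, the eos entry is what stops generation at end of turn. A quick hedged check (placeholder local path):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".", trust_remote_code=True)
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# [@BOS@] <|end_of_turn|> <pad> <unk>
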
tokenizer.json
ADDED

@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:a574ceaaff7c7a8f091179c53fd17ae33567089c099d4ff37d4cb3bc1a87e80e
size 33627251

tokenizer_config.json
ADDED

The diff for this file is too large to render. See raw diff.