krishnateja95 committed on
Commit 588f8ef · verified · 1 Parent(s): d20154f

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,97 @@
1
+ {{- '[@BOS@]\n' }}
2
+ {%- if tools -%}
3
+ <|start_of_turn|><|tool_declare|>
4
+ <tools>
5
+ {% for tool in tools %}
6
+ {{ tool | tojson(ensure_ascii=False) }}
7
+ {% endfor %}
8
+ </tools>
9
+ {{- '<|end_of_turn|>\n' }}{%- endif -%}
10
+ {%- macro visible_text(content) -%}
11
+ {%- if content is string -%}
12
+ {{- content }}
13
+ {%- elif content is iterable and content is not mapping -%}
14
+ {%- for item in content -%}
15
+ {%- if item is mapping and item.type == 'text' -%}
16
+ {{- item.text }}
17
+ {%- elif item is string -%}
18
+ {{- item }}
19
+ {%- endif -%}
20
+ {%- endfor -%}
21
+ {%- elif content is none -%}
22
+ {{- '' }}
23
+ {%- else -%}
24
+ {{- content }}
25
+ {%- endif -%}
26
+ {%- endmacro -%}
27
+ {%- set ns = namespace(last_user_index=-1) %}
28
+ {%- for m in messages %}
29
+ {%- if m.role == 'user' %}
30
+ {% set ns.last_user_index = loop.index0 -%}
31
+ {%- endif %}
32
+ {%- endfor %}
33
+ {% for m in messages %}
34
+ {%- if m.role == 'user' -%}<|start_of_turn|><|user|>
35
+ {{ visible_text(m.content) }}
36
+ {{- '<|nothink|>' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("<|nothink|>")) else '' -}}
37
+ {{- '<|end_of_turn|>\n' }}
38
+ {%- elif m.role == 'assistant' -%}
39
+ {{- '<|start_of_turn|><|assistant|>\n' }}
40
+ {%- set reasoning_content = '' %}
41
+ {%- set content = visible_text(m.content) %}
42
+ {%- if m.reasoning_content is string %}
43
+ {%- set reasoning_content = m.reasoning_content %}
44
+ {%- else %}
45
+ {%- if '</think>' in content %}
46
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
47
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
48
+ {%- endif %}
49
+ {%- endif %}
50
+ {%- if loop.index0 > ns.last_user_index and reasoning_content -%}
51
+ {{ '<think>' + reasoning_content.strip() + '</think>'}}
52
+ {%- else -%}
53
+ {{ '<think></think>' }}
54
+ {%- endif -%}
55
+ {%- if content.strip() -%}
56
+ {{ '\n' + content.strip() }}
57
+ {%- endif -%}
58
+ {% if m.tool_calls %}
59
+ {% for tc in m.tool_calls %}
60
+ {%- if tc.function %}
61
+ {%- set tc = tc.function %}
62
+ {%- endif %}
63
+ {{ '\n<tool_call>' + tc.name }}
64
+ {% set _args = tc.arguments %}
65
+ {% for k, v in _args.items() %}
66
+ <arg_key>{{ k }}</arg_key>
67
+ <arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
68
+ {% endfor %}
69
+ </tool_call>{% endfor %}
70
+ {% endif %}
71
+ {{- '<|end_of_turn|>\n' }}
72
+ {%- elif m.role == 'tool' -%}
73
+ {%- if m.content is string -%}
74
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
75
+ {{- '<|start_of_turn|><|observation|>' }}
76
+ {%- endif %}
77
+ {{- '\n<tool_response>\n' }}
78
+ {{- m.content }}
79
+ {{- '\n</tool_response>' }}
80
+ {%- else -%}
81
+ <|start_of_turn|><|observation|>{% for tr in m.content %}
82
+
83
+ <tool_response>
84
+ {{ tr.output if tr.output is defined else tr }}
85
+ </tool_response>{% endfor -%}
86
+ {% endif -%}
87
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
88
+ {{- '<|end_of_turn|>\n' }}{%- endif -%}
89
+ {%- elif m.role == 'system' -%}
90
+ <|start_of_turn|><|system|>
91
+ {{ visible_text(m.content) }}
92
+ {{- '<|end_of_turn|>\n' }}
93
+ {%- endif -%}
94
+ {%- endfor -%}
95
+ {%- if add_generation_prompt -%}
96
+ {{- '<|start_of_turn|><|assistant|>\n' }}
97
+ {%- endif -%}
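A minimal sketch of how this template is normally exercised through the tokenizer, assuming the files in this commit are loaded from the Hub (the repository id below is a placeholder, and enable_thinking is the optional template variable used above):

from transformers import AutoTokenizer

repo_id = "path/to/this-repo"  # placeholder: substitute the actual Hub id of this repository
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# With enable_thinking=False the template appends <|nothink|> to the last user turn,
# mirroring the branch on line 36 of the template above.
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,
)
print(prompt)  # starts with [@BOS@] and ends with <|start_of_turn|><|assistant|>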
config.json ADDED
@@ -0,0 +1,103 @@
1
+ {
2
+ "architectures": [
3
+ "SarvamMoEForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "attn_implementation": null,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_sarvam_moe.SarvamMoEConfig",
9
+ "AutoModel": "modeling_sarvam_moe.SarvamMoEModel",
10
+ "AutoModelForCausalLM": "modeling_sarvam_moe.SarvamMoEForCausalLM"
11
+ },
12
+ "dtype": "float32",
13
+ "embedding_dropout": 0.0,
14
+ "eos_token_id": 1,
15
+ "first_k_dense_replace": 1,
16
+ "head_dim": 64,
17
+ "hidden_act": "silu",
18
+ "hidden_size": 4096,
19
+ "initializer_range": 0.006,
20
+ "intermediate_size": 8192,
21
+ "max_position_embeddings": 131072,
22
+ "max_window_layers": 19,
23
+ "model_type": "sarvam_moe",
24
+ "moe_intermediate_size": 1024,
25
+ "moe_router_enable_expert_bias": true,
26
+ "moe_shared_expert_intermediate_size": 1024,
27
+ "n_group": 1,
28
+ "norm_topk_prob": true,
29
+ "num_attention_heads": 64,
30
+ "num_experts": 128,
31
+ "num_experts_per_tok": 6,
32
+ "num_hidden_layers": 19,
33
+ "num_key_value_heads": 4,
34
+ "num_shared_experts": 1,
35
+ "output_dropout": 0.0,
36
+ "output_router_logits": false,
37
+ "pad_token_id": 0,
38
+ "quantization_config": {
39
+ "config_groups": {
40
+ "group_0": {
41
+ "format": "float-quantized",
42
+ "input_activations": {
43
+ "actorder": null,
44
+ "block_structure": null,
45
+ "dynamic": true,
46
+ "group_size": null,
47
+ "num_bits": 8,
48
+ "observer": null,
49
+ "observer_kwargs": {},
50
+ "scale_dtype": null,
51
+ "strategy": "token",
52
+ "symmetric": true,
53
+ "type": "float",
54
+ "zp_dtype": null
55
+ },
56
+ "output_activations": null,
57
+ "targets": [
58
+ "Linear"
59
+ ],
60
+ "weights": {
61
+ "actorder": null,
62
+ "block_structure": null,
63
+ "dynamic": false,
64
+ "group_size": null,
65
+ "num_bits": 8,
66
+ "observer": "memoryless_minmax",
67
+ "observer_kwargs": {},
68
+ "scale_dtype": null,
69
+ "strategy": "channel",
70
+ "symmetric": true,
71
+ "type": "float",
72
+ "zp_dtype": null
73
+ }
74
+ }
75
+ },
76
+ "format": "float-quantized",
77
+ "global_compression_ratio": null,
78
+ "ignore": [
79
+ "lm_head"
80
+ ],
81
+ "kv_cache_scheme": null,
82
+ "quant_method": "compressed-tensors",
83
+ "quantization_status": "compressed",
84
+ "sparsity_config": {},
85
+ "transform_config": {},
86
+ "version": "0.14.0"
87
+ },
88
+ "rms_norm_eps": 1e-06,
89
+ "rope_scaling": null,
90
+ "rope_theta": 8000000,
91
+ "routed_scaling_factor": 2.5,
92
+ "router_dtype": "fp32",
93
+ "score_function": "sigmoid",
94
+ "tie_word_embeddings": false,
95
+ "topk_group": 1,
96
+ "transformers_version": "4.57.6",
97
+ "use_bias": false,
98
+ "use_cache": true,
99
+ "use_qk_norm": true,
100
+ "use_qkv_bias": false,
101
+ "use_rmsnorm": true,
102
+ "vocab_size": 262144
103
+ }
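Because auto_map points at the custom configuration_sarvam_moe.py added below, this config must be loaded with trust_remote_code=True. A short sketch for inspecting it (placeholder repository id):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this-repo", trust_remote_code=True)  # placeholder id
print(config.model_type)                               # sarvam_moe
print(config.num_experts, config.num_experts_per_tok)  # 128 routed experts, 6 active per token
print(config.quantization_config)                      # compressed-tensors FP8 scheme shown above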
configuration_sarvam_moe.py ADDED
@@ -0,0 +1,103 @@
1
+ from transformers.configuration_utils import PretrainedConfig
2
+
3
+
4
+ class SarvamMoEConfig(PretrainedConfig):
5
+ model_type = "sarvam_moe"
6
+ def __init__(
7
+ self,
8
+ vocab_size=262144,
9
+ hidden_size=4096,
10
+ intermediate_size=8192,
11
+ num_hidden_layers=19,
12
+ num_attention_heads=16,
13
+ num_key_value_heads=4,
14
+ hidden_act="silu",
15
+ use_qkv_bias=False,
16
+ use_bias=False,
17
+ rms_norm_eps=1e-06,
18
+ tie_word_embeddings=False,
19
+ embedding_dropout=0.0,
20
+ attention_dropout=0.0,
21
+ output_dropout=0.0,
22
+ initializer_range=0.006,
23
+ max_position_embeddings=4096,
24
+ rope_theta=10000.0,
25
+ use_cache=True,
26
+ max_window_layers=19,
27
+ rope_scaling=None,
28
+ pad_token_id=0,
29
+ eos_token_id=1,
30
+ num_experts=128,
31
+ num_shared_experts=1,
32
+ num_experts_per_tok=6,
33
+ n_group=1,
34
+ topk_group=1,
35
+ moe_intermediate_size=1024,
36
+ first_k_dense_replace=1,
37
+ head_dim=256,
38
+ output_router_logits=False,
39
+ use_qk_norm=True,
40
+ moe_router_enable_expert_bias=True,
41
+ routed_scaling_factor=2.5,
42
+ attn_implementation: str = "eager",
43
+ **kwargs,
44
+ ):
45
+ self.num_hidden_layers = num_hidden_layers
46
+ self.vocab_size = vocab_size
47
+ self.hidden_size = hidden_size
48
+ self.intermediate_size = intermediate_size
49
+ self.num_attention_heads = num_attention_heads
50
+ self.num_key_value_heads = num_key_value_heads
51
+ self.hidden_act = hidden_act
52
+ self.use_qkv_bias = use_qkv_bias
53
+ self.use_bias = use_bias
54
+ self.rms_norm_eps = rms_norm_eps
55
+ self.embedding_dropout = embedding_dropout
56
+ self.attention_dropout = attention_dropout
57
+ self.output_dropout = output_dropout
58
+ self.initializer_range = initializer_range
59
+ self.max_position_embeddings = max_position_embeddings
60
+ self.rope_theta = rope_theta
61
+ self.use_cache = use_cache
62
+ self.max_window_layers = max_window_layers
63
+ self.head_dim = head_dim or hidden_size // num_attention_heads
64
+ self.rope_scaling = rope_scaling
65
+ self.use_qk_norm = use_qk_norm
66
+ self.moe_router_enable_expert_bias = moe_router_enable_expert_bias
67
+ self.routed_scaling_factor = routed_scaling_factor
68
+ self.num_experts = num_experts
69
+ self.num_shared_experts = num_shared_experts
70
+ self.num_experts_per_tok = num_experts_per_tok
71
+ self.n_group = n_group
72
+ self.topk_group = topk_group
73
+ self.moe_intermediate_size = moe_intermediate_size
74
+ self.first_k_dense_replace = first_k_dense_replace
75
+ self.output_router_logits = output_router_logits
76
+ self.attn_implementation = attn_implementation
77
+ self._attn_implementation = attn_implementation
78
+
79
+ self.base_model_tp_plan = {
80
+ "layers.*.attention.query_key_value": "colwise",
81
+ "layers.*.attention.dense": "rowwise",
82
+ "layers.*.mlp.gate_proj": "colwise",
83
+ "layers.*.mlp.up_proj": "colwise",
84
+ "layers.*.mlp.down_proj": "rowwise",
85
+ "layers.*.mlp.experts.*.gate_proj": "colwise",
86
+ "layers.*.mlp.experts.*.up_proj": "colwise",
87
+ "layers.*.mlp.experts.*.down_proj": "rowwise",
88
+ "layers.*.mlp.shared_experts.gate_proj": "colwise",
89
+ "layers.*.mlp.shared_experts.up_proj": "colwise",
90
+ "layers.*.mlp.shared_experts.down_proj": "rowwise",
91
+ }
92
+ self.base_model_pp_plan = {
93
+ "word_embeddings": (["input_ids"], ["inputs_embeds"]),
94
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
95
+ "norm": (["hidden_states"], ["hidden_states"]),
96
+ }
97
+
98
+ super().__init__(
99
+ pad_token_id=pad_token_id,
100
+ eos_token_id=eos_token_id,
101
+ tie_word_embeddings=tie_word_embeddings,
102
+ **kwargs,
103
+ )
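For a quick check of the defaults above, the config class can also be instantiated directly; note the head_dim fallback to hidden_size // num_attention_heads, and that extra keys such as score_function (present in config.json but not an explicit parameter here) are passed through **kwargs to PretrainedConfig. A small sketch, assuming it is run from a checkout of this repository:

from configuration_sarvam_moe import SarvamMoEConfig

cfg = SarvamMoEConfig(num_attention_heads=64, head_dim=None, score_function="sigmoid")
print(cfg.head_dim)        # 4096 // 64 = 64, via the `head_dim or ...` fallback
print(cfg.score_function)  # "sigmoid", stored from **kwargs by PretrainedConfig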
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "eos_token_id": 26,
4
+ "pad_token_id": 0,
5
+ "transformers_version": "4.57.6"
6
+ }
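Note that the eos_token_id recorded here (26) differs from the one in config.json (1); which default actually applies at generation time can be checked, or overridden, roughly as follows (placeholder repository id):

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/this-repo")  # placeholder id
print(gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 26, 0
# model.generate(..., generation_config=gen_cfg) would then use these defaults.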
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6aa15e334d46fa39a615870f8405f9cd0b9110933eaaa7ddafde1905ba2233d
3
+ size 4996666952
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:345ab5627eb499f1daa3aa15ca1d6b68cda2175410271b6476c8ad93f45a95d4
3
+ size 4997040064
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb6d9d093de09daafe70d421c07967c1e6a88c247b6e4ececaae22446e8e8129
3
+ size 4997040064
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3c01a3bc520089ef190965868104394a8915a0f81b90292c014164792aa6732
3
+ size 4997052488
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74f33a28f463b3916a4e5d9d5a30f7ef5c39dd1e8edbef3138cc6840d4c2de84
3
+ size 4997042424
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13c77dae2fa192eb9674dbbe5243019cf889dac5bc5b21a8883ad7ae3f07b823
3
+ size 4997042416
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37286ae7f5e44b4ef3373ecc8c5eaf224a5dd8cba563ee049e3fa23bd01795fd
3
+ size 4406620920
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f39a62c958f34788e01d2c45715a75c2a1238286994ebfb57335775beb2b5bf
3
+ size 4294967424
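Each model-0000X-of-00008.safetensors entry above is a Git LFS pointer: the repository records only the sha256 and byte size, and the real shard is fetched from LFS storage. A small sketch for verifying a downloaded shard against its pointer:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest copied from the pointer for shard 1 of 8 above.
expected = "d6aa15e334d46fa39a615870f8405f9cd0b9110933eaaa7ddafde1905ba2233d"
assert sha256_of("model-00001-of-00008.safetensors") == expected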
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
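Although the diff is not rendered, model.safetensors.index.json normally follows the standard sharded-safetensors index layout written by save_pretrained: a metadata block with the total size plus a weight_map from tensor name to shard file. A sketch for inspecting it locally, assuming that standard layout:

import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total checkpoint size in bytes
print(Counter(index["weight_map"].values()))  # number of tensors stored in each of the 8 shards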
 
modeling_sarvam_moe.py ADDED
@@ -0,0 +1,1025 @@
1
+ """PyTorch Sarvam MoE model."""
2
+
3
+ import math
4
+ from typing import List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch import nn
9
+
10
+ from transformers.activations import ACT2FN
11
+ from transformers.cache_utils import Cache, DynamicCache
12
+ from transformers.modeling_attn_mask_utils import (
13
+ AttentionMaskConverter,
14
+ _prepare_4d_attention_mask,
15
+ _prepare_4d_causal_attention_mask,
16
+ _prepare_4d_causal_attention_mask_for_sdpa,
17
+ )
18
+ from transformers.modeling_outputs import MoeModelOutputWithPast
19
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
20
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
21
+ from transformers.modeling_utils import PreTrainedModel
22
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
23
+ from transformers.utils import (
24
+ is_flash_attn_2_available,
25
+ is_flash_attn_greater_or_equal_2_10,
26
+ logging,
27
+ )
28
+ from transformers.generation.utils import GenerationMixin
29
+ from dataclasses import dataclass
30
+ from transformers.utils import ModelOutput
31
+
32
+
33
+ if is_flash_attn_2_available():
34
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
35
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
36
+
37
+ from .configuration_sarvam_moe import SarvamMoEConfig
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CONFIG_FOR_DOC = "SarvamMoEConfig"
42
+
43
+
44
+ @dataclass
45
+ class SarvamMoECausalLMOutputWithPast(ModelOutput):
46
+ loss: Optional[torch.FloatTensor] = None
47
+ logits: Optional[torch.FloatTensor] = None
48
+ past_key_values: Optional[Cache] = None
49
+ hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
50
+ attentions: Optional[tuple[torch.FloatTensor, ...]] = None
51
+ z_loss: Optional[torch.FloatTensor] = None
52
+ aux_loss: Optional[torch.FloatTensor] = None
53
+ router_logits: Optional[tuple[torch.FloatTensor]] = None
54
+
55
+
56
+ class SarvamMoEModelOutputWithPast(MoeModelOutputWithPast):
57
+ pass
58
+
59
+
60
+ def _get_unpad_data(attention_mask):
61
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
62
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
63
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
64
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
65
+ return indices, cu_seqlens, max_seqlen_in_batch
66
+
67
+
68
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
69
+ return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
70
+
71
+
72
+ def _make_causal_mask(
73
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
74
+ ):
75
+ return AttentionMaskConverter._make_causal_mask(
76
+ input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length
77
+ )
78
+
79
+
80
+ class SarvamMoERMSNorm(nn.Module):
81
+ def __init__(self, hidden_size, eps=1e-6):
82
+ super().__init__()
83
+ self.weight = nn.Parameter(torch.ones(hidden_size))
84
+ self.variance_epsilon = eps
85
+
86
+ def forward(self, hidden_states):
87
+ input_dtype = hidden_states.dtype
88
+ hidden_states = hidden_states.to(torch.float32)
89
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
90
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
91
+ return self.weight * hidden_states.to(input_dtype)
92
+
93
+
94
+ ALL_LAYERNORM_LAYERS.append(SarvamMoERMSNorm)
95
+
96
+
97
+ class SarvamMoERotaryEmbedding(nn.Module):
98
+ def __init__(self, config: SarvamMoEConfig, device=None):
99
+ super().__init__()
100
+ self.config = config
101
+ self.max_seq_len_cached = config.max_position_embeddings
102
+ self.original_max_seq_len = config.max_position_embeddings
103
+ rope_scaling = getattr(config, "rope_scaling", None)
104
+ if rope_scaling is None:
105
+ self.rope_type = "default"
106
+ inv_freq, self.attention_scaling = self.compute_default_rope_parameters(
107
+ config, device
108
+ )
109
+ else:
110
+ self.rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", "default"))
111
+ if self.rope_type == "default":
112
+ inv_freq, self.attention_scaling = self.compute_default_rope_parameters(
113
+ config, device
114
+ )
115
+ else:
116
+ rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
117
+ inv_freq, self.attention_scaling = rope_init_fn(config, device)
118
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
119
+ self.original_inv_freq = self.inv_freq
120
+
121
+ @staticmethod
122
+ def compute_default_rope_parameters(
123
+ config: SarvamMoEConfig,
124
+ device: Optional[torch.device] = None,
125
+ seq_len: Optional[int] = None,
126
+ ) -> Tuple[torch.Tensor, float]:
127
+ """
128
+ Default RoPE parameters (classic rotary embedding).
129
+
130
+ Mirrors HF's default implementation: use `rope_theta`, head_dim and
131
+ return (inv_freq, attention_scaling).
132
+ """
133
+ base = config.rope_theta
134
+ dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
135
+ inv_freq = 1.0 / (
136
+ base
137
+ ** (
138
+ torch.arange(0, dim, 2, dtype=torch.int64, device=device)
139
+ .to(dtype=torch.float32)
140
+ / dim
141
+ )
142
+ )
143
+ attention_factor = 1.0
144
+ return inv_freq, attention_factor
145
+
146
+ @torch.no_grad()
147
+ @dynamic_rope_update
148
+ def forward(self, x, position_ids):
149
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
150
+ position_ids_expanded = position_ids[:, None, :].float()
151
+
152
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
153
+ with torch.autocast(device_type=device_type, enabled=False):
154
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
155
+ emb = torch.cat((freqs, freqs), dim=-1)
156
+ cos = emb.cos() * self.attention_scaling
157
+ sin = emb.sin() * self.attention_scaling
158
+
159
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
160
+
161
+
162
+ def rotate_half(x):
163
+ x1 = x[..., : x.shape[-1] // 2]
164
+ x2 = x[..., x.shape[-1] // 2 :]
165
+ return torch.cat((-x2, x1), dim=-1)
166
+
167
+
168
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
169
+ cos = cos.unsqueeze(unsqueeze_dim)
170
+ sin = sin.unsqueeze(unsqueeze_dim)
171
+ rotary_dim = cos.shape[-1]
172
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
173
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
174
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
175
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
176
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
177
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
178
+ return q_embed, k_embed
179
+
180
+
181
+ class SarvamMoEMLP(nn.Module):
182
+ def __init__(self, config: SarvamMoEConfig, intermediate_size: int):
183
+ super().__init__()
184
+ self.config = config
185
+ self.hidden_size = config.hidden_size
186
+ self.intermediate_size = intermediate_size
187
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
188
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
189
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
190
+ self.act_fn = ACT2FN[config.hidden_act]
191
+
192
+ def forward(self, x):
193
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
194
+
195
+
196
+ class SarvamMoEGate(nn.Module):
197
+ def __init__(self, config):
198
+ super().__init__()
199
+ self.config = config
200
+ self.top_k = config.num_experts_per_tok
201
+ self.num_experts = config.num_experts
202
+ self.n_group = config.n_group
203
+ self.topk_group = config.topk_group
204
+ self.gating_dim = config.hidden_size
205
+ self.weight = nn.Parameter(torch.empty((self.num_experts, self.gating_dim)))
206
+ self.routed_scaling_factor = config.routed_scaling_factor
207
+ self.score_function = config.score_function
208
+ # Ideally, we should register the expert_bias as a buffer, but vllm complains about it.
209
+ # self.register_buffer("expert_bias", torch.zeros((self.num_experts)))
210
+ self.expert_bias = nn.Parameter(
211
+ torch.zeros((self.num_experts)),
212
+ requires_grad=False,
213
+ )
214
+ self.reset_parameters()
215
+
216
+ def reset_parameters(self) -> None:
217
+ import torch.nn.init as init
218
+
219
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
220
+
221
+ def group_limited_topk(self, scores: torch.Tensor):
222
+ num_tokens, _ = scores.size()
223
+ group_scores = scores.view(num_tokens, self.n_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
224
+ group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
225
+ group_mask = torch.zeros_like(group_scores)
226
+ group_mask.scatter_(1, group_idx, 1)
227
+ score_mask = (
228
+ group_mask.unsqueeze(-1)
229
+ .expand(num_tokens, self.n_group, self.num_experts // self.n_group)
230
+ .reshape(num_tokens, -1)
231
+ )
232
+ masked_scores = scores.masked_fill(~score_mask.bool(), float("-inf"))
233
+ probs, top_indices = torch.topk(masked_scores, k=self.top_k, dim=-1)
234
+ return probs, top_indices
235
+
236
+ def forward(self, hidden_states):
237
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
238
+ logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
239
+ scores = torch.sigmoid(logits.float()).type_as(logits)
240
+ scores_for_routing = scores + self.expert_bias
241
+ _, topk_idx = self.group_limited_topk(scores_for_routing)
242
+ scores = torch.gather(scores, dim=1, index=topk_idx).type_as(logits)
243
+ topk_weight = scores / (scores.sum(dim=-1, keepdim=True) + 1e-20) if self.top_k > 1 else scores
244
+ topk_weight = topk_weight * self.routed_scaling_factor
245
+ return topk_idx, topk_weight, logits
246
+
247
+
248
+ class SarvamMoEExperts(nn.ModuleList):
249
+ def __init__(self, config: SarvamMoEConfig):
250
+ # one MLP per expert
251
+ experts = [
252
+ SarvamMoEMLP(config=config, intermediate_size=config.moe_intermediate_size)
253
+ for _ in range(config.num_experts)
254
+ ]
255
+ super().__init__(experts)
256
+ self.config = config
257
+ self.num_experts_per_tok = config.num_experts_per_tok
258
+
259
+ def forward(
260
+ self,
261
+ hidden_states: torch.Tensor,
262
+ top_k_index: torch.LongTensor,
263
+ top_k_weights: torch.Tensor,
264
+ ) -> torch.Tensor:
265
+ """
266
+ hidden_states: (tokens, hidden_size) or (batch * seq, hidden_size)
267
+ top_k_index: (tokens, top_k)
268
+ top_k_weights: (tokens, top_k)
269
+ """
270
+ tokens, hidden_dim = hidden_states.shape
271
+ flat_topk_idx = top_k_index.view(-1)
272
+
273
+ if self.training:
274
+ # training path: same as your previous logic
275
+ x = hidden_states.repeat_interleave(self.num_experts_per_tok, dim=0)
276
+ y = torch.empty_like(x)
277
+ for i, expert in enumerate(self):
278
+ mask = flat_topk_idx == i
279
+ if mask.any():
280
+ y[mask] = expert(x[mask])
281
+ y = (y.view(*top_k_weights.shape, -1) * top_k_weights.unsqueeze(-1)).sum(dim=1)
282
+ return y.to(hidden_states.dtype)
283
+
284
+ # inference path: previous moe_infer logic
285
+ num_experts = len(self)
286
+ cnts = top_k_index.new_zeros((tokens, num_experts))
287
+ cnts.scatter_(1, top_k_index, 1)
288
+ tokens_per_expert = cnts.sum(dim=0)
289
+
290
+ idxs = top_k_index.view(-1).argsort()
291
+ sorted_tokens = hidden_states[idxs // top_k_index.shape[1]]
292
+
293
+ tokens_per_expert = tokens_per_expert.cpu().numpy().tolist()
294
+ outputs = []
295
+ start_idx = 0
296
+ for i, num_tokens in enumerate(tokens_per_expert):
297
+ end_idx = start_idx + num_tokens
298
+ if num_tokens == 0:
299
+ continue
300
+ expert = self[i]
301
+ tokens_for_expert = sorted_tokens[start_idx:end_idx]
302
+ expert_out = expert(tokens_for_expert)
303
+ outputs.append(expert_out.to(hidden_states.device))
304
+ start_idx = end_idx
305
+
306
+ outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
307
+ new_x = torch.empty_like(outs)
308
+ new_x[idxs] = outs
309
+
310
+ final_out = (
311
+ new_x.view(*top_k_index.shape, -1)
312
+ .type(top_k_weights.dtype)
313
+ .mul_(top_k_weights.unsqueeze(dim=-1))
314
+ .sum(dim=1)
315
+ .type(new_x.dtype)
316
+ )
317
+ return final_out
318
+
319
+
320
+ class SarvamMoESparseMoeBlock(nn.Module):
321
+ def __init__(self, config: SarvamMoEConfig):
322
+ super().__init__()
323
+ self.config = config
324
+ self.num_experts_per_tok = config.num_experts_per_tok
325
+
326
+ # use the new experts container
327
+ self.experts = SarvamMoEExperts(config)
328
+ self.gate = SarvamMoEGate(config)
329
+
330
+ if config.num_shared_experts is not None:
331
+ self.shared_experts = SarvamMoEMLP(
332
+ config=config,
333
+ intermediate_size=config.moe_intermediate_size * config.num_shared_experts,
334
+ )
335
+
336
+ # _setup_experts no longer needed
337
+
338
+ def forward(self, hidden_states):
339
+ identity = hidden_states
340
+ bsz, seq_len, h = hidden_states.shape
341
+
342
+ topk_idx, topk_weight, router_logits = self.gate(hidden_states)
343
+
344
+ # flatten batch+seq for experts
345
+ flat_hidden = hidden_states.view(-1, h)
346
+ flat_topk_idx = topk_idx.view(-1, topk_idx.shape[-1])
347
+ flat_topk_weight = topk_weight.view(-1, topk_weight.shape[-1])
348
+
349
+ y = self.experts(flat_hidden, flat_topk_idx, flat_topk_weight)
350
+ y = y.view(bsz, seq_len, h)
351
+
352
+ if self.config.num_shared_experts is not None:
353
+ y = y + self.shared_experts(identity)
354
+
355
+ # router logits shape: (bsz, seq_len, num_experts)
356
+ router_info = (
357
+ router_logits.view(bsz, seq_len, -1),
358
+ topk_idx.view(bsz, seq_len, -1),
359
+ )
360
+ return y, router_info
361
+
362
+
363
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
364
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
365
+ if n_rep == 1:
366
+ return hidden_states
367
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
368
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
369
+
370
+
371
+ class SarvamMoEAttention(nn.Module):
372
+ is_causal = True # vLLM / Transformers backend critical flag
373
+ def __init__(self, config: SarvamMoEConfig, layer_idx: Optional[int] = None):
374
+ super().__init__()
375
+ self.config = config
376
+ self.layer_idx = layer_idx
377
+ if layer_idx is None:
378
+ logger.warning_once(
379
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
380
+ "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
381
+ "when creating this class."
382
+ )
383
+ self.attention_dropout = config.attention_dropout
384
+ self.hidden_size = config.hidden_size
385
+ self.num_heads = config.num_attention_heads
386
+ self.head_dim = config.head_dim or self.hidden_size // self.num_heads
387
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
388
+ self.rope_dim = int(self.head_dim * partial_rotary_factor)
389
+ self.num_key_value_heads = config.num_key_value_heads
390
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
391
+ self.max_position_embeddings = config.max_position_embeddings
392
+ self.rope_theta = config.rope_theta
393
+ self.query_key_value = nn.Linear(
394
+ self.hidden_size,
395
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
396
+ bias=config.use_qkv_bias,
397
+ )
398
+ if self.config.use_qk_norm:
399
+ self.query_layernorm = SarvamMoERMSNorm(self.head_dim, eps=config.rms_norm_eps)
400
+ self.key_layernorm = SarvamMoERMSNorm(self.head_dim, eps=config.rms_norm_eps)
401
+ self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.use_bias)
402
+ self.scaling = self.head_dim**-0.5
403
+
404
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
405
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
406
+
407
+ def forward(
408
+ self,
409
+ hidden_states: torch.Tensor,
410
+ attention_mask: Optional[torch.Tensor] = None,
411
+ position_ids: Optional[torch.LongTensor] = None,
412
+ past_key_value: Optional[Cache] = None,
413
+ output_attentions: bool = False,
414
+ use_cache: bool = False,
415
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
416
+ **kwargs,
417
+ ):
418
+ bsz, q_len, _ = hidden_states.size()
419
+ qkv = self.query_key_value(hidden_states)
420
+ qkv = qkv.view(
421
+ bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim
422
+ )
423
+ query_states, key_states, value_states = qkv.split(
424
+ [self.num_heads, self.num_key_value_heads, self.num_key_value_heads],
425
+ dim=-2,
426
+ )
427
+ query_states = query_states.transpose(1, 2).contiguous()
428
+ key_states = key_states.transpose(1, 2).contiguous()
429
+ value_states = value_states.transpose(1, 2).contiguous()
430
+ if self.config.use_qk_norm:
431
+ query_states = self.query_layernorm(query_states)
432
+ key_states = self.key_layernorm(key_states)
433
+ cos, sin = position_embeddings
434
+ query_states, key_states = apply_rotary_pos_emb(
435
+ query_states, key_states, cos, sin
436
+ )
437
+ if past_key_value is not None:
438
+ if self.layer_idx is None:
439
+ raise ValueError(
440
+ "When using cache, SarvamMoEAttention must be initialized with layer_idx."
441
+ )
442
+ cache_kwargs = {"sin": sin, "cos": cos}
443
+ key_states, value_states = past_key_value.update(
444
+ key_states, value_states, self.layer_idx, cache_kwargs
445
+ )
446
+ # NOTE: vLLM will set config._attn_implementation = "vllm"
447
+ if self.config._attn_implementation == "vllm":
448
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
449
+ attn_output, attn_weights = attention_interface(
450
+ self,
451
+ query_states,
452
+ key_states,
453
+ value_states,
454
+ attention_mask,
455
+ dropout=0.0 if not self.training else self.attention_dropout,
456
+ scaling=self.scaling,
457
+ **kwargs,
458
+ )
459
+ # vLLM backend may return [B, L, hidden] or [B*L, hidden]
460
+ if attn_output.dim() == 4:
461
+ # [B, H, L, Dh] -> [B, L, hidden]
462
+ attn_output = attn_output.transpose(1, 2).contiguous()
463
+ attn_output = attn_output.view(bsz, q_len, -1)
464
+ elif attn_output.dim() == 3:
465
+ if attn_output.shape[0] != bsz or attn_output.shape[1] != q_len:
466
+ raise ValueError(
467
+ f"Unexpected vLLM attention output shape {attn_output.shape}, "
468
+ f"expected (bsz={bsz}, q_len={q_len}, hidden=*)"
469
+ )
470
+ elif attn_output.dim() == 2:
471
+ attn_output = attn_output.view(bsz, q_len, -1)
472
+ else:
473
+ raise ValueError(
474
+ f"Unsupported vLLM attention output rank {attn_output.dim()} "
475
+ f"with shape {attn_output.shape}"
476
+ )
477
+ attn_output = self.dense(attn_output)
478
+ if not output_attentions:
479
+ attn_weights = None
480
+ return attn_output, attn_weights, past_key_value
481
+
482
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
483
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
484
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
485
+ kv_seq_len = key_states.shape[-2]
486
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
487
+ raise ValueError(
488
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
489
+ f" {attn_weights.size()}"
490
+ )
491
+ if attention_mask is not None:
492
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
493
+ raise ValueError(
494
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
495
+ )
496
+ attn_weights = attn_weights + attention_mask
497
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
498
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
499
+ attn_output = torch.matmul(attn_weights, value_states)
500
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
501
+ raise ValueError(
502
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
503
+ f" {attn_output.size()}"
504
+ )
505
+ attn_output = attn_output.transpose(1, 2).contiguous()
506
+ attn_output = attn_output.reshape(bsz, q_len, -1)
507
+ attn_output = self.dense(attn_output)
508
+ if not output_attentions:
509
+ attn_weights = None
510
+ return attn_output, attn_weights, past_key_value
511
+
512
+
513
+ class SarvamMoEFlashAttention2(SarvamMoEAttention):
514
+ def __init__(self, *args, **kwargs):
515
+ super().__init__(*args, **kwargs)
516
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
517
+
518
+ def forward(
519
+ self,
520
+ hidden_states: torch.Tensor,
521
+ attention_mask: Optional[torch.LongTensor] = None,
522
+ position_ids: Optional[torch.LongTensor] = None,
523
+ past_key_value: Optional[Cache] = None,
524
+ output_attentions: bool = False,
525
+ use_cache: bool = False,
526
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
527
+ **kwargs,
528
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
529
+ output_attentions = False
530
+ bsz, q_len, _ = hidden_states.size()
531
+ qkv = self.query_key_value(hidden_states)
532
+ qkv = qkv.view(bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim)
533
+ query_states, key_states, value_states = qkv.split(
534
+ [self.num_heads, self.num_key_value_heads, self.num_key_value_heads], dim=-2
535
+ )
536
+ query_states = query_states.transpose(1, 2)
537
+ key_states = key_states.transpose(1, 2)
538
+ value_states = value_states.transpose(1, 2)
539
+ if self.config.use_qk_norm:
540
+ query_states = self.query_layernorm(query_states)
541
+ key_states = self.key_layernorm(key_states)
542
+ cos, sin = position_embeddings
543
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
544
+ if past_key_value is not None:
545
+ cache_kwargs = {"sin": sin, "cos": cos}
546
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
547
+ query_states = query_states.transpose(1, 2)
548
+ key_states = key_states.transpose(1, 2)
549
+ value_states = value_states.transpose(1, 2)
550
+ dropout_rate = self.attention_dropout if self.training else 0.0
551
+ input_dtype = query_states.dtype
552
+ if input_dtype == torch.float32:
553
+ if hasattr(self.config, "_pre_quantization_dtype"):
554
+ target_dtype = self.config._pre_quantization_dtype
555
+ elif torch.is_autocast_enabled():
556
+ target_dtype = torch.get_autocast_gpu_dtype()
557
+ else:
558
+ target_dtype = self.query_key_value.weight.dtype
559
+ logger.warning_once(
560
+ f"The input hidden states seem to have been silently cast to float32; this might be related to"
561
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
562
+ f" {target_dtype}."
563
+ )
564
+ query_states = query_states.to(target_dtype)
565
+ key_states = key_states.to(target_dtype)
566
+ value_states = value_states.to(target_dtype)
567
+ attn_output = self._flash_attention_forward(
568
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
569
+ )
570
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
571
+ attn_output = self.dense(attn_output)
572
+ if not output_attentions:
573
+ attn_weights = None
574
+ return attn_output, attn_weights, past_key_value
575
+
576
+ def _flash_attention_forward(
577
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
578
+ ):
579
+ if not self._flash_attn_uses_top_left_mask:
580
+ causal = self.is_causal
581
+ else:
582
+ causal = self.is_causal and query_length != 1
583
+ if attention_mask is not None:
584
+ batch_size = query_states.shape[0]
585
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
586
+ query_states, key_states, value_states, attention_mask, query_length
587
+ )
588
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
589
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
590
+ attn_output_unpad = flash_attn_varlen_func(
591
+ query_states,
592
+ key_states,
593
+ value_states,
594
+ cu_seqlens_q=cu_seqlens_q,
595
+ cu_seqlens_k=cu_seqlens_k,
596
+ max_seqlen_q=max_seqlen_in_batch_q,
597
+ max_seqlen_k=max_seqlen_in_batch_k,
598
+ dropout_p=dropout,
599
+ softmax_scale=softmax_scale,
600
+ causal=causal,
601
+ )
602
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
603
+ else:
604
+ attn_output = flash_attn_func(
605
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
606
+ )
607
+ return attn_output
608
+
609
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
610
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
611
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
612
+ key_layer = index_first_axis(
613
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
614
+ )
615
+ value_layer = index_first_axis(
616
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
617
+ )
618
+ if query_length == kv_seq_len:
619
+ query_layer = index_first_axis(
620
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
621
+ )
622
+ cu_seqlens_q = cu_seqlens_k
623
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
624
+ indices_q = indices_k
625
+ elif query_length == 1:
626
+ max_seqlen_in_batch_q = 1
627
+ cu_seqlens_q = torch.arange(
628
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
629
+ )
630
+ indices_q = cu_seqlens_q[:-1]
631
+ query_layer = query_layer.squeeze(1)
632
+ else:
633
+ attention_mask = attention_mask[:, -query_length:]
634
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
635
+ return (
636
+ query_layer,
637
+ key_layer,
638
+ value_layer,
639
+ indices_q,
640
+ (cu_seqlens_q, cu_seqlens_k),
641
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
642
+ )
643
+
644
+
645
+ class SarvamMoESdpaAttention(SarvamMoEAttention):
646
+ def forward(
647
+ self,
648
+ hidden_states: torch.Tensor,
649
+ attention_mask: Optional[torch.Tensor] = None,
650
+ position_ids: Optional[torch.LongTensor] = None,
651
+ past_key_value: Optional[Cache] = None,
652
+ output_attentions: Optional[bool] = False,
653
+ use_cache: Optional[bool] = False,
654
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
655
+ **kwargs,
656
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
657
+ if output_attentions:
658
+ return super().forward(
659
+ hidden_states=hidden_states,
660
+ attention_mask=attention_mask,
661
+ position_ids=position_ids,
662
+ past_key_value=past_key_value,
663
+ output_attentions=output_attentions,
664
+ use_cache=use_cache,
665
+ **kwargs,
666
+ )
667
+ bsz, q_len, _ = hidden_states.size()
668
+ qkv = self.query_key_value(hidden_states)
669
+ qkv = qkv.view(bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim)
670
+ query_states, key_states, value_states = qkv.split(
671
+ [self.num_heads, self.num_key_value_heads, self.num_key_value_heads], dim=-2
672
+ )
673
+ query_states = query_states.transpose(1, 2)
674
+ key_states = key_states.transpose(1, 2)
675
+ value_states = value_states.transpose(1, 2)
676
+ if self.config.use_qk_norm:
677
+ query_states = self.query_layernorm(query_states)
678
+ key_states = self.key_layernorm(key_states)
679
+ cos, sin = position_embeddings
680
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
681
+ if past_key_value is not None:
682
+ cache_kwargs = {"sin": sin, "cos": cos}
683
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
684
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
685
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
686
+ if attention_mask is not None:
687
+ kv_seq_len = key_states.shape[-2]
688
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
689
+ raise ValueError(
690
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
691
+ )
692
+ if query_states.device.type == "cuda" and attention_mask is not None:
693
+ query_states = query_states.contiguous()
694
+ key_states = key_states.contiguous()
695
+ value_states = value_states.contiguous()
696
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
697
+ query_states,
698
+ key_states,
699
+ value_states,
700
+ attn_mask=attention_mask,
701
+ dropout_p=self.attention_dropout if self.training else 0.0,
702
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
703
+ )
704
+ attn_output = attn_output.transpose(1, 2).contiguous()
705
+ attn_output = attn_output.reshape(bsz, q_len, -1)
706
+ attn_output = self.dense(attn_output)
707
+ return attn_output, None, past_key_value
708
+
709
+
710
+ ATTENTION_CLASSES = {
711
+ "eager": SarvamMoEAttention,
712
+ "flash_attention_2": SarvamMoEFlashAttention2,
713
+ "sdpa": SarvamMoESdpaAttention,
714
+ "vllm": SarvamMoEAttention,
715
+ }
716
+
717
+
718
+ class SarvamMoEDecoderLayer(nn.Module):
719
+ def __init__(self, config: SarvamMoEConfig, layer_idx: int):
720
+ super().__init__()
721
+ self.hidden_size = config.hidden_size
722
+ self.attention = ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
723
+ self.mlp = (
724
+ SarvamMoESparseMoeBlock(config)
725
+ if (config.num_experts is not None and layer_idx >= config.first_k_dense_replace)
726
+ else SarvamMoEMLP(config=config, intermediate_size=config.intermediate_size)
727
+ )
728
+ self.input_layernorm = SarvamMoERMSNorm(config.hidden_size, eps=config.rms_norm_eps)
729
+ self.post_attention_layernorm = SarvamMoERMSNorm(config.hidden_size, eps=config.rms_norm_eps)
730
+
731
+ def forward(
732
+ self,
733
+ hidden_states: torch.Tensor,
734
+ attention_mask: Optional[torch.Tensor] = None,
735
+ position_ids: Optional[torch.LongTensor] = None,
736
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
737
+ output_attentions: Optional[bool] = False,
738
+ output_router_logits: Optional[bool] = False,
739
+ use_cache: Optional[bool] = False,
740
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
741
+ **kwargs,
742
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
743
+ residual = hidden_states
744
+ hidden_states = self.input_layernorm(hidden_states)
745
+ hidden_states, self_attn_weights, present_key_value = self.attention(
746
+ hidden_states=hidden_states,
747
+ attention_mask=attention_mask,
748
+ position_ids=position_ids,
749
+ past_key_value=past_key_value,
750
+ output_attentions=output_attentions,
751
+ position_embeddings=position_embeddings,
752
+ use_cache=use_cache,
753
+ **kwargs,
754
+ )
755
+ hidden_states = residual + hidden_states
756
+ residual = hidden_states
757
+ hidden_states = self.post_attention_layernorm(hidden_states)
758
+ hidden_states = self.mlp(hidden_states)
759
+ if isinstance(hidden_states, tuple):
760
+ hidden_states, router_logits = hidden_states
761
+ else:
762
+ router_logits = None
763
+ hidden_states = residual + hidden_states.to(residual.device)
764
+ outputs = (hidden_states,)
765
+ if output_attentions:
766
+ outputs += (self_attn_weights,)
767
+ if use_cache:
768
+ outputs += (present_key_value,)
769
+ if output_router_logits:
770
+ outputs += (router_logits,)
771
+ return outputs
772
+
773
+ class SarvamMoEPreTrainedModel(PreTrainedModel):
774
+ config_class = SarvamMoEConfig
775
+ base_model_prefix = "model"
776
+ supports_gradient_checkpointing = True
777
+ _no_split_modules = ["SarvamMoEDecoderLayer"]
778
+ _skip_keys_device_placement = "past_key_values"
779
+ _supports_flash_attn_2 = True
780
+ _supports_sdpa = True
781
+ _supports_cache_class = True
782
+
783
+ def _init_weights(self, module):
784
+ std = self.config.initializer_range
785
+ if isinstance(module, nn.Linear):
786
+ module.weight.data.normal_(mean=0.0, std=std)
787
+ if module.bias is not None:
788
+ module.bias.data.zero_()
789
+ elif isinstance(module, nn.Embedding):
790
+ module.weight.data.normal_(mean=0.0, std=std)
791
+ if module.padding_idx is not None:
792
+ module.weight.data[module.padding_idx].zero_()
793
+
794
+
795
+
796
+ class SarvamMoEModel(SarvamMoEPreTrainedModel):
797
+ _supports_attention_backend = True
798
+ def __init__(self, config: SarvamMoEConfig):
799
+ super().__init__(config)
800
+ self.padding_idx = config.pad_token_id
801
+ self.vocab_size = config.vocab_size
802
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
803
+ self.layers = []
804
+ for layer_idx in range(config.num_hidden_layers):
805
+ self.layers.append(SarvamMoEDecoderLayer(config, layer_idx))
806
+ self.layers = nn.ModuleList(self.layers)
807
+ self._use_sdpa = config._attn_implementation == "sdpa"
808
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
809
+ self.norm = SarvamMoERMSNorm(config.hidden_size, eps=config.rms_norm_eps)
810
+ self.rotary_emb = SarvamMoERotaryEmbedding(config=config)
811
+ self.gradient_checkpointing = False
812
+ self.post_init()
813
+
814
+ def get_input_embeddings(self):
815
+ return self.word_embeddings
816
+
817
+ def set_input_embeddings(self, value):
818
+ self.word_embeddings = value
819
+
820
+ def forward(
821
+ self,
822
+ input_ids: torch.LongTensor = None,
823
+ attention_mask: Optional[torch.Tensor] = None,
824
+ position_ids: Optional[torch.LongTensor] = None,
825
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
826
+ inputs_embeds: Optional[torch.FloatTensor] = None,
827
+ use_cache: Optional[bool] = None,
828
+ output_attentions: Optional[bool] = None,
829
+ output_hidden_states: Optional[bool] = None,
830
+ output_router_logits: Optional[bool] = None,
831
+ return_dict: Optional[bool] = None,
832
+ **kwargs,
833
+ ) -> Union[Tuple, SarvamMoEModelOutputWithPast]:
834
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
835
+ output_hidden_states = (
836
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
837
+ )
838
+ output_router_logits = (
839
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
840
+ )
841
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
842
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
843
+ if input_ids is not None and inputs_embeds is not None:
844
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
845
+ elif input_ids is not None:
846
+ batch_size, seq_length = input_ids.shape[:2]
847
+ elif inputs_embeds is not None:
848
+ batch_size, seq_length = inputs_embeds.shape[:2]
849
+ else:
850
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
851
+ if self.gradient_checkpointing and self.training:
852
+ if use_cache:
853
+ logger.warning_once(
854
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
855
+ )
856
+ use_cache = False
857
+ if use_cache and past_key_values is None:
858
+ past_key_values = DynamicCache()
859
+ if inputs_embeds is None:
860
+ inputs_embeds = self.word_embeddings(input_ids)
861
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
862
+ if position_ids is None:
863
+ position_ids = torch.arange(
864
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
865
+ )
866
+ position_ids = position_ids.unsqueeze(0)
867
+ if self._use_flash_attention_2:
868
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
869
+ elif self._use_sdpa and not output_attentions:
870
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
871
+ attention_mask,
872
+ (batch_size, seq_length),
873
+ inputs_embeds,
874
+ past_seen_tokens,
875
+ )
876
+ else:
877
+ attention_mask = _prepare_4d_causal_attention_mask(
878
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_seen_tokens
879
+ )
880
+ hidden_states = inputs_embeds
881
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
882
+ all_hidden_states = () if output_hidden_states else None
883
+ all_self_attns = () if output_attentions else None
884
+ all_router_logits = () if output_router_logits else None
885
+ next_decoder_cache = None
886
+ layers = self.layers
887
+ for decoder_layer in layers:
888
+ if output_hidden_states:
889
+ all_hidden_states += (hidden_states,)
890
+ if self.gradient_checkpointing and self.training:
891
+ layer_outputs = self._gradient_checkpointing_func(
892
+ decoder_layer.__call__,
893
+ hidden_states,
894
+ attention_mask,
895
+ position_ids,
896
+ past_key_values,
897
+ output_attentions,
898
+ output_router_logits,
899
+ use_cache,
900
+ position_embeddings,
901
+ **kwargs,
902
+ )
903
+ else:
904
+ layer_outputs = decoder_layer(
905
+ hidden_states,
906
+ attention_mask=attention_mask,
907
+ position_ids=position_ids,
908
+ past_key_value=past_key_values,
909
+ output_attentions=output_attentions,
910
+ output_router_logits=output_router_logits,
911
+ use_cache=use_cache,
912
+ position_embeddings=position_embeddings,
913
+ **kwargs,
914
+ )
915
+ hidden_states = layer_outputs[0]
916
+ if use_cache:
917
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
918
+ if output_attentions:
919
+ all_self_attns += (layer_outputs[1],)
920
+ if output_router_logits and layer_outputs[-1] is not None:
921
+ all_router_logits += (layer_outputs[-1],)
922
+ hidden_states = self.norm(hidden_states)
923
+ if output_hidden_states:
924
+ all_hidden_states += (hidden_states,)
925
+ next_cache = None
926
+ if use_cache:
927
+ next_cache = next_decoder_cache
928
+ if not return_dict:
929
+ return tuple(
930
+ v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] if v is not None
931
+ )
932
+ return SarvamMoEModelOutputWithPast(
933
+ last_hidden_state=hidden_states,
934
+ past_key_values=next_cache,
935
+ hidden_states=all_hidden_states,
936
+ attentions=all_self_attns,
937
+ router_logits=all_router_logits,
938
+ )
939
+
940
+
941
+ class SarvamMoEForCausalLM(SarvamMoEPreTrainedModel, GenerationMixin):
942
+ _tied_weights_keys = ["lm_head.weight"]
943
+
944
+ def __init__(self, config: SarvamMoEConfig):
945
+ super().__init__(config)
946
+ self.model = SarvamMoEModel(config)
947
+ self.vocab_size = config.vocab_size
948
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
949
+ self.post_init()
950
+
951
+ def get_input_embeddings(self):
952
+ return self.model.word_embeddings
953
+
954
+ def set_input_embeddings(self, value):
955
+ self.model.word_embeddings = value
956
+
957
+ def get_output_embeddings(self):
958
+ return self.lm_head
959
+
960
+ def set_output_embeddings(self, new_embeddings):
961
+ self.lm_head = new_embeddings
962
+
963
+ def set_decoder(self, decoder):
964
+ self.model = decoder
965
+
966
+ def get_decoder(self):
967
+ return self.model
968
+
969
+ def forward(
970
+ self,
971
+ input_ids: torch.LongTensor = None,
972
+ attention_mask: Optional[torch.Tensor] = None,
973
+ position_ids: Optional[torch.LongTensor] = None,
974
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
975
+ inputs_embeds: Optional[torch.FloatTensor] = None,
976
+ labels: Optional[torch.LongTensor] = None,
977
+ use_cache: Optional[bool] = None,
978
+ output_attentions: Optional[bool] = None,
979
+ output_hidden_states: Optional[bool] = None,
980
+ output_router_logits: Optional[bool] = None,
981
+ return_dict: Optional[bool] = None,
982
+ **kwargs,
983
+ ) -> Union[Tuple, SarvamMoECausalLMOutputWithPast]:
984
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
985
+ output_hidden_states = (
986
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
987
+ )
988
+ output_router_logits = (
989
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
990
+ )
991
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
992
+ outputs = self.model(
993
+ input_ids=input_ids,
994
+ attention_mask=attention_mask,
995
+ position_ids=position_ids,
996
+ past_key_values=past_key_values,
997
+ inputs_embeds=inputs_embeds,
998
+ use_cache=use_cache,
999
+ output_attentions=output_attentions,
1000
+ output_hidden_states=output_hidden_states,
1001
+ output_router_logits=output_router_logits,
1002
+ return_dict=return_dict,
1003
+ **kwargs,
1004
+ )
1005
+ loss = None
1006
+ aux_loss = None
1007
+ hidden_states = outputs[0]
1008
+ logits = self.lm_head(hidden_states)
1009
+ logits = logits.float()
1010
+ if labels is not None:
1011
+ loss = self.loss_function(logits, labels, self.config.vocab_size, **kwargs)
1012
+ if not return_dict:
1013
+ output = (logits,) + outputs[1:]
1014
+ if output_router_logits:
1015
+ output = (aux_loss,) + output
1016
+ return (loss,) + output if loss is not None else output
1017
+ return SarvamMoECausalLMOutputWithPast(
1018
+ loss=loss,
1019
+ logits=logits,
1020
+ past_key_values=outputs.past_key_values,
1021
+ hidden_states=outputs.hidden_states,
1022
+ attentions=outputs.attentions,
1023
+ aux_loss=aux_loss,
1024
+ router_logits=outputs.router_logits,
1025
+ )
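The most model-specific piece above is the router: SarvamMoEGate scores each token against all experts with a sigmoid, adds the frozen expert_bias only when choosing which experts to keep, then renormalizes the unbiased scores of the chosen experts and multiplies them by routed_scaling_factor. With n_group = topk_group = 1, as in config.json, group_limited_topk collapses to an ordinary top-k. A self-contained numerical sketch of that path (toy sizes and random weights, not tied to the real checkpoint):

import torch

num_experts, top_k, routed_scaling_factor = 8, 2, 2.5
num_tokens, hidden_size = 4, 16

hidden = torch.randn(num_tokens, hidden_size)
gate_weight = torch.randn(num_experts, hidden_size)  # stand-in for SarvamMoEGate.weight
expert_bias = torch.zeros(num_experts)               # stand-in for the frozen expert_bias parameter

logits = hidden @ gate_weight.t()
scores = torch.sigmoid(logits)                       # score_function == "sigmoid"

# The bias influences which experts are selected, but not their mixing weights.
_, topk_idx = torch.topk(scores + expert_bias, k=top_k, dim=-1)
topk_scores = torch.gather(scores, dim=1, index=topk_idx)

topk_weight = topk_scores / (topk_scores.sum(dim=-1, keepdim=True) + 1e-20)
topk_weight = topk_weight * routed_scaling_factor    # 2.5 in config.json

print(topk_idx)     # chosen expert indices per token
print(topk_weight)  # normalized, scaled mixing weights per token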
recipe.yaml ADDED
@@ -0,0 +1,7 @@
1
+ default_stage:
2
+ default_modifiers:
3
+ QuantizationModifier:
4
+ targets: [Linear]
5
+ ignore: [lm_head]
6
+ scheme: FP8_DYNAMIC
7
+ bypass_divisibility_checks: false
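recipe.yaml is an llm-compressor recipe matching the compressed-tensors FP8 metadata in config.json (per-channel FP8 weights, dynamic per-token FP8 activations, lm_head ignored). A rough sketch of how such a recipe is applied to the original unquantized checkpoint, assuming the llmcompressor package (import paths differ slightly between versions, and both model paths are placeholders):

from transformers import AutoModelForCausalLM
from llmcompressor import oneshot  # older releases: from llmcompressor.transformers import oneshot

model = AutoModelForCausalLM.from_pretrained(
    "path/to/original-bf16-checkpoint",  # placeholder: the unquantized source model
    torch_dtype="auto",
    trust_remote_code=True,
)

# FP8_DYNAMIC requires no calibration data.
oneshot(model=model, recipe="recipe.yaml")

model.save_pretrained("sarvam-moe-fp8-dynamic", save_compressed=True)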
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
1
+ {
2
+ "boi_token": "<|start_of_image|>",
3
+ "bos_token": {
4
+ "content": "[@BOS@]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ "eoi_token": "<|end_of_image|>",
11
+ "eos_token": {
12
+ "content": "<|end_of_turn|>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "image_token": "<|image_soft_token|>",
19
+ "pad_token": {
20
+ "content": "<pad>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "unk_token": {
27
+ "content": "<unk>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ }
33
+ }
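These entries should match what the chat template emits ([@BOS@] as BOS, <|end_of_turn|> as EOS, <pad> for padding). A quick way to confirm how the tokenizer in this commit resolves them (placeholder repository id):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)  # placeholder id
print(tokenizer.bos_token, tokenizer.bos_token_id)
print(tokenizer.eos_token, tokenizer.eos_token_id)
print(tokenizer.pad_token, tokenizer.pad_token_id)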
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a574ceaaff7c7a8f091179c53fd17ae33567089c099d4ff37d4cb3bc1a87e80e
3
+ size 33627251
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
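Putting the pieces of this commit together, a hedged end-to-end sketch of loading the FP8 checkpoint and generating a reply (placeholder repository id; decoding the compressed-tensors weights in plain transformers assumes a recent transformers release with the compressed-tensors package installed, and vLLM is the other common serving path):

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "path/to/this-repo"  # placeholder: substitute the actual Hub id of this repository
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,  # pulls in configuration_sarvam_moe.py / modeling_sarvam_moe.py
    torch_dtype="auto",
    device_map="auto",
)

messages = [{"role": "user", "content": "Name three rivers in India."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(inputs, max_new_tokens=128, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))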