diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..52373fe24473b1aa44333d318f578ae6bf04b49b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index 01fa0d9ff07263678daa29104bae7a3a4c6bb4b1..0a5bbe28553ad1de33642c04f7652429196d3e9d 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
---
-license: mit
-base_model:
-- XiaomiMiMo/MiMo-V2-Flash
----
\ No newline at end of file
+language: en
+tags:
+- mlx
+pipeline_tag: text-generation
+library_name: mlx
+---
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..3e59b05b2427fdfc4321c3497a32683954e0702d
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,143 @@
+{%- if not add_generation_prompt is defined -%}
+ {%- set add_generation_prompt = false -%}
+{%- endif -%}
+{%- if not enable_thinking is defined -%}
+ {%- set enable_thinking = false -%}
+{%- endif -%}
+{%- if not keep_all_reasoning is defined -%}
+ {%- set keep_all_reasoning = false -%}
+{%- endif -%}
+{%- macro render_extra_keys(json_dict, handled_keys) -%}
+ {%- if json_dict is mapping %}
+ {%- for json_key in json_dict if json_key not in handled_keys %}
+ {%- if json_dict[json_key] is mapping or (json_dict[json_key] is sequence and json_dict[json_key] is not string) %}
+            {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | tojson | safe) ~ '</' ~ json_key ~ '>' }}
+ {%- else %}
+            {{-'\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | string) ~ '</' ~ json_key ~ '>' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+{%- endmacro -%}
+{%- if messages[0]["role"] == "system" %}
+ {%- set system_message = messages[0]["content"] %}
+ {%- set loop_messages = messages[1:] %}
+{%- else %}
+ {%- set loop_messages = messages %}
+{%- endif %}
+{%- set ns = namespace(last_user_index=-1) %}
+{%- for m in loop_messages %}
+ {%- if m.role == 'user' %}
+ {%- set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+{%- endfor %}
+{%- if not tools is defined %}
+ {%- set tools = [] %}
+{%- endif %}
+{%- if system_message is defined %}
+ {{- "<|im_start|>system\n" + system_message }}
+{%- else %}
+ {{- "<|im_start|>system\nYou are MiMo, a helpful AI assistant engineered by Xiaomi." }}
+{%- endif %}
+{%- if tools is iterable and tools | length > 0 %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou have access to the following functions:\n\n" }}
+ {{- "" }}
+ {%- for tool in tools %}
+ {%- if tool.function is defined %}
+ {%- set tool = tool.function %}
+ {%- endif %}
+ {{- "\n\n" ~ tool.name ~ "" }}
+ {%- if tool.description is defined %}
+ {{- '\n' ~ (tool.description | trim) ~ '' }}
+ {%- endif %}
+ {{- '\n' }}
+ {%- if tool.parameters is defined and tool.parameters is mapping and tool.parameters.properties is defined and tool.parameters.properties is mapping %}
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
+ {{- '\n' }}
+ {{- '\n' ~ param_name ~ '' }}
+ {%- if param_fields.type is defined %}
+ {{- '\n' ~ (param_fields.type | string) ~ '' }}
+ {%- endif %}
+ {%- if param_fields.description is defined %}
+ {{- '\n' ~ (param_fields.description | trim) ~ '' }}
+ {%- endif %}
+ {%- set handled_keys = ['name', 'type', 'description'] %}
+ {{- render_extra_keys(param_fields, handled_keys) }}
+ {{- '\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {%- set handled_keys = ['type', 'properties'] %}
+ {{- render_extra_keys(tool.parameters, handled_keys) }}
+ {{- '\n' }}
+ {%- set handled_keys = ['type', 'name', 'description', 'parameters'] %}
+ {{- render_extra_keys(tool, handled_keys) }}
+ {{- '\n' }}
+ {%- endfor %}
+ {{- "\n" }}
+ {{- '\n\nFor each function call, output the function name and arguments in the following format:\n\n\nvalue_1\nThis is the value for the second parameter\nthat can span\nmultiple lines\n\n\n\n\n- Function calls MUST follow the specified format: an inner block must be nested within XML tags\n- DO NOT use function calls inside tags.\n- The value enclosed between parameter tags is preserved exactly as-is, including newlines and spaces.\n' }}
+{%- endif %}
+{{- '<|im_end|>' }}
+{%- for message in loop_messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if message.role == "assistant" %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- set reasoning_content = '' %}
+            {%- if '</think>' in content %}
+                {%- set reasoning_content = content.split('</think>')[0].split('<think>')[-1] %}
+                {%- set content = content.split('</think>')[-1] %}
+ {%- endif %}
+ {%- endif %}
+ {%- if (keep_all_reasoning or loop.index0 > ns.last_user_index) and reasoning_content -%}
+        {{- '<|im_start|>' + message.role + '\n<think>' + reasoning_content + '</think>' + content }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n\n' }}
+ {%- if tool_call.arguments is defined %}
+ {%- for args_name, args_value in tool_call.arguments|items %}
+ {{- '' }}
+ {%- set args_value = args_value | tojson | safe if args_value is mapping or (args_value is sequence and args_value is not string) else args_value | string %}
+ {{- args_value }}
+ {{- '\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>' }}
+ {%- elif message.role == "user" or message.role == "system"%}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
+ {{- '<|im_start|>tool\n' }}
+ {%- endif %}
+ {{- '\n' }}
+ {{- message.content }}
+ {{- '\n\n' }}
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
+ {{- '<|im_end|>' }}
+ {%- elif loop.last %}
+ {{- '<|im_end|>' }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' }}
+ {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if not enable_thinking -%}
+ {{- '' -}}
+ {%- else -%}
+ {{- '' -}}
+ {%- endif -%}
+{%- endif %}
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b7fea9cae0b516598dc0ad0462e147740a9f584f
--- /dev/null
+++ b/config.json
@@ -0,0 +1,162 @@
+{
+ "add_full_attention_sink_bias": false,
+ "add_swa_attention_sink_bias": true,
+ "architectures": [
+ "MiMoV2FlashForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_chunk_size": 128,
+ "attention_dropout": 0.0,
+ "attention_value_scale": 0.707,
+ "auto_map": {
+ "AutoConfig": "configuration_mimo_v2_flash.MiMoV2FlashConfig",
+ "AutoModel": "modeling_mimo_v2_flash.MiMoV2FlashModel",
+ "AutoModelForCausalLM": "modeling_mimo_v2_flash.MiMoV2FlashForCausalLM"
+ },
+ "head_dim": 192,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "hybrid_layer_pattern": [
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0
+ ],
+ "initializer_range": 0.02,
+ "intermediate_size": 16384,
+ "layernorm_epsilon": 1e-05,
+ "max_position_embeddings": 262144,
+ "model_type": "mimo_v2_flash",
+ "moe_intermediate_size": 2048,
+ "moe_layer_freq": [
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1
+ ],
+ "n_group": 1,
+ "n_routed_experts": 256,
+ "n_shared_experts": null,
+ "norm_topk_prob": true,
+ "num_attention_heads": 64,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 4,
+ "partial_rotary_factor": 0.334,
+ "quantization": {
+ "group_size": 64,
+ "bits": 8,
+ "mode": "affine"
+ },
+ "quantization_config": {
+ "group_size": 64,
+ "bits": 8,
+ "mode": "affine"
+ },
+ "rope_theta": 5000000,
+ "routed_scaling_factor": null,
+ "scoring_func": "sigmoid",
+ "sliding_window": 128,
+ "sliding_window_size": 128,
+ "swa_head_dim": 192,
+ "swa_num_attention_heads": 64,
+ "swa_num_key_value_heads": 8,
+ "swa_rope_theta": 10000,
+ "swa_v_head_dim": 128,
+ "tie_word_embeddings": false,
+ "topk_group": 1,
+ "topk_method": "noaux_tc",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.1",
+ "use_cache": true,
+ "v_head_dim": 128,
+ "vocab_size": 152576
+}
\ No newline at end of file
diff --git a/configuration_mimo_v2_flash.py b/configuration_mimo_v2_flash.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc3011a63160318fac7b277685bc8829a7e57f7b
--- /dev/null
+++ b/configuration_mimo_v2_flash.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+#
+# Copyright 2025 Xiaomi Corporation.
+# Copyright 2025 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.modeling_rope_utils import rope_config_validation
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MiMoV2FlashConfig(PretrainedConfig):
+
+    model_type = "mimo_v2_flash"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ # Default tensor parallel plan for base model `Hybrid`
+ base_model_tp_plan = {
+ "layers.*.self_attn.q_proj": "colwise",
+ "layers.*.self_attn.k_proj": "colwise",
+ "layers.*.self_attn.v_proj": "colwise",
+ "layers.*.self_attn.o_proj": "rowwise",
+ "layers.*.mlp.gate_proj": "colwise",
+ "layers.*.mlp.up_proj": "colwise",
+ "layers.*.mlp.down_proj": "rowwise",
+ }
+ base_model_pp_plan = {
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+ "norm": (["hidden_states"], ["hidden_states"]),
+ }
+
+ attribute_map = {
+ "num_local_experts": "n_routed_experts",
+ }
+
+ def __init__(
+ self,
+ vocab_size=151936,
+ hidden_size=4096,
+ intermediate_size=22016,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=32,
+ hidden_act="silu",
+ max_position_embeddings=32768,
+ initializer_range=0.02,
+ layernorm_epsilon=1e-6,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ attention_dropout=0.0,
+ hybrid_block_size=None,
+ hybrid_layer_pattern=None,
+ partial_rotary_factor=1.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layernorm_epsilon = layernorm_epsilon
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self.attention_dropout = attention_dropout
+
+ if hybrid_block_size is not None and hybrid_layer_pattern is None:
+ hybrid_layer_pattern = [0 if ((i + 1) % hybrid_block_size == 0) else 1 for i in range(num_hidden_layers)]
+ self.hybrid_block_size = hybrid_block_size
+ self.hybrid_layer_pattern = hybrid_layer_pattern
+
+ self.partial_rotary_factor = partial_rotary_factor
+
+ # Validate the correctness of rotary position embeddings parameters
+ # BC: if there is a 'type' field, move it to 'rope_type'.
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+ rope_config_validation(self)
+
+ super().__init__(
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
diff --git a/model-00001-of-00071.safetensors b/model-00001-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..155bee0a57a14d950b8945eede62e0a8ef9648a3
--- /dev/null
+++ b/model-00001-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bb7fd5c06b7e1f107acf128cf74c2c98b5570f1f7008dda90caf257521dccae
+size 3354612527
diff --git a/model-00002-of-00071.safetensors b/model-00002-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4deb516202633ceabb9efcc752e0660321f4fe59
--- /dev/null
+++ b/model-00002-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fa7b4eb05bfbd2967c1194d5a9b8a7095dacdbe17f5adbddacb4254143d78d0
+size 4665790200
diff --git a/model-00003-of-00071.safetensors b/model-00003-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..32e8540588b2c47fcbae4c2b69fe68be66207675
--- /dev/null
+++ b/model-00003-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b65d7aeb850f6c8a0333395febaddf9b3d504cfb59a7572cd0ea999c7efd6e60
+size 4563403538
diff --git a/model-00004-of-00071.safetensors b/model-00004-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0b0adbb22d70a970cf24af58279fc06eb2f1c986
--- /dev/null
+++ b/model-00004-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:680445d66ecfc0b641e2243ef09cf6b8164f352470a6695abb12dd3c0e1db274
+size 4665790258
diff --git a/model-00005-of-00071.safetensors b/model-00005-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ef4579e9f6d6649cc39921c3f565f8ebcc9a4d27
--- /dev/null
+++ b/model-00005-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1588f60a7171d23145af2af7c5cc06431ae8115521b0392c70bb4105595d08ef
+size 4665790214
diff --git a/model-00006-of-00071.safetensors b/model-00006-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bbd3a061872121fefd32ceffb8b5fa9693879d4b
--- /dev/null
+++ b/model-00006-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e203c3580a25abd1b67f32839030e262b30c3e06a230b7a4532d14ff9f34dc7
+size 4563403538
diff --git a/model-00007-of-00071.safetensors b/model-00007-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..345d1003b2afbafc89871135395e0311d54e831e
--- /dev/null
+++ b/model-00007-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e0a9c2a7b69bed7031202d998968c38b75c7fa75ebfd8af02f95d25a179db49
+size 4660219434
diff --git a/model-00008-of-00071.safetensors b/model-00008-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..60abc8a28b3bdb4b99a249523ccd132d08b46a0d
--- /dev/null
+++ b/model-00008-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae0ce749b6ca1c7b3da3b94105df792c122ad34e7661edb3bc501a45b921af43
+size 4665790218
diff --git a/model-00009-of-00071.safetensors b/model-00009-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..632ac7cabeef92422d289f3d547a2bf59814bcea
--- /dev/null
+++ b/model-00009-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31ae522990dd285ccea63e10e8c489aba1be33d3e1582058bc18926bbfb14047
+size 4563403534
diff --git a/model-00010-of-00071.safetensors b/model-00010-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5038b8d7c1d96cc06c1db682987981a4bc3b0163
--- /dev/null
+++ b/model-00010-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f65e4a670144b6395ca49fcdc40691f36ef3072fc02b657557a707249df21d33
+size 4665790242
diff --git a/model-00011-of-00071.safetensors b/model-00011-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..da0d82e24f173663ecb6507b49a42d46dc6d35cd
--- /dev/null
+++ b/model-00011-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d511ad2f1463d7b42aca2159f91d76d20cb9e6e27c3a32ca95ead8b46150bcdb
+size 4665790214
diff --git a/model-00012-of-00071.safetensors b/model-00012-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8d4d0d8659d54c8165457a9e9f2572a5f265e33d
--- /dev/null
+++ b/model-00012-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1c8b8994c371a76e7c8a5f0509f0822aeb5e0aeb51b7dba36764079e2999f3d
+size 4563403538
diff --git a/model-00013-of-00071.safetensors b/model-00013-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1af628583976b2148dfdf8115b75692455c5f0c0
--- /dev/null
+++ b/model-00013-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c41574a1adfb130f0b820bca9b17ae289286f98d37dced042986f4a006f7544
+size 4665790278
diff --git a/model-00014-of-00071.safetensors b/model-00014-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..15f135c36a08dd68b65df832f4f3b019fc2df63f
--- /dev/null
+++ b/model-00014-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76bb12fd2c24249314985d828ffcabbfeec1f5d828d990dd1bd52a7e6df15aed
+size 4665790215
diff --git a/model-00015-of-00071.safetensors b/model-00015-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9825930dd47fe200d8aaec5a6ab5925fde880cf1
--- /dev/null
+++ b/model-00015-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8c74834c4c8696ad07240625c3d1126976218091b6e480b8cedc6dd0782f1f9
+size 4563403544
diff --git a/model-00016-of-00071.safetensors b/model-00016-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..afa22740c4e50e1c6b624f2972f2e55f24b605ce
--- /dev/null
+++ b/model-00016-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:560793a235440990d113351d43ef3ddc46aee9eba869f0aada63acee9b6532e6
+size 4660219486
diff --git a/model-00017-of-00071.safetensors b/model-00017-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3229921ab759e579e54452e0737298fd0eb5f91f
--- /dev/null
+++ b/model-00017-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3de7d4937528bcf9f850d875a9f5ba2b72276025361cb9fa18e2726be98f85a8
+size 4665790245
diff --git a/model-00018-of-00071.safetensors b/model-00018-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8d2fd1a25fbc2a7ae4a5338b3f64a6ddee089a6c
--- /dev/null
+++ b/model-00018-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b04899fe5d628af0fb11a0d2a4016f585242f5c3827f3145fbeb68ac20d294b1
+size 4563403544
diff --git a/model-00019-of-00071.safetensors b/model-00019-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..03c72a875764acbe3fc9d61b158ee853f7646237
--- /dev/null
+++ b/model-00019-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:417b6c17b1d4354ff57b140f9dec2547e24783ae9a2f843e33000831b10f2b85
+size 4665790277
diff --git a/model-00020-of-00071.safetensors b/model-00020-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..db6c79749ff0d4e99835fb6bcffc843a8e61016a
--- /dev/null
+++ b/model-00020-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e41608d494035d42c8352c95b28c6b373bfc003bc84784d823fdfd34504d74a2
+size 4665790235
diff --git a/model-00021-of-00071.safetensors b/model-00021-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7ca1ae00aaf9a495fcc7ae855d3bd540ffc1a930
--- /dev/null
+++ b/model-00021-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b584a5ba56cf748a4ef65bd152453ca4c5e5b5038a226138f464ee5da476156
+size 4563403540
diff --git a/model-00022-of-00071.safetensors b/model-00022-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..428c554c5d2ce78ffca4ca4de113fbe8c858cae3
--- /dev/null
+++ b/model-00022-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88e03f2a83bff63a78db1a1be9ac53d0f102f7453fc820fcef81f324dac6fc26
+size 4665790301
diff --git a/model-00023-of-00071.safetensors b/model-00023-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4afcff8bbf7712eb390c5bcf45b6285fb7779e8e
--- /dev/null
+++ b/model-00023-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7d83c7c8bde832f5b0dc2a4e9919df1cf894ba22b5e2b30fc160cb35ae2b773
+size 4665790245
diff --git a/model-00024-of-00071.safetensors b/model-00024-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dbd4ea3578a765234191f74b69ba156f1e92090e
--- /dev/null
+++ b/model-00024-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb68ac6bed83189a0828140d83538d51bd00a432dc8b443131f31dc3df0e214a
+size 4563403544
diff --git a/model-00025-of-00071.safetensors b/model-00025-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8309f2fb1afac4c6036ae2f26f3decbfff525ec9
--- /dev/null
+++ b/model-00025-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e2ed8d67b55cc932d50b7bc8abbe8fe4abc9698880fac0dd4aaf47636565a48
+size 4660219480
diff --git a/model-00026-of-00071.safetensors b/model-00026-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..defaae49273218ff9e00665f7119c7811172108c
--- /dev/null
+++ b/model-00026-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:967d7a215440b937d109907cacf165be4f9066adfa8b30814cd1bbc889f2b1d9
+size 4665790253
diff --git a/model-00027-of-00071.safetensors b/model-00027-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..622de16a81f5d1984fd59132da55d828757abeb9
--- /dev/null
+++ b/model-00027-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c95c534e2f67161905c31d6fa874a8141f6dee4e0547e82e21026fa51c860c4
+size 4563403548
diff --git a/model-00028-of-00071.safetensors b/model-00028-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c9c9318ed67915550803670391bab295b3c461c8
--- /dev/null
+++ b/model-00028-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e0dc81536758f328ed1684041a6bf89fddbf7ff23caf660c307eab23058fcff
+size 4665790297
diff --git a/model-00029-of-00071.safetensors b/model-00029-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7ff540f29fe23ff39bd4ce064463cf85fa25a0f5
--- /dev/null
+++ b/model-00029-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1c0ca11a1cbf6e81fef8b23a65650394bfc0f1ebbe7798e8ef57989235d9520
+size 4665790243
diff --git a/model-00030-of-00071.safetensors b/model-00030-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fa38a681aa6841962e3ed9da1dbaa1fba175d2b1
--- /dev/null
+++ b/model-00030-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fb42852a702315a891e731d0341b0bfe25c9accd1dc6de35261bc4b544f4165
+size 4563403540
diff --git a/model-00031-of-00071.safetensors b/model-00031-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1269f55f2059557e835d844ed32fa6b648ab2618
--- /dev/null
+++ b/model-00031-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59f5616157fd4a827724be5e896f3a679a8528a3bdd15dbbca41ec39c776354d
+size 4665790297
diff --git a/model-00032-of-00071.safetensors b/model-00032-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..873981b328d5bc585bc2ff4403b68e42a9b46dd2
--- /dev/null
+++ b/model-00032-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb0ce6aa499ea51c2d3fd3d0f93a2de2f13f60f3f78ab5d00bff34ca55d655e4
+size 4665790227
diff --git a/model-00033-of-00071.safetensors b/model-00033-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8490d02afe0809d4dc55bf8d610fff18895ad26e
--- /dev/null
+++ b/model-00033-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a761e02adac0f006080631e355c7555050dcb5785c987e5536264904969648c
+size 4563403542
diff --git a/model-00034-of-00071.safetensors b/model-00034-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7ef1428cf3aed446d27cbf6f3e563cd5d64ad032
--- /dev/null
+++ b/model-00034-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5a02ed506734fe44957ce3dde1c3380ffde6b907da2977763b7c1fb86451e01
+size 4660219490
diff --git a/model-00035-of-00071.safetensors b/model-00035-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ea7a9538234db0d825a274f846935ff49f567e59
--- /dev/null
+++ b/model-00035-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:562743c69b9e994423f975206674102e2552eab601d6b8836845b98ebd49bb10
+size 4665790227
diff --git a/model-00036-of-00071.safetensors b/model-00036-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bf1ddd91f1ec48676133c9b4ce047ab9751f4bf3
--- /dev/null
+++ b/model-00036-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f5e8bea6bcd4f7e3a80cc7f89ee4e3ac557b52038b21a82a7d2683efc64faf5
+size 4563403538
diff --git a/model-00037-of-00071.safetensors b/model-00037-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..79556241d7b7c5542a95a5801790054e281b1461
--- /dev/null
+++ b/model-00037-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d93ba8fa21d29cbca7c9f23b55ee918f04665156a3ae307f673453ceb103a42
+size 4665790295
diff --git a/model-00038-of-00071.safetensors b/model-00038-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c3b351459aad7d01fc16f15f5a79c0242a75d982
--- /dev/null
+++ b/model-00038-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed23e90aacfc6105e43265511afec0ff53bee622058b19a66dd7a07f92cf016a
+size 4665790237
diff --git a/model-00039-of-00071.safetensors b/model-00039-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1352316b83bf13d9967d558205cc83c899be49fc
--- /dev/null
+++ b/model-00039-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0884eac62194fc6c0a4999e70ff80b9a9cf835054eac2ab063cc13cbfd9b0f7
+size 4563403544
diff --git a/model-00040-of-00071.safetensors b/model-00040-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..feb7770a9c99c265ecbcfbfcf89f9747727a1bd1
--- /dev/null
+++ b/model-00040-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d25fea33679489343feec234e13505e706746db3f8f3b42d6c89cc7e985a525e
+size 4665790297
diff --git a/model-00041-of-00071.safetensors b/model-00041-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..77f661858485a43a462bd61fb7cb905482095eaf
--- /dev/null
+++ b/model-00041-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:979f3e691c04ba9790a7b09a495710f550e5c5093fa184af17e210cb3c022b26
+size 4665790237
diff --git a/model-00042-of-00071.safetensors b/model-00042-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..550b138a48879419a9d95b422585c6408c58526d
--- /dev/null
+++ b/model-00042-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5199bd040ce697f3c0747e3266c7a8163d0ee4cf25bdb0b94052d8c6f287e0f9
+size 4563403540
diff --git a/model-00043-of-00071.safetensors b/model-00043-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a3b65742ae1bf72c93a096bd96c24c48206f1451
--- /dev/null
+++ b/model-00043-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd34a9626d1606ac67d4b746fdd84c4088e8e8a9edfc95274f59f355c949b901
+size 4660219482
diff --git a/model-00044-of-00071.safetensors b/model-00044-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..131c31b9be910f5f72f018ff52544a1d749807c3
--- /dev/null
+++ b/model-00044-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdd44b44d0569d00967320f2b94e793d33c26fd88a6201340a69ca92095374b7
+size 4665790243
diff --git a/model-00045-of-00071.safetensors b/model-00045-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5d932fb68758400b66044f9b37290e8764fcceb6
--- /dev/null
+++ b/model-00045-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60b781998bb199e2d49bd234a01668d7bb6db752c903e2fc287f633e027d9932
+size 4563403544
diff --git a/model-00046-of-00071.safetensors b/model-00046-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e11bc98aff41868c23d93ff3bcc8c3763de11876
--- /dev/null
+++ b/model-00046-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cc533656f87db41c0e592705830e35ff45728a76bd08a258030ec5fe42c6cee
+size 4665790261
diff --git a/model-00047-of-00071.safetensors b/model-00047-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..107032adcd540380546895df735bb50f7086001d
--- /dev/null
+++ b/model-00047-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f7c30dac4abf6a9d4303d4e4cbf00169dccadb545baa227a5423ff5fd0d6462
+size 4665790259
diff --git a/model-00048-of-00071.safetensors b/model-00048-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6f59584194e60718328d0bf5d9020499a9972617
--- /dev/null
+++ b/model-00048-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c4e8b85e45caee29f9d0fa5920d5ae134f311d0ef71f85540edc8667fdd8f2f
+size 4563403544
diff --git a/model-00049-of-00071.safetensors b/model-00049-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9c34640f96b6c231828eac69e18ecbc0f817be43
--- /dev/null
+++ b/model-00049-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bbafdd537fa42294dd247c034153323a73c222f7c76e66da1b1c4269f94cf1b
+size 4665790301
diff --git a/model-00050-of-00071.safetensors b/model-00050-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..26390e625235cf7b03d355befadb920cb71701f7
--- /dev/null
+++ b/model-00050-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:609dd17914d90988ac45938fc92473bc82e001130cd6903d54f4e3670e6608a4
+size 4665790243
diff --git a/model-00051-of-00071.safetensors b/model-00051-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..da8fe68c04b7b099f9732b273a8d641c110ba504
--- /dev/null
+++ b/model-00051-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22c318c666af50cdc10a71352b0a9ae4ece7dae90022e8568fd287f396fcbf9b
+size 4563403544
diff --git a/model-00052-of-00071.safetensors b/model-00052-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ce687c340bbff64127c5d08a03369d89429e676c
--- /dev/null
+++ b/model-00052-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1f2b23f2fc0a600b3cd1af5c9764eefaf088f44ffc55c9a66c3f41cd07d089
+size 4660219454
diff --git a/model-00053-of-00071.safetensors b/model-00053-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3b607bfbb565dac5f7664d9e7f8448a6169bd9b1
--- /dev/null
+++ b/model-00053-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa4aebfa432d40e64d4a9c4bfbcf803d453cb4e1732247a75642140108126bac
+size 4665790229
diff --git a/model-00054-of-00071.safetensors b/model-00054-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bc726d70e120c616ca74e61abaaa81930a5f19e2
--- /dev/null
+++ b/model-00054-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4de9898038e097fd84d730dcb153b88eace59e8552bb71597d45a3b4b9ff838e
+size 4563403548
diff --git a/model-00055-of-00071.safetensors b/model-00055-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c9c70c7297732a172599233aeb02a34617c96c3e
--- /dev/null
+++ b/model-00055-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc091838b6d5622ab542c32656a33902f5102af9177d6ab638670809c380c7c
+size 4665790285
diff --git a/model-00056-of-00071.safetensors b/model-00056-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c1cfe7f949d10adb81e9e6059deaf56770dc00da
--- /dev/null
+++ b/model-00056-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6522935023c0811956a0e042b4b449a4817f3715161fe3b31d8c88112f2cfc7
+size 4665790217
diff --git a/model-00057-of-00071.safetensors b/model-00057-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..966bb9f24dbbedf65a542ae821f81ce2c8c094ba
--- /dev/null
+++ b/model-00057-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:802edc0c23c82bbc43fd4111100e3ab1f0c72546c58b7e3903888996849de745
+size 4563403542
diff --git a/model-00058-of-00071.safetensors b/model-00058-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6715f0a7fecc92009793376b2b337d529feec9d7
--- /dev/null
+++ b/model-00058-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4182f319082938007d2de8d5891d5ba9d41e83698551dfa9b5a265b4e1a4aa1
+size 4665790255
diff --git a/model-00059-of-00071.safetensors b/model-00059-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..21766779d315f9ddeee5e8f5b68042d2f107d325
--- /dev/null
+++ b/model-00059-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89d9bdf00b31d1e7b7453fb4188f5656684b2476053fbb75cc01123bd2a336ca
+size 4665790229
diff --git a/model-00060-of-00071.safetensors b/model-00060-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..440ec73feb04b04d7178db039a34ffc0c6fe4d34
--- /dev/null
+++ b/model-00060-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fba3f574ea35253430a6221945e42247220b84cb6ed07dc068397e238a4824a
+size 4563403544
diff --git a/model-00061-of-00071.safetensors b/model-00061-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ec47bca178e693d5568055f5b35b9a719a1e1d03
--- /dev/null
+++ b/model-00061-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5373d0f4b6774ac645477eba3cb413839e59337293d8ca7ce6607aed47d8904
+size 4660219484
diff --git a/model-00062-of-00071.safetensors b/model-00062-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..60ec167548230f41806f1f06902c6d60ce036dbe
--- /dev/null
+++ b/model-00062-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df0e2638d45d29efb89a7c5f5e9b9704ec66ad172cd03ca2f2e329f9473baad9
+size 4665790235
diff --git a/model-00063-of-00071.safetensors b/model-00063-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0cdf5776b6244f8d761581db5b852c6638dc2268
--- /dev/null
+++ b/model-00063-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:238622ff7856572901fdf8c56046febc2f1c5e831299ea6df0e035f7bbff8d10
+size 4563403544
diff --git a/model-00064-of-00071.safetensors b/model-00064-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7db50c3c9604320ef1088b4c35c7bb11d158be03
--- /dev/null
+++ b/model-00064-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:311a30ee89272025fa3132de04538617152c44acca197a6528191354b014b9a9
+size 4665790259
diff --git a/model-00065-of-00071.safetensors b/model-00065-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4bb1abea1a1e3e880f4128c1aa60b64fade2a290
--- /dev/null
+++ b/model-00065-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5477844de20793bf0f36544741a3f0af6eb66fdf981df031bfe9c1534287dc72
+size 4665790213
diff --git a/model-00066-of-00071.safetensors b/model-00066-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e25d15413de70e2349e61c540f0d951c2dcb2ee8
--- /dev/null
+++ b/model-00066-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a05ddab42b321fb3a2bee8fad4f97447ffea609b96bcf27a8f5f28f135728b5
+size 4563403544
diff --git a/model-00067-of-00071.safetensors b/model-00067-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c30e62da2367f17726d428ff967c3e3551c9f422
--- /dev/null
+++ b/model-00067-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0281b2846d95fce382fb34fe547a1a855e8323f78530b9a4fb36122da7acc454
+size 4665790301
diff --git a/model-00068-of-00071.safetensors b/model-00068-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0fcc95616415bb8096dbcc8c41aa3fc13e8927cf
--- /dev/null
+++ b/model-00068-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:525b3b7d4fcb431c5c327ff53447d28587e69e9f8d4abbcf950879b5139cb2e3
+size 4665790227
diff --git a/model-00069-of-00071.safetensors b/model-00069-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c05b8d2c0ef547d58578aa8a8ba00fbf1881bf53
--- /dev/null
+++ b/model-00069-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1f6d65752ab462e743d74a77517da6a0de05d0053bc627cb17311a1374d1396
+size 4563403538
diff --git a/model-00070-of-00071.safetensors b/model-00070-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..be9dcd5dc4784d081d9c0f7ba3f50f72b0fcbecb
--- /dev/null
+++ b/model-00070-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cf5cc859cab662c3f3772fdd085015f636717ecfa4f9a3b6a09cc4751310cf7
+size 4660219462
diff --git a/model-00071-of-00071.safetensors b/model-00071-of-00071.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..66917cfc60d7fc6722e49fe4e9c0386796b165ef
--- /dev/null
+++ b/model-00071-of-00071.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e0367ad5fca9ff3e339cc85d559895976545883265f4a3aad69a009893fe5b0
+size 5229537864
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2639c64197b4758d0285d4fd0394f5245bc8ed30
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,1252 @@
+{
+ "metadata": {
+ "total_size": 328124067712,
+ "total_parameters": 308778778368
+ },
+ "weight_map": {
+ "lm_head.biases": "model-00071-of-00071.safetensors",
+ "lm_head.scales": "model-00071-of-00071.safetensors",
+ "lm_head.weight": "model-00071-of-00071.safetensors",
+ "model.embed_tokens.biases": "model-00001-of-00071.safetensors",
+ "model.embed_tokens.scales": "model-00001-of-00071.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.down_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.down_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.gate_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.up_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.up_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.k_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.q_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.v_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.gate.e_score_correction_bias": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.gate.weight": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.down_proj.biases": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.down_proj.scales": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.down_proj.weight": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.up_proj.biases": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.up_proj.scales": "model-00002-of-00071.safetensors",
+ "model.layers.1.mlp.switch_mlp.up_proj.weight": "model-00002-of-00071.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00002-of-00071.safetensors",
+ "model.layers.1.self_attn.attention_sink_bias": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.k_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.q_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.v_proj.biases": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00071.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00071.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00016-of-00071.safetensors",
+ "model.layers.10.mlp.gate.e_score_correction_bias": "model-00016-of-00071.safetensors",
+ "model.layers.10.mlp.gate.weight": "model-00016-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.down_proj.biases": "model-00016-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.down_proj.scales": "model-00016-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.down_proj.weight": "model-00016-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.gate_proj.biases": "model-00015-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.gate_proj.scales": "model-00015-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.gate_proj.weight": "model-00015-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.up_proj.biases": "model-00015-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.up_proj.scales": "model-00015-of-00071.safetensors",
+ "model.layers.10.mlp.switch_mlp.up_proj.weight": "model-00015-of-00071.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00016-of-00071.safetensors",
+ "model.layers.10.self_attn.attention_sink_bias": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.k_proj.biases": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.k_proj.scales": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.o_proj.biases": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.o_proj.scales": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.q_proj.biases": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.q_proj.scales": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.v_proj.biases": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.v_proj.scales": "model-00014-of-00071.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00014-of-00071.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.gate.e_score_correction_bias": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.gate.weight": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.down_proj.biases": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.down_proj.scales": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.down_proj.weight": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.gate_proj.biases": "model-00016-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.gate_proj.scales": "model-00016-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.gate_proj.weight": "model-00016-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.up_proj.biases": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.up_proj.scales": "model-00017-of-00071.safetensors",
+ "model.layers.11.mlp.switch_mlp.up_proj.weight": "model-00017-of-00071.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00017-of-00071.safetensors",
+ "model.layers.11.self_attn.k_proj.biases": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.k_proj.scales": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.o_proj.biases": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.o_proj.scales": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.q_proj.biases": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.q_proj.scales": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.v_proj.biases": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.v_proj.scales": "model-00016-of-00071.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00016-of-00071.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00019-of-00071.safetensors",
+ "model.layers.12.mlp.gate.e_score_correction_bias": "model-00019-of-00071.safetensors",
+ "model.layers.12.mlp.gate.weight": "model-00019-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.down_proj.biases": "model-00019-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.down_proj.scales": "model-00019-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.down_proj.weight": "model-00019-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.gate_proj.biases": "model-00018-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.gate_proj.scales": "model-00018-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.gate_proj.weight": "model-00018-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.up_proj.biases": "model-00018-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.up_proj.scales": "model-00018-of-00071.safetensors",
+ "model.layers.12.mlp.switch_mlp.up_proj.weight": "model-00018-of-00071.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00019-of-00071.safetensors",
+ "model.layers.12.self_attn.attention_sink_bias": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.k_proj.biases": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.k_proj.scales": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.o_proj.biases": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.o_proj.scales": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.q_proj.biases": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.q_proj.scales": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.v_proj.biases": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.v_proj.scales": "model-00017-of-00071.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00017-of-00071.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.gate.e_score_correction_bias": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.gate.weight": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.down_proj.biases": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.down_proj.scales": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.down_proj.weight": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.gate_proj.biases": "model-00019-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.gate_proj.scales": "model-00019-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.gate_proj.weight": "model-00019-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.up_proj.biases": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.up_proj.scales": "model-00020-of-00071.safetensors",
+ "model.layers.13.mlp.switch_mlp.up_proj.weight": "model-00020-of-00071.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00020-of-00071.safetensors",
+ "model.layers.13.self_attn.attention_sink_bias": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.k_proj.biases": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.k_proj.scales": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.o_proj.biases": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.o_proj.scales": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.q_proj.biases": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.q_proj.scales": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.v_proj.biases": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.v_proj.scales": "model-00019-of-00071.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00019-of-00071.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00022-of-00071.safetensors",
+ "model.layers.14.mlp.gate.e_score_correction_bias": "model-00022-of-00071.safetensors",
+ "model.layers.14.mlp.gate.weight": "model-00022-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.down_proj.biases": "model-00022-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.down_proj.scales": "model-00022-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.down_proj.weight": "model-00022-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.gate_proj.biases": "model-00021-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.gate_proj.scales": "model-00021-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.gate_proj.weight": "model-00021-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.up_proj.biases": "model-00021-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.up_proj.scales": "model-00021-of-00071.safetensors",
+ "model.layers.14.mlp.switch_mlp.up_proj.weight": "model-00021-of-00071.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00022-of-00071.safetensors",
+ "model.layers.14.self_attn.attention_sink_bias": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.k_proj.biases": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.k_proj.scales": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.o_proj.biases": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.o_proj.scales": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.q_proj.biases": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.q_proj.scales": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.v_proj.biases": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.v_proj.scales": "model-00020-of-00071.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00020-of-00071.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.gate.e_score_correction_bias": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.gate.weight": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.down_proj.biases": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.down_proj.scales": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.down_proj.weight": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.gate_proj.biases": "model-00022-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.gate_proj.scales": "model-00022-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.gate_proj.weight": "model-00022-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.up_proj.biases": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.up_proj.scales": "model-00023-of-00071.safetensors",
+ "model.layers.15.mlp.switch_mlp.up_proj.weight": "model-00023-of-00071.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00023-of-00071.safetensors",
+ "model.layers.15.self_attn.attention_sink_bias": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.k_proj.biases": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.k_proj.scales": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.o_proj.biases": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.o_proj.scales": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.q_proj.biases": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.q_proj.scales": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.v_proj.biases": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.v_proj.scales": "model-00022-of-00071.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00022-of-00071.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00025-of-00071.safetensors",
+ "model.layers.16.mlp.gate.e_score_correction_bias": "model-00025-of-00071.safetensors",
+ "model.layers.16.mlp.gate.weight": "model-00025-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.down_proj.biases": "model-00025-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.down_proj.scales": "model-00025-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.down_proj.weight": "model-00025-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.gate_proj.biases": "model-00024-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.gate_proj.scales": "model-00024-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.gate_proj.weight": "model-00024-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.up_proj.biases": "model-00024-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.up_proj.scales": "model-00024-of-00071.safetensors",
+ "model.layers.16.mlp.switch_mlp.up_proj.weight": "model-00024-of-00071.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00025-of-00071.safetensors",
+ "model.layers.16.self_attn.attention_sink_bias": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.k_proj.biases": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.k_proj.scales": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.o_proj.biases": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.o_proj.scales": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.q_proj.biases": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.q_proj.scales": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.v_proj.biases": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.v_proj.scales": "model-00023-of-00071.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00023-of-00071.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.gate.e_score_correction_bias": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.gate.weight": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.down_proj.biases": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.down_proj.scales": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.down_proj.weight": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.gate_proj.biases": "model-00025-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.gate_proj.scales": "model-00025-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.gate_proj.weight": "model-00025-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.up_proj.biases": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.up_proj.scales": "model-00026-of-00071.safetensors",
+ "model.layers.17.mlp.switch_mlp.up_proj.weight": "model-00026-of-00071.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00026-of-00071.safetensors",
+ "model.layers.17.self_attn.k_proj.biases": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.k_proj.scales": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.o_proj.biases": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.o_proj.scales": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.q_proj.biases": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.q_proj.scales": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.v_proj.biases": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.v_proj.scales": "model-00025-of-00071.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00025-of-00071.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00028-of-00071.safetensors",
+ "model.layers.18.mlp.gate.e_score_correction_bias": "model-00028-of-00071.safetensors",
+ "model.layers.18.mlp.gate.weight": "model-00028-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.down_proj.biases": "model-00028-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.down_proj.scales": "model-00028-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.down_proj.weight": "model-00028-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.gate_proj.biases": "model-00027-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.gate_proj.scales": "model-00027-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.gate_proj.weight": "model-00027-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.up_proj.biases": "model-00027-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.up_proj.scales": "model-00027-of-00071.safetensors",
+ "model.layers.18.mlp.switch_mlp.up_proj.weight": "model-00027-of-00071.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00028-of-00071.safetensors",
+ "model.layers.18.self_attn.attention_sink_bias": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.k_proj.biases": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.k_proj.scales": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.o_proj.biases": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.o_proj.scales": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.q_proj.biases": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.q_proj.scales": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.v_proj.biases": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.v_proj.scales": "model-00026-of-00071.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00026-of-00071.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.gate.e_score_correction_bias": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.gate.weight": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.down_proj.biases": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.down_proj.scales": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.down_proj.weight": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.gate_proj.biases": "model-00028-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.gate_proj.scales": "model-00028-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.gate_proj.weight": "model-00028-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.up_proj.biases": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.up_proj.scales": "model-00029-of-00071.safetensors",
+ "model.layers.19.mlp.switch_mlp.up_proj.weight": "model-00029-of-00071.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00029-of-00071.safetensors",
+ "model.layers.19.self_attn.attention_sink_bias": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.k_proj.biases": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.k_proj.scales": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.o_proj.biases": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.o_proj.scales": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.q_proj.biases": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.q_proj.scales": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.v_proj.biases": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.v_proj.scales": "model-00028-of-00071.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00028-of-00071.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00004-of-00071.safetensors",
+ "model.layers.2.mlp.gate.e_score_correction_bias": "model-00004-of-00071.safetensors",
+ "model.layers.2.mlp.gate.weight": "model-00004-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.down_proj.biases": "model-00004-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.down_proj.scales": "model-00004-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.down_proj.weight": "model-00004-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.gate_proj.biases": "model-00003-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.gate_proj.scales": "model-00003-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.gate_proj.weight": "model-00003-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.up_proj.biases": "model-00003-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.up_proj.scales": "model-00003-of-00071.safetensors",
+ "model.layers.2.mlp.switch_mlp.up_proj.weight": "model-00003-of-00071.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00004-of-00071.safetensors",
+ "model.layers.2.self_attn.attention_sink_bias": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.k_proj.biases": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.k_proj.scales": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.o_proj.biases": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.o_proj.scales": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.q_proj.biases": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.q_proj.scales": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.v_proj.biases": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.v_proj.scales": "model-00002-of-00071.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00071.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00031-of-00071.safetensors",
+ "model.layers.20.mlp.gate.e_score_correction_bias": "model-00031-of-00071.safetensors",
+ "model.layers.20.mlp.gate.weight": "model-00031-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.down_proj.biases": "model-00031-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.down_proj.scales": "model-00031-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.down_proj.weight": "model-00031-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.gate_proj.biases": "model-00030-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.gate_proj.scales": "model-00030-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.gate_proj.weight": "model-00030-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.up_proj.biases": "model-00030-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.up_proj.scales": "model-00030-of-00071.safetensors",
+ "model.layers.20.mlp.switch_mlp.up_proj.weight": "model-00030-of-00071.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00031-of-00071.safetensors",
+ "model.layers.20.self_attn.attention_sink_bias": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.k_proj.biases": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.k_proj.scales": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.o_proj.biases": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.o_proj.scales": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.q_proj.biases": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.q_proj.scales": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.v_proj.biases": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.v_proj.scales": "model-00029-of-00071.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00029-of-00071.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.gate.e_score_correction_bias": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.gate.weight": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.down_proj.biases": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.down_proj.scales": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.down_proj.weight": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.gate_proj.biases": "model-00031-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.gate_proj.scales": "model-00031-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.gate_proj.weight": "model-00031-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.up_proj.biases": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.up_proj.scales": "model-00032-of-00071.safetensors",
+ "model.layers.21.mlp.switch_mlp.up_proj.weight": "model-00032-of-00071.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00032-of-00071.safetensors",
+ "model.layers.21.self_attn.attention_sink_bias": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.k_proj.biases": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.k_proj.scales": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.o_proj.biases": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.o_proj.scales": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.q_proj.biases": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.q_proj.scales": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.v_proj.biases": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.v_proj.scales": "model-00031-of-00071.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00031-of-00071.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00034-of-00071.safetensors",
+ "model.layers.22.mlp.gate.e_score_correction_bias": "model-00034-of-00071.safetensors",
+ "model.layers.22.mlp.gate.weight": "model-00034-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.down_proj.biases": "model-00034-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.down_proj.scales": "model-00034-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.down_proj.weight": "model-00034-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.gate_proj.biases": "model-00033-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.gate_proj.scales": "model-00033-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.gate_proj.weight": "model-00033-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.up_proj.biases": "model-00033-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.up_proj.scales": "model-00033-of-00071.safetensors",
+ "model.layers.22.mlp.switch_mlp.up_proj.weight": "model-00033-of-00071.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00034-of-00071.safetensors",
+ "model.layers.22.self_attn.attention_sink_bias": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.k_proj.biases": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.k_proj.scales": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.o_proj.biases": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.o_proj.scales": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.q_proj.biases": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.q_proj.scales": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.v_proj.biases": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.v_proj.scales": "model-00032-of-00071.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00032-of-00071.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.gate.e_score_correction_bias": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.gate.weight": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.down_proj.biases": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.down_proj.scales": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.down_proj.weight": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.gate_proj.biases": "model-00034-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.gate_proj.scales": "model-00034-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.gate_proj.weight": "model-00034-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.up_proj.biases": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.up_proj.scales": "model-00035-of-00071.safetensors",
+ "model.layers.23.mlp.switch_mlp.up_proj.weight": "model-00035-of-00071.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00035-of-00071.safetensors",
+ "model.layers.23.self_attn.k_proj.biases": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.k_proj.scales": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.o_proj.biases": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.o_proj.scales": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.q_proj.biases": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.q_proj.scales": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.v_proj.biases": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.v_proj.scales": "model-00034-of-00071.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00034-of-00071.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00037-of-00071.safetensors",
+ "model.layers.24.mlp.gate.e_score_correction_bias": "model-00037-of-00071.safetensors",
+ "model.layers.24.mlp.gate.weight": "model-00037-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.down_proj.biases": "model-00037-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.down_proj.scales": "model-00037-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.down_proj.weight": "model-00037-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.gate_proj.biases": "model-00036-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.gate_proj.scales": "model-00036-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.gate_proj.weight": "model-00036-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.up_proj.biases": "model-00036-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.up_proj.scales": "model-00036-of-00071.safetensors",
+ "model.layers.24.mlp.switch_mlp.up_proj.weight": "model-00036-of-00071.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00037-of-00071.safetensors",
+ "model.layers.24.self_attn.attention_sink_bias": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.k_proj.biases": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.k_proj.scales": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.o_proj.biases": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.o_proj.scales": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.q_proj.biases": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.q_proj.scales": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.v_proj.biases": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.v_proj.scales": "model-00035-of-00071.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00035-of-00071.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.gate.e_score_correction_bias": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.gate.weight": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.down_proj.biases": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.down_proj.scales": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.down_proj.weight": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.gate_proj.biases": "model-00037-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.gate_proj.scales": "model-00037-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.gate_proj.weight": "model-00037-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.up_proj.biases": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.up_proj.scales": "model-00038-of-00071.safetensors",
+ "model.layers.25.mlp.switch_mlp.up_proj.weight": "model-00038-of-00071.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00038-of-00071.safetensors",
+ "model.layers.25.self_attn.attention_sink_bias": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.k_proj.biases": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.k_proj.scales": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.o_proj.biases": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.o_proj.scales": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.q_proj.biases": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.q_proj.scales": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.v_proj.biases": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.v_proj.scales": "model-00037-of-00071.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00037-of-00071.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00040-of-00071.safetensors",
+ "model.layers.26.mlp.gate.e_score_correction_bias": "model-00040-of-00071.safetensors",
+ "model.layers.26.mlp.gate.weight": "model-00040-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.down_proj.biases": "model-00040-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.down_proj.scales": "model-00040-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.down_proj.weight": "model-00040-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.gate_proj.biases": "model-00039-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.gate_proj.scales": "model-00039-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.gate_proj.weight": "model-00039-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.up_proj.biases": "model-00039-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.up_proj.scales": "model-00039-of-00071.safetensors",
+ "model.layers.26.mlp.switch_mlp.up_proj.weight": "model-00039-of-00071.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00040-of-00071.safetensors",
+ "model.layers.26.self_attn.attention_sink_bias": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.k_proj.biases": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.k_proj.scales": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.o_proj.biases": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.o_proj.scales": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.q_proj.biases": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.q_proj.scales": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.v_proj.biases": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.v_proj.scales": "model-00038-of-00071.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00038-of-00071.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.gate.e_score_correction_bias": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.gate.weight": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.down_proj.biases": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.down_proj.scales": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.down_proj.weight": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.gate_proj.biases": "model-00040-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.gate_proj.scales": "model-00040-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.gate_proj.weight": "model-00040-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.up_proj.biases": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.up_proj.scales": "model-00041-of-00071.safetensors",
+ "model.layers.27.mlp.switch_mlp.up_proj.weight": "model-00041-of-00071.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00041-of-00071.safetensors",
+ "model.layers.27.self_attn.attention_sink_bias": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.k_proj.biases": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.k_proj.scales": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.o_proj.biases": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.o_proj.scales": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.q_proj.biases": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.q_proj.scales": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.v_proj.biases": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.v_proj.scales": "model-00040-of-00071.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00040-of-00071.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00043-of-00071.safetensors",
+ "model.layers.28.mlp.gate.e_score_correction_bias": "model-00043-of-00071.safetensors",
+ "model.layers.28.mlp.gate.weight": "model-00043-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.down_proj.biases": "model-00043-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.down_proj.scales": "model-00043-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.down_proj.weight": "model-00043-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.gate_proj.biases": "model-00042-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.gate_proj.scales": "model-00042-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.gate_proj.weight": "model-00042-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.up_proj.biases": "model-00042-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.up_proj.scales": "model-00042-of-00071.safetensors",
+ "model.layers.28.mlp.switch_mlp.up_proj.weight": "model-00042-of-00071.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00043-of-00071.safetensors",
+ "model.layers.28.self_attn.attention_sink_bias": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.k_proj.biases": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.k_proj.scales": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.o_proj.biases": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.o_proj.scales": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.q_proj.biases": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.q_proj.scales": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.v_proj.biases": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.v_proj.scales": "model-00041-of-00071.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00041-of-00071.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.gate.e_score_correction_bias": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.gate.weight": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.down_proj.biases": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.down_proj.scales": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.down_proj.weight": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.gate_proj.biases": "model-00043-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.gate_proj.scales": "model-00043-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.gate_proj.weight": "model-00043-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.up_proj.biases": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.up_proj.scales": "model-00044-of-00071.safetensors",
+ "model.layers.29.mlp.switch_mlp.up_proj.weight": "model-00044-of-00071.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00044-of-00071.safetensors",
+ "model.layers.29.self_attn.k_proj.biases": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.k_proj.scales": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.o_proj.biases": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.o_proj.scales": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.q_proj.biases": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.q_proj.scales": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.v_proj.biases": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.v_proj.scales": "model-00043-of-00071.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00043-of-00071.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.gate.e_score_correction_bias": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.gate.weight": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.down_proj.biases": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.down_proj.scales": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.down_proj.weight": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.gate_proj.biases": "model-00004-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.gate_proj.scales": "model-00004-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.gate_proj.weight": "model-00004-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.up_proj.biases": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.up_proj.scales": "model-00005-of-00071.safetensors",
+ "model.layers.3.mlp.switch_mlp.up_proj.weight": "model-00005-of-00071.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00005-of-00071.safetensors",
+ "model.layers.3.self_attn.attention_sink_bias": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.k_proj.biases": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.k_proj.scales": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.o_proj.biases": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.o_proj.scales": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.q_proj.biases": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.q_proj.scales": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.v_proj.biases": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.v_proj.scales": "model-00004-of-00071.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00004-of-00071.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00046-of-00071.safetensors",
+ "model.layers.30.mlp.gate.e_score_correction_bias": "model-00046-of-00071.safetensors",
+ "model.layers.30.mlp.gate.weight": "model-00046-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.down_proj.biases": "model-00046-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.down_proj.scales": "model-00046-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.down_proj.weight": "model-00046-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.gate_proj.biases": "model-00045-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.gate_proj.scales": "model-00045-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.gate_proj.weight": "model-00045-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.up_proj.biases": "model-00045-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.up_proj.scales": "model-00045-of-00071.safetensors",
+ "model.layers.30.mlp.switch_mlp.up_proj.weight": "model-00045-of-00071.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00046-of-00071.safetensors",
+ "model.layers.30.self_attn.attention_sink_bias": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.k_proj.biases": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.k_proj.scales": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.o_proj.biases": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.o_proj.scales": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.q_proj.biases": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.q_proj.scales": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.v_proj.biases": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.v_proj.scales": "model-00044-of-00071.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00044-of-00071.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.gate.e_score_correction_bias": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.gate.weight": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.down_proj.biases": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.down_proj.scales": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.down_proj.weight": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.gate_proj.biases": "model-00046-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.gate_proj.scales": "model-00046-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.gate_proj.weight": "model-00046-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.up_proj.biases": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.up_proj.scales": "model-00047-of-00071.safetensors",
+ "model.layers.31.mlp.switch_mlp.up_proj.weight": "model-00047-of-00071.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00047-of-00071.safetensors",
+ "model.layers.31.self_attn.attention_sink_bias": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.k_proj.biases": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.k_proj.scales": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.o_proj.biases": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.o_proj.scales": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.q_proj.biases": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.q_proj.scales": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.v_proj.biases": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.v_proj.scales": "model-00046-of-00071.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00046-of-00071.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00049-of-00071.safetensors",
+ "model.layers.32.mlp.gate.e_score_correction_bias": "model-00049-of-00071.safetensors",
+ "model.layers.32.mlp.gate.weight": "model-00049-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.down_proj.biases": "model-00049-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.down_proj.scales": "model-00049-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.down_proj.weight": "model-00049-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.gate_proj.biases": "model-00048-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.gate_proj.scales": "model-00048-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.gate_proj.weight": "model-00048-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.up_proj.biases": "model-00048-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.up_proj.scales": "model-00048-of-00071.safetensors",
+ "model.layers.32.mlp.switch_mlp.up_proj.weight": "model-00048-of-00071.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00049-of-00071.safetensors",
+ "model.layers.32.self_attn.attention_sink_bias": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.k_proj.biases": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.k_proj.scales": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.o_proj.biases": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.o_proj.scales": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.q_proj.biases": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.q_proj.scales": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.v_proj.biases": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.v_proj.scales": "model-00047-of-00071.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00047-of-00071.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.gate.e_score_correction_bias": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.gate.weight": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.down_proj.biases": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.down_proj.scales": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.down_proj.weight": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.gate_proj.biases": "model-00049-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.gate_proj.scales": "model-00049-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.gate_proj.weight": "model-00049-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.up_proj.biases": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.up_proj.scales": "model-00050-of-00071.safetensors",
+ "model.layers.33.mlp.switch_mlp.up_proj.weight": "model-00050-of-00071.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00050-of-00071.safetensors",
+ "model.layers.33.self_attn.attention_sink_bias": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.k_proj.biases": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.k_proj.scales": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.o_proj.biases": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.o_proj.scales": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.q_proj.biases": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.q_proj.scales": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.v_proj.biases": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.v_proj.scales": "model-00049-of-00071.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00049-of-00071.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00052-of-00071.safetensors",
+ "model.layers.34.mlp.gate.e_score_correction_bias": "model-00052-of-00071.safetensors",
+ "model.layers.34.mlp.gate.weight": "model-00052-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.down_proj.biases": "model-00052-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.down_proj.scales": "model-00052-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.down_proj.weight": "model-00052-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.gate_proj.biases": "model-00051-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.gate_proj.scales": "model-00051-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.gate_proj.weight": "model-00051-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.up_proj.biases": "model-00051-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.up_proj.scales": "model-00051-of-00071.safetensors",
+ "model.layers.34.mlp.switch_mlp.up_proj.weight": "model-00051-of-00071.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00052-of-00071.safetensors",
+ "model.layers.34.self_attn.attention_sink_bias": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.k_proj.biases": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.k_proj.scales": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.o_proj.biases": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.o_proj.scales": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.q_proj.biases": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.q_proj.scales": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.v_proj.biases": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.v_proj.scales": "model-00050-of-00071.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00050-of-00071.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.gate.e_score_correction_bias": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.gate.weight": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.down_proj.biases": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.down_proj.scales": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.down_proj.weight": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.gate_proj.biases": "model-00052-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.gate_proj.scales": "model-00052-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.gate_proj.weight": "model-00052-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.up_proj.biases": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.up_proj.scales": "model-00053-of-00071.safetensors",
+ "model.layers.35.mlp.switch_mlp.up_proj.weight": "model-00053-of-00071.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00053-of-00071.safetensors",
+ "model.layers.35.self_attn.k_proj.biases": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.k_proj.scales": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.o_proj.biases": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.o_proj.scales": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.q_proj.biases": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.q_proj.scales": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.v_proj.biases": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.v_proj.scales": "model-00052-of-00071.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00052-of-00071.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00055-of-00071.safetensors",
+ "model.layers.36.mlp.gate.e_score_correction_bias": "model-00055-of-00071.safetensors",
+ "model.layers.36.mlp.gate.weight": "model-00055-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.down_proj.biases": "model-00055-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.down_proj.scales": "model-00055-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.down_proj.weight": "model-00055-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.gate_proj.biases": "model-00054-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.gate_proj.scales": "model-00054-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.gate_proj.weight": "model-00054-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.up_proj.biases": "model-00054-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.up_proj.scales": "model-00054-of-00071.safetensors",
+ "model.layers.36.mlp.switch_mlp.up_proj.weight": "model-00054-of-00071.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00055-of-00071.safetensors",
+ "model.layers.36.self_attn.attention_sink_bias": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.k_proj.biases": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.k_proj.scales": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.o_proj.biases": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.o_proj.scales": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.q_proj.biases": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.q_proj.scales": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.v_proj.biases": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.v_proj.scales": "model-00053-of-00071.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00053-of-00071.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.gate.e_score_correction_bias": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.gate.weight": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.down_proj.biases": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.down_proj.scales": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.down_proj.weight": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.gate_proj.biases": "model-00055-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.gate_proj.scales": "model-00055-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.gate_proj.weight": "model-00055-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.up_proj.biases": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.up_proj.scales": "model-00056-of-00071.safetensors",
+ "model.layers.37.mlp.switch_mlp.up_proj.weight": "model-00056-of-00071.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00056-of-00071.safetensors",
+ "model.layers.37.self_attn.attention_sink_bias": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.k_proj.biases": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.k_proj.scales": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.o_proj.biases": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.o_proj.scales": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.q_proj.biases": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.q_proj.scales": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.v_proj.biases": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.v_proj.scales": "model-00055-of-00071.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00055-of-00071.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00058-of-00071.safetensors",
+ "model.layers.38.mlp.gate.e_score_correction_bias": "model-00058-of-00071.safetensors",
+ "model.layers.38.mlp.gate.weight": "model-00058-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.down_proj.biases": "model-00058-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.down_proj.scales": "model-00058-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.down_proj.weight": "model-00058-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.gate_proj.biases": "model-00057-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.gate_proj.scales": "model-00057-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.gate_proj.weight": "model-00057-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.up_proj.biases": "model-00057-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.up_proj.scales": "model-00057-of-00071.safetensors",
+ "model.layers.38.mlp.switch_mlp.up_proj.weight": "model-00057-of-00071.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00058-of-00071.safetensors",
+ "model.layers.38.self_attn.attention_sink_bias": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.k_proj.biases": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.k_proj.scales": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.o_proj.biases": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.o_proj.scales": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.q_proj.biases": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.q_proj.scales": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.v_proj.biases": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.v_proj.scales": "model-00056-of-00071.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00056-of-00071.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.gate.e_score_correction_bias": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.gate.weight": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.down_proj.biases": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.down_proj.scales": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.down_proj.weight": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.gate_proj.biases": "model-00058-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.gate_proj.scales": "model-00058-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.gate_proj.weight": "model-00058-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.up_proj.biases": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.up_proj.scales": "model-00059-of-00071.safetensors",
+ "model.layers.39.mlp.switch_mlp.up_proj.weight": "model-00059-of-00071.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00059-of-00071.safetensors",
+ "model.layers.39.self_attn.attention_sink_bias": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.k_proj.biases": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.k_proj.scales": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.o_proj.biases": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.o_proj.scales": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.q_proj.biases": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.q_proj.scales": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.v_proj.biases": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.v_proj.scales": "model-00058-of-00071.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00058-of-00071.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00007-of-00071.safetensors",
+ "model.layers.4.mlp.gate.e_score_correction_bias": "model-00007-of-00071.safetensors",
+ "model.layers.4.mlp.gate.weight": "model-00007-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.down_proj.biases": "model-00007-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.down_proj.scales": "model-00007-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.down_proj.weight": "model-00007-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.gate_proj.biases": "model-00006-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.gate_proj.scales": "model-00006-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.gate_proj.weight": "model-00006-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.up_proj.biases": "model-00006-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.up_proj.scales": "model-00006-of-00071.safetensors",
+ "model.layers.4.mlp.switch_mlp.up_proj.weight": "model-00006-of-00071.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00007-of-00071.safetensors",
+ "model.layers.4.self_attn.attention_sink_bias": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.k_proj.biases": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.k_proj.scales": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.o_proj.biases": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.o_proj.scales": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.q_proj.biases": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.q_proj.scales": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.v_proj.biases": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.v_proj.scales": "model-00005-of-00071.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00005-of-00071.safetensors",
+ "model.layers.40.input_layernorm.weight": "model-00061-of-00071.safetensors",
+ "model.layers.40.mlp.gate.e_score_correction_bias": "model-00061-of-00071.safetensors",
+ "model.layers.40.mlp.gate.weight": "model-00061-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.down_proj.biases": "model-00061-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.down_proj.scales": "model-00061-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.down_proj.weight": "model-00061-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.gate_proj.biases": "model-00060-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.gate_proj.scales": "model-00060-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.gate_proj.weight": "model-00060-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.up_proj.biases": "model-00060-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.up_proj.scales": "model-00060-of-00071.safetensors",
+ "model.layers.40.mlp.switch_mlp.up_proj.weight": "model-00060-of-00071.safetensors",
+ "model.layers.40.post_attention_layernorm.weight": "model-00061-of-00071.safetensors",
+ "model.layers.40.self_attn.attention_sink_bias": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.k_proj.biases": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.k_proj.scales": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.k_proj.weight": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.o_proj.biases": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.o_proj.scales": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.o_proj.weight": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.q_proj.biases": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.q_proj.scales": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.q_proj.weight": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.v_proj.biases": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.v_proj.scales": "model-00059-of-00071.safetensors",
+ "model.layers.40.self_attn.v_proj.weight": "model-00059-of-00071.safetensors",
+ "model.layers.41.input_layernorm.weight": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.gate.e_score_correction_bias": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.gate.weight": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.down_proj.biases": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.down_proj.scales": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.down_proj.weight": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.gate_proj.biases": "model-00061-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.gate_proj.scales": "model-00061-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.gate_proj.weight": "model-00061-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.up_proj.biases": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.up_proj.scales": "model-00062-of-00071.safetensors",
+ "model.layers.41.mlp.switch_mlp.up_proj.weight": "model-00062-of-00071.safetensors",
+ "model.layers.41.post_attention_layernorm.weight": "model-00062-of-00071.safetensors",
+ "model.layers.41.self_attn.k_proj.biases": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.k_proj.scales": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.k_proj.weight": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.o_proj.biases": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.o_proj.scales": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.o_proj.weight": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.q_proj.biases": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.q_proj.scales": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.q_proj.weight": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.v_proj.biases": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.v_proj.scales": "model-00061-of-00071.safetensors",
+ "model.layers.41.self_attn.v_proj.weight": "model-00061-of-00071.safetensors",
+ "model.layers.42.input_layernorm.weight": "model-00064-of-00071.safetensors",
+ "model.layers.42.mlp.gate.e_score_correction_bias": "model-00064-of-00071.safetensors",
+ "model.layers.42.mlp.gate.weight": "model-00064-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.down_proj.biases": "model-00064-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.down_proj.scales": "model-00064-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.down_proj.weight": "model-00064-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.gate_proj.biases": "model-00063-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.gate_proj.scales": "model-00063-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.gate_proj.weight": "model-00063-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.up_proj.biases": "model-00063-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.up_proj.scales": "model-00063-of-00071.safetensors",
+ "model.layers.42.mlp.switch_mlp.up_proj.weight": "model-00063-of-00071.safetensors",
+ "model.layers.42.post_attention_layernorm.weight": "model-00064-of-00071.safetensors",
+ "model.layers.42.self_attn.attention_sink_bias": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.k_proj.biases": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.k_proj.scales": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.k_proj.weight": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.o_proj.biases": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.o_proj.scales": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.o_proj.weight": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.q_proj.biases": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.q_proj.scales": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.q_proj.weight": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.v_proj.biases": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.v_proj.scales": "model-00062-of-00071.safetensors",
+ "model.layers.42.self_attn.v_proj.weight": "model-00062-of-00071.safetensors",
+ "model.layers.43.input_layernorm.weight": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.gate.e_score_correction_bias": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.gate.weight": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.down_proj.biases": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.down_proj.scales": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.down_proj.weight": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.gate_proj.biases": "model-00064-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.gate_proj.scales": "model-00064-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.gate_proj.weight": "model-00064-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.up_proj.biases": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.up_proj.scales": "model-00065-of-00071.safetensors",
+ "model.layers.43.mlp.switch_mlp.up_proj.weight": "model-00065-of-00071.safetensors",
+ "model.layers.43.post_attention_layernorm.weight": "model-00065-of-00071.safetensors",
+ "model.layers.43.self_attn.attention_sink_bias": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.k_proj.biases": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.k_proj.scales": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.k_proj.weight": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.o_proj.biases": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.o_proj.scales": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.o_proj.weight": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.q_proj.biases": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.q_proj.scales": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.q_proj.weight": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.v_proj.biases": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.v_proj.scales": "model-00064-of-00071.safetensors",
+ "model.layers.43.self_attn.v_proj.weight": "model-00064-of-00071.safetensors",
+ "model.layers.44.input_layernorm.weight": "model-00067-of-00071.safetensors",
+ "model.layers.44.mlp.gate.e_score_correction_bias": "model-00067-of-00071.safetensors",
+ "model.layers.44.mlp.gate.weight": "model-00067-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.down_proj.biases": "model-00067-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.down_proj.scales": "model-00067-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.down_proj.weight": "model-00067-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.gate_proj.biases": "model-00066-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.gate_proj.scales": "model-00066-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.gate_proj.weight": "model-00066-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.up_proj.biases": "model-00066-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.up_proj.scales": "model-00066-of-00071.safetensors",
+ "model.layers.44.mlp.switch_mlp.up_proj.weight": "model-00066-of-00071.safetensors",
+ "model.layers.44.post_attention_layernorm.weight": "model-00067-of-00071.safetensors",
+ "model.layers.44.self_attn.attention_sink_bias": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.k_proj.biases": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.k_proj.scales": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.k_proj.weight": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.o_proj.biases": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.o_proj.scales": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.o_proj.weight": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.q_proj.biases": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.q_proj.scales": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.q_proj.weight": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.v_proj.biases": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.v_proj.scales": "model-00065-of-00071.safetensors",
+ "model.layers.44.self_attn.v_proj.weight": "model-00065-of-00071.safetensors",
+ "model.layers.45.input_layernorm.weight": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.gate.e_score_correction_bias": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.gate.weight": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.down_proj.biases": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.down_proj.scales": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.down_proj.weight": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.gate_proj.biases": "model-00067-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.gate_proj.scales": "model-00067-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.gate_proj.weight": "model-00067-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.up_proj.biases": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.up_proj.scales": "model-00068-of-00071.safetensors",
+ "model.layers.45.mlp.switch_mlp.up_proj.weight": "model-00068-of-00071.safetensors",
+ "model.layers.45.post_attention_layernorm.weight": "model-00068-of-00071.safetensors",
+ "model.layers.45.self_attn.attention_sink_bias": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.k_proj.biases": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.k_proj.scales": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.k_proj.weight": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.o_proj.biases": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.o_proj.scales": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.o_proj.weight": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.q_proj.biases": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.q_proj.scales": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.q_proj.weight": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.v_proj.biases": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.v_proj.scales": "model-00067-of-00071.safetensors",
+ "model.layers.45.self_attn.v_proj.weight": "model-00067-of-00071.safetensors",
+ "model.layers.46.input_layernorm.weight": "model-00070-of-00071.safetensors",
+ "model.layers.46.mlp.gate.e_score_correction_bias": "model-00070-of-00071.safetensors",
+ "model.layers.46.mlp.gate.weight": "model-00070-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.down_proj.biases": "model-00070-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.down_proj.scales": "model-00070-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.down_proj.weight": "model-00070-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.gate_proj.biases": "model-00069-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.gate_proj.scales": "model-00069-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.gate_proj.weight": "model-00069-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.up_proj.biases": "model-00069-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.up_proj.scales": "model-00069-of-00071.safetensors",
+ "model.layers.46.mlp.switch_mlp.up_proj.weight": "model-00069-of-00071.safetensors",
+ "model.layers.46.post_attention_layernorm.weight": "model-00070-of-00071.safetensors",
+ "model.layers.46.self_attn.attention_sink_bias": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.k_proj.biases": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.k_proj.scales": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.k_proj.weight": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.o_proj.biases": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.o_proj.scales": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.o_proj.weight": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.q_proj.biases": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.q_proj.scales": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.q_proj.weight": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.v_proj.biases": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.v_proj.scales": "model-00068-of-00071.safetensors",
+ "model.layers.46.self_attn.v_proj.weight": "model-00068-of-00071.safetensors",
+ "model.layers.47.input_layernorm.weight": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.gate.e_score_correction_bias": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.gate.weight": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.down_proj.biases": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.down_proj.scales": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.down_proj.weight": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.gate_proj.biases": "model-00070-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.gate_proj.scales": "model-00070-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.gate_proj.weight": "model-00070-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.up_proj.biases": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.up_proj.scales": "model-00071-of-00071.safetensors",
+ "model.layers.47.mlp.switch_mlp.up_proj.weight": "model-00071-of-00071.safetensors",
+ "model.layers.47.post_attention_layernorm.weight": "model-00071-of-00071.safetensors",
+ "model.layers.47.self_attn.k_proj.biases": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.k_proj.scales": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.k_proj.weight": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.o_proj.biases": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.o_proj.scales": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.o_proj.weight": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.q_proj.biases": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.q_proj.scales": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.q_proj.weight": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.v_proj.biases": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.v_proj.scales": "model-00070-of-00071.safetensors",
+ "model.layers.47.self_attn.v_proj.weight": "model-00070-of-00071.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.gate.e_score_correction_bias": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.gate.weight": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.down_proj.biases": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.down_proj.scales": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.down_proj.weight": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.gate_proj.biases": "model-00007-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.gate_proj.scales": "model-00007-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.gate_proj.weight": "model-00007-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.up_proj.biases": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.up_proj.scales": "model-00008-of-00071.safetensors",
+ "model.layers.5.mlp.switch_mlp.up_proj.weight": "model-00008-of-00071.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00008-of-00071.safetensors",
+ "model.layers.5.self_attn.k_proj.biases": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.k_proj.scales": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.o_proj.biases": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.o_proj.scales": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.q_proj.biases": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.q_proj.scales": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.v_proj.biases": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.v_proj.scales": "model-00007-of-00071.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00007-of-00071.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00010-of-00071.safetensors",
+ "model.layers.6.mlp.gate.e_score_correction_bias": "model-00010-of-00071.safetensors",
+ "model.layers.6.mlp.gate.weight": "model-00010-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.down_proj.biases": "model-00010-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.down_proj.scales": "model-00010-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.down_proj.weight": "model-00010-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.gate_proj.biases": "model-00009-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.gate_proj.scales": "model-00009-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.gate_proj.weight": "model-00009-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.up_proj.biases": "model-00009-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.up_proj.scales": "model-00009-of-00071.safetensors",
+ "model.layers.6.mlp.switch_mlp.up_proj.weight": "model-00009-of-00071.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00010-of-00071.safetensors",
+ "model.layers.6.self_attn.attention_sink_bias": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.k_proj.biases": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.k_proj.scales": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.o_proj.biases": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.o_proj.scales": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.q_proj.biases": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.q_proj.scales": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.v_proj.biases": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.v_proj.scales": "model-00008-of-00071.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00008-of-00071.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.gate.e_score_correction_bias": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.gate.weight": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.down_proj.biases": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.down_proj.scales": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.down_proj.weight": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.gate_proj.biases": "model-00010-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.gate_proj.scales": "model-00010-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.gate_proj.weight": "model-00010-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.up_proj.biases": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.up_proj.scales": "model-00011-of-00071.safetensors",
+ "model.layers.7.mlp.switch_mlp.up_proj.weight": "model-00011-of-00071.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00011-of-00071.safetensors",
+ "model.layers.7.self_attn.attention_sink_bias": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.k_proj.biases": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.k_proj.scales": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.o_proj.biases": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.o_proj.scales": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.q_proj.biases": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.q_proj.scales": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.v_proj.biases": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.v_proj.scales": "model-00010-of-00071.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00010-of-00071.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00013-of-00071.safetensors",
+ "model.layers.8.mlp.gate.e_score_correction_bias": "model-00013-of-00071.safetensors",
+ "model.layers.8.mlp.gate.weight": "model-00013-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.down_proj.biases": "model-00013-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.down_proj.scales": "model-00013-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.down_proj.weight": "model-00013-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.gate_proj.biases": "model-00012-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.gate_proj.scales": "model-00012-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.gate_proj.weight": "model-00012-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.up_proj.biases": "model-00012-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.up_proj.scales": "model-00012-of-00071.safetensors",
+ "model.layers.8.mlp.switch_mlp.up_proj.weight": "model-00012-of-00071.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00013-of-00071.safetensors",
+ "model.layers.8.self_attn.attention_sink_bias": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.k_proj.biases": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.k_proj.scales": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.o_proj.biases": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.o_proj.scales": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.q_proj.biases": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.q_proj.scales": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.v_proj.biases": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.v_proj.scales": "model-00011-of-00071.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00011-of-00071.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.gate.e_score_correction_bias": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.gate.weight": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.down_proj.biases": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.down_proj.scales": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.down_proj.weight": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.gate_proj.biases": "model-00013-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.gate_proj.scales": "model-00013-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.gate_proj.weight": "model-00013-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.up_proj.biases": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.up_proj.scales": "model-00014-of-00071.safetensors",
+ "model.layers.9.mlp.switch_mlp.up_proj.weight": "model-00014-of-00071.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00014-of-00071.safetensors",
+ "model.layers.9.self_attn.attention_sink_bias": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.k_proj.biases": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.k_proj.scales": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.o_proj.biases": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.o_proj.scales": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.q_proj.biases": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.q_proj.scales": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.v_proj.biases": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.v_proj.scales": "model-00013-of-00071.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00013-of-00071.safetensors",
+ "model.norm.weight": "model-00071-of-00071.safetensors"
+ }
+}
\ No newline at end of file
diff --git a/modeling_mimo_v2_flash.py b/modeling_mimo_v2_flash.py
new file mode 100644
index 0000000000000000000000000000000000000000..e13fa11b83fade578d33b6dd7941365deafcbea9
--- /dev/null
+++ b/modeling_mimo_v2_flash.py
@@ -0,0 +1,664 @@
+# coding=utf-8
+#
+# Copyright 2025 Xiaomi Corporation.
+# Copyright 2025 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from transformers.generation import GenerationMixin
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.integrations import use_kernel_forward_from_hub
+
+from transformers.modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+)
+
+from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
+from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from transformers.processing_utils import Unpack
+from transformers.utils import (
+ logging,
+)
+
+from transformers.modeling_outputs import MoeModelOutputWithPast
+from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
+from .configuration_mimo_v2_flash import MiMoV2FlashConfig
+
+logger = logging.get_logger(__name__)
+
+
def rotate_half(x):
    """Rotate the last dimension for RoPE: split x into halves [a, b] and return [-b, a]."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
+
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply Rotary Position Embedding to query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*): Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they broadcast
            against q/k. Use 1 for [batch, heads, seq, head_dim] layouts and 2 for
            [batch, seq, heads, head_dim] layouts.

    Returns:
        `tuple(torch.Tensor)`: the rotated query and key tensors.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    def _rotate(t):
        # Equivalent of rotate_half: [a, b] halves -> [-b, a].
        half = t.shape[-1] // 2
        return torch.cat((-t[..., half:], t[..., :half]), dim=-1)

    return q * cos + _rotate(q) * sin, k * cos + _rotate(k) * sin
+
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Duplicate key/value heads for grouped-query attention.

    Each of the `num_key_value_heads` heads is repeated `n_rep` times
    (contiguously, i.e. torch.repeat_interleave along dim 1), taking the
    tensor from (batch, num_key_value_heads, seq_len, head_dim) to
    (batch, num_key_value_heads * n_rep, seq_len, head_dim).
    """
    if n_rep == 1:
        # Nothing to do when every query head already has its own KV head.
        return hidden_states
    return hidden_states.repeat_interleave(n_rep, dim=1)
+
+
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    sinks: Optional[torch.Tensor] = None,
):
    """Reference (non-fused) scaled-dot-product attention with optional attention sinks.

    Args:
        module: attention module; provides `num_key_value_groups`, `training`.
        query: (batch, num_heads, q_len, head_dim).
        key/value: (batch, num_kv_heads, kv_len, head_dim) — expanded to
            num_heads via `repeat_kv`.
        attention_mask: additive mask, sliced to the key length.
        scaling: logit scale (typically head_dim ** -0.5).
        dropout: attention-probability dropout (active only in training).
        sinks: optional per-head sink logits appended as one extra "token"
            column; the sink absorbs probability mass and is dropped after
            the softmax.

    Returns:
        (attn_output, attn_weights): output transposed to
        (batch, q_len, num_heads, head_dim), and the post-dropout probabilities.
    """
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)
    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    if sinks is not None:
        # Fix: use the `sinks` argument instead of silently re-reading
        # module.attention_sink_bias — the parameter was previously ignored,
        # so callers could not pass an explicit sink tensor.
        sink_logits = sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
        attn_weights = torch.cat([attn_weights, sink_logits], dim=-1)

    # Subtract the row max for numerical stability before the softmax.
    attn_weights = attn_weights - attn_weights.max(dim=-1, keepdim=True).values
    probs = F.softmax(attn_weights, dim=-1, dtype=attn_weights.dtype)

    if sinks is not None:
        probs = probs[..., :-1]  # we drop the sink column here

    attn_weights = nn.functional.dropout(probs, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
+
+
@use_kernel_forward_from_hub("RMSNorm")
class MiMoV2RMSNorm(nn.Module):
    """Root-mean-square layer norm (equivalent to T5LayerNorm): scale-only, no mean centering."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learnable per-channel scale; epsilon guards the rsqrt.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize in float32 for numerical stability, then cast back.
        original_dtype = hidden_states.dtype
        upcast = hidden_states.to(torch.float32)
        mean_square = upcast.pow(2).mean(-1, keepdim=True)
        normalized = upcast * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(original_dtype)
+
+
class MiMoV2MLP(nn.Module):
    """SwiGLU-style feed-forward block: down_proj(act(gate_proj(x)) * up_proj(x))."""

    def __init__(self, config: MiMoV2FlashConfig, intermediate_size=None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        # Routed experts pass their own (smaller) intermediate size; dense
        # layers fall back to the config default.
        self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        gated = self.act_fn(self.gate_proj(hidden_states))
        return self.down_proj(gated * self.up_proj(hidden_states))
+
+
class MiMoV2MoEGate(nn.Module):
    """Router for the mixture-of-experts layers.

    Scores every routed expert per token (sigmoid scoring) and selects the
    top-k experts with grouped "noaux_tc" selection: expert groups are ranked
    first, then the top-k experts are chosen within the surviving groups.
    `forward` returns `(topk_idx, topk_weight)`.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Number of experts activated per token.
        self.top_k = config.num_experts_per_tok
        self.n_routed_experts = config.n_routed_experts
        self.routed_scaling_factor = (
            config.routed_scaling_factor
            if config.routed_scaling_factor is not None
            else 1.0
        )
        self.scoring_func = config.scoring_func
        self.topk_method = config.topk_method
        # Experts are partitioned into n_group groups; topk_group groups survive.
        self.n_group = config.n_group
        self.topk_group = config.topk_group

        # topk selection algorithm
        self.norm_topk_prob = config.norm_topk_prob
        self.gating_dim = config.hidden_size
        # Gating projection; torch.empty leaves values uninitialized — weights
        # are expected to be loaded from the checkpoint.
        self.weight = nn.Parameter(
            torch.empty((self.n_routed_experts, self.gating_dim))
        )
        if self.topk_method == "noaux_tc":
            # Per-expert bias used only for expert *selection*, not for the
            # final combination weights (aux-loss-free load balancing).
            self.e_score_correction_bias = nn.Parameter(
                torch.empty((self.n_routed_experts))
            )

    def forward(self, hidden_states):
        # hidden_states: (batch, seq_len, hidden_size)
        bsz, seq_len, h = hidden_states.shape
        ### compute gating score
        hidden_states = hidden_states.view(-1, h)
        # Routing runs in float32 regardless of the model dtype for stability.
        logits = F.linear(
            hidden_states.type(torch.float32), self.weight.type(torch.float32), None
        )
        if self.scoring_func == "sigmoid":
            scores = logits.sigmoid()
        else:
            raise NotImplementedError(
                f"insupportable scoring function for MoE gating: {self.scoring_func}"
            )

        ### select top-k experts
        if self.topk_method == "noaux_tc":
            # Inference-only routing path (no auxiliary load-balancing loss).
            assert not self.training
            # Selection uses bias-corrected scores; combination weights (below)
            # are gathered from the raw sigmoid scores.
            scores_for_choice = scores.view(bsz * seq_len, -1) + self.e_score_correction_bias.unsqueeze(0)
            # A group's score is the sum of its two best experts' scores.
            group_scores = (
                scores_for_choice.view(bsz * seq_len, self.n_group, -1).topk(2, dim=-1)[0].sum(dim = -1)
            ) # [n, n_group]
            group_idx = torch.topk(
                group_scores, k=self.topk_group, dim=-1, sorted=False
            )[
                1
            ] # [n, top_k_group]
            group_mask = torch.zeros_like(group_scores) # [n, n_group]
            group_mask.scatter_(1, group_idx, 1) # [n, n_group]
            # Broadcast the group mask down to individual experts.
            score_mask = (
                group_mask.unsqueeze(-1)
                .expand(
                    bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group
                )
                .reshape(bsz * seq_len, -1)
            ) # [n, e]
            # Experts outside the chosen groups can never make the top-k.
            tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf")) # [n, e]
            _, topk_idx = torch.topk(
                tmp_scores, k=self.top_k, dim=-1, sorted=False
            )
            # Combination weights come from the *uncorrected* scores.
            topk_weight = scores.gather(1, topk_idx)
        else:
            raise NotImplementedError(
                f"insupportable TopK function for MoE gating: {self.topk_method}"
            )

        ### norm gate to sum 1
        if self.top_k > 1 and self.norm_topk_prob:
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
            topk_weight = topk_weight / denominator
        topk_weight = topk_weight * self.routed_scaling_factor # must multiply the scaling factor

        return topk_idx, topk_weight
+
+
class MiMoV2MoE(nn.Module):
    """
    Sparse mixture-of-experts feed-forward block: the gate picks the top-k
    routed experts per token and their outputs are combined with the gate's
    routing weights.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.experts = nn.ModuleList(
            [
                MiMoV2MLP(config, intermediate_size=config.moe_intermediate_size)
                for _ in range(config.n_routed_experts)
            ]
        )
        self.gate = MiMoV2MoEGate(config)

    def moe(self, hidden_states: torch.Tensor, topk_indices: torch.Tensor, topk_weights: torch.Tensor) -> torch.Tensor:
        r"""
        Dispatch flattened tokens to their selected experts and accumulate the
        weighted expert outputs.

        NOTE: CALL FOR CONTRIBUTION — expert weights could be fused to avoid
        the per-expert Python loop (deepseek has 256 experts).

        Args:
            hidden_states: (num_tokens, hidden_size), already flattened by `forward`.
            topk_indices: (num_tokens, top_k) expert ids per token.
            topk_weights: (num_tokens, top_k) combination weights per token.
        """
        # Accumulate in the routing-weight dtype (float32) for precision,
        # then cast back at the end.
        final_hidden_states = torch.zeros_like(hidden_states, dtype=topk_weights.dtype)
        # expert_mask[e, t, s] == 1 iff expert e is the s-th choice of token t.
        expert_mask = torch.nn.functional.one_hot(topk_indices, num_classes=len(self.experts))
        expert_mask = expert_mask.permute(2, 0, 1)

        for expert_idx in range(len(self.experts)):
            expert = self.experts[expert_idx]
            mask = expert_mask[expert_idx]
            token_indices, weight_indices = torch.where(mask)

            if token_indices.numel() > 0:
                expert_weights = topk_weights[token_indices, weight_indices]
                expert_input = hidden_states[token_indices]
                expert_output = expert(expert_input)
                weighted_output = expert_output * expert_weights.unsqueeze(-1)
                final_hidden_states.index_add_(0, token_indices, weighted_output)

        # in original deepseek, the output of the experts are gathered once we leave this module
        # thus the moe module is itself an IsolatedParallel module
        # and all experts are "local" meaning we shard but we don't gather
        return final_hidden_states.type(hidden_states.dtype)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Fix: the return annotation previously claimed a 2-tuple, but a single
        # tensor of the input shape is (and was) returned.
        orig_shape = hidden_states.shape
        topk_indices, topk_weights = self.gate(hidden_states)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        hidden_states = self.moe(hidden_states, topk_indices, topk_weights).view(*orig_shape)

        return hidden_states
+
+
class MiMoV2Attention(nn.Module):
    """MiMoV2 Global Attention (pattern == 0) and Sliding Window Attention (pattern == 1)."""

    def __init__(self, config: MiMoV2FlashConfig, is_swa: bool, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx

        # SWA layers may use their own head geometry, distinct from the
        # full-attention layers.
        if is_swa:
            self.head_dim = config.swa_head_dim
            self.v_head_dim = config.swa_v_head_dim
            self.num_attention_heads = config.swa_num_attention_heads
            self.num_key_value_heads = config.swa_num_key_value_heads
        else:
            self.head_dim = config.head_dim
            self.v_head_dim = config.v_head_dim
            self.num_attention_heads = config.num_attention_heads
            self.num_key_value_heads = config.num_key_value_heads

        # Partial RoPE: only the first `rope_dim` channels of each Q/K head
        # receive rotary position embedding.
        self.rope_dim = int(self.head_dim * config.partial_rotary_factor)
        self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
        self.attention_bias = config.attention_bias
        self.attention_dropout: float = config.attention_dropout
        self.scaling = self.head_dim ** -0.5

        # These dimensions are for the attention layers.
        # Q/K heads and V heads may have different per-head dims.
        q_hidden_size = self.num_attention_heads * self.head_dim
        k_hidden_size = self.num_key_value_heads * self.head_dim
        v_hidden_size = self.num_key_value_heads * self.v_head_dim
        o_hidden_size = self.num_attention_heads * self.v_head_dim

        self.q_proj = nn.Linear(config.hidden_size, q_hidden_size, bias=self.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, k_hidden_size, bias=self.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, v_hidden_size, bias=self.attention_bias)
        self.o_proj = nn.Linear(o_hidden_size, config.hidden_size, bias=False)

        # Optional learned per-head "attention sink" logit (frozen; loaded
        # from the checkpoint).
        # NOTE(review): this allocates config.num_attention_heads entries even
        # on SWA layers; if swa_num_attention_heads differs it looks like it
        # should be self.num_attention_heads — confirm against checkpoint shapes.
        self.attention_sink_bias = (
            torch.nn.Parameter(torch.empty(config.num_attention_heads), requires_grad=False)
            if (config.add_full_attention_sink_bias and not is_swa) or (config.add_swa_attention_sink_bias and is_swa)
            else None
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Project to (batch, heads, seq, head_dim); -1 infers the head count.
        input_shape = hidden_states.shape[:-1]
        qk_hidden_shape = (*input_shape, -1, self.head_dim)
        v_hidden_shape = (*input_shape, -1, self.v_head_dim)

        query_states = self.q_proj(hidden_states).view(qk_hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(qk_hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(v_hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings

        # Partial RoPE: rotate only the leading rope_dim channels, leave the
        # rest ("nope" part) untouched, then re-concatenate.
        query_rope, query_nope = query_states.split([self.rope_dim, self.head_dim - self.rope_dim], dim=-1)
        key_rope, key_nope = key_states.split([self.rope_dim, self.head_dim - self.rope_dim], dim=-1)

        query_rope, key_rope = apply_rotary_pos_emb(query_rope, key_rope, cos, sin)

        query_states = torch.cat([query_rope, query_nope], dim=-1)
        key_states = torch.cat([key_rope, key_nope], dim=-1)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend; "eager" uses the
        # reference implementation above.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            position_ids=position_ids,
            sinks=self.attention_sink_bias,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
+
+
+class MiMoV2DecoderLayer(nn.Module):
+ """
+ MiMoV2 Decoder Layer. It dynamically chooses the correct attention
+ module based on the layer index and the `hybrid_layer_pattern`.
+ """
+
+ def __init__(self, config: MiMoV2FlashConfig, layer_idx: int):
+ super().__init__()
+
+ # This is the key logic: choose the module based on the pattern
+ is_swa_layer = config.hybrid_layer_pattern[layer_idx] == 1
+ if is_swa_layer:
+ self.attention_type = "sliding_window_attention"
+ self.self_attn = MiMoV2Attention(config, True, layer_idx)
+ else:
+ self.attention_type = "full_attention"
+ self.self_attn = MiMoV2Attention(config, False, layer_idx)
+
+ self.mlp = (
+ MiMoV2MoE(config)
+ if (
+ getattr(config, 'n_routed_experts', None) is not None
+ and config.moe_layer_freq[layer_idx]
+ )
+ else MiMoV2MLP(config)
+ )
+
+ self.input_layernorm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
+ self.post_attention_layernorm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
+ self.hidden_size = config.hidden_size
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> torch.Tensor:
+ residual = hidden_states
+ hidden_states = self.input_layernorm(hidden_states)
+ # Self Attention
+ hidden_states, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # MLP or MOE
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+ return hidden_states
+
+class MiMoV2FlashRotaryEmbedding(nn.Module):
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
+
+ def __init__(self, config: MiMoV2FlashConfig, is_swa, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+ if is_swa:
+ self.config.rope_theta = config.swa_rope_theta
+ self.config.head_dim = config.swa_head_dim
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ @torch.no_grad()
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
+ def forward(self, x, position_ids):
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+ position_ids_expanded = position_ids[:, None, :].float()
+
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos() * self.attention_scaling
+ sin = emb.sin() * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+@auto_docstring
+class MiMoV2Model(PreTrainedModel):
+ """The main 'model' block, corresponding to `model.` in the weight map."""
+ config_class = MiMoV2FlashConfig
+
+ def __init__(self, config: MiMoV2FlashConfig):
+ super().__init__(config)
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+ self.layers = nn.ModuleList(
+ [MiMoV2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
+ self.rotary_emb = MiMoV2FlashRotaryEmbedding(config=config, is_swa=False)
+ self.swa_rotary_emb = MiMoV2FlashRotaryEmbedding(config=config, is_swa=True)
+
+ self.has_sliding_layers = any(
+ pattern == 1 for pattern in config.hybrid_layer_pattern
+ )
+
+ # For Huggingface DynamicCache compatibility
+ self.config.layer_types = [
+ "sliding_attention" if config.hybrid_layer_pattern[i] == 1 else "full_attention"
+ for i in range(config.num_hidden_layers)
+ ]
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> MoeModelOutputWithPast:
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache(config=self.config)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ # It may already have been prepared by e.g. `generate`
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
+ # Prepare mask arguments
+ mask_kwargs = {
+ "config": self.config,
+ "input_embeds": inputs_embeds,
+ "attention_mask": attention_mask,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "position_ids": position_ids,
+ }
+ # Create the masks
+ causal_mask_mapping = {
+ "full_attention": create_causal_mask(**mask_kwargs),
+ }
+ # The sliding window alternating layers are not always activated depending on the config
+ if self.has_sliding_layers:
+ causal_mask_mapping["sliding_window_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
+
+ hidden_states = inputs_embeds
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+ swa_position_embeddings = self.swa_rotary_emb(hidden_states, position_ids)
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ hidden_states = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask_mapping[decoder_layer.attention_type],
+ position_embeddings=(
+ position_embeddings
+ if decoder_layer.attention_type == "full_attention"
+ else swa_position_embeddings
+ ),
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = self.norm(hidden_states)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ )
+
+
+@auto_docstring
+class MiMoV2FlashForCausalLM(PreTrainedModel,GenerationMixin):
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+ _tp_plan = {"lm_head": "colwise_rep"}
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+ config_class = MiMoV2FlashConfig
+ _keys_to_ignore_on_load_unexpected = [r"model.layers\.\d+\.self_attn\.rotary_emb\.inv_freq"]
+
+ def __init__(self, config: MiMoV2FlashConfig):
+ super().__init__(config)
+ self.model = MiMoV2Model(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @can_return_tuple
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ logits_to_keep: Union[int, torch.Tensor] = 0,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> CausalLMOutputWithPast:
+
+ outputs: BaseModelOutputWithPast = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs.last_hidden_state
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+# Explicit public API of this module: only the causal-LM wrapper is exported.
+__all__ = [
+    "MiMoV2FlashForCausalLM"
+]
\ No newline at end of file
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..d4ba2b245534db7e662222ae107e959930ee4dae
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05d47c87966b4db779200053de490f89936ed529f8ab889244e271630715fcfe
+size 11422638
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f0509bd4d964cb8ad33d4f0f7305fe3553e8eb40
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_prefix_space": false,
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "backend": "tokenizers",
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "is_local": true,
+ "model_max_length": 262144,
+ "model_specific_special_tokens": {},
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+}