diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..52373fe24473b1aa44333d318f578ae6bf04b49b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index 01fa0d9ff07263678daa29104bae7a3a4c6bb4b1..1046b24f144cdcbf5b1a83324969e4d0b03a7e52 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
---
-license: mit
-base_model:
-- XiaomiMiMo/MiMo-V2-Flash
----
\ No newline at end of file
+language: en
+library_name: mlx
+pipeline_tag: text-generation
+tags:
+- mlx
+---
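
The refreshed front matter tags the repo for MLX text generation. A minimal usage sketch, assuming the standard mlx-lm API (the repo id below is a placeholder, not taken from this diff):

```python
# Hedged sketch, not part of this commit: loading an MLX-converted repo
# tagged `library_name: mlx` with mlx-lm. The repo id is a placeholder.
from mlx_lm import load, generate

model, tokenizer = load("org/MiMo-V2-Flash-mlx-8bit")  # placeholder id

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    add_generation_prompt=True,
    tokenize=False,
)
print(generate(model, tokenizer, prompt=prompt, max_tokens=128))
```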
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..3e59b05b2427fdfc4321c3497a32683954e0702d
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,143 @@
+{%- if not add_generation_prompt is defined -%}
+ {%- set add_generation_prompt = false -%}
+{%- endif -%}
+{%- if not enable_thinking is defined -%}
+ {%- set enable_thinking = false -%}
+{%- endif -%}
+{%- if not keep_all_reasoning is defined -%}
+ {%- set keep_all_reasoning = false -%}
+{%- endif -%}
+{%- macro render_extra_keys(json_dict, handled_keys) -%}
+ {%- if json_dict is mapping %}
+ {%- for json_key in json_dict if json_key not in handled_keys %}
+ {%- if json_dict[json_key] is mapping or (json_dict[json_key] is sequence and json_dict[json_key] is not string) %}
+ {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | tojson | safe) ~ '</' ~ json_key ~ '>' }}
+ {%- else %}
+ {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | string) ~ '</' ~ json_key ~ '>' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+{%- endmacro -%}
+{%- if messages[0]["role"] == "system" %}
+ {%- set system_message = messages[0]["content"] %}
+ {%- set loop_messages = messages[1:] %}
+{%- else %}
+ {%- set loop_messages = messages %}
+{%- endif %}
+{%- set ns = namespace(last_user_index=-1) %}
+{%- for m in loop_messages %}
+ {%- if m.role == 'user' %}
+ {%- set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+{%- endfor %}
+{%- if not tools is defined %}
+ {%- set tools = [] %}
+{%- endif %}
+{%- if system_message is defined %}
+ {{- "<|im_start|>system\n" + system_message }}
+{%- else %}
+ {{- "<|im_start|>system\nYou are MiMo, a helpful AI assistant engineered by Xiaomi." }}
+{%- endif %}
+{%- if tools is iterable and tools | length > 0 %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou have access to the following functions:\n\n" }}
+ {{- "" }}
+ {%- for tool in tools %}
+ {%- if tool.function is defined %}
+ {%- set tool = tool.function %}
+ {%- endif %}
+ {{- "\n\n" ~ tool.name ~ "" }}
+ {%- if tool.description is defined %}
+ {{- '\n<description>' ~ (tool.description | trim) ~ '</description>' }}
+ {%- endif %}
+ {{- '\n<parameters>' }}
+ {%- if tool.parameters is defined and tool.parameters is mapping and tool.parameters.properties is defined and tool.parameters.properties is mapping %}
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
+ {{- '\n<parameter>' }}
+ {{- '\n<name>' ~ param_name ~ '</name>' }}
+ {%- if param_fields.type is defined %}
+ {{- '\n<type>' ~ (param_fields.type | string) ~ '</type>' }}
+ {%- endif %}
+ {%- if param_fields.description is defined %}
+ {{- '\n<description>' ~ (param_fields.description | trim) ~ '</description>' }}
+ {%- endif %}
+ {%- set handled_keys = ['name', 'type', 'description'] %}
+ {{- render_extra_keys(param_fields, handled_keys) }}
+ {{- '\n</parameter>' }}
+ {%- endfor %}
+ {%- endif %}
+ {%- set handled_keys = ['type', 'properties'] %}
+ {{- render_extra_keys(tool.parameters, handled_keys) }}
+ {{- '\n</parameters>' }}
+ {%- set handled_keys = ['type', 'name', 'description', 'parameters'] %}
+ {{- render_extra_keys(tool, handled_keys) }}
+ {{- '\n</function>' }}
+ {%- endfor %}
+ {{- "\n" }}
+ {{- '\n\nFor each function call, output the function name and arguments in the following format:\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple lines</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- DO NOT use function calls inside <think></think> tags.\n- The value enclosed between parameter tags is preserved exactly as-is, including newlines and spaces.\n</IMPORTANT>' }}
+{%- endif %}
+{{- '<|im_end|>' }}
+{%- for message in loop_messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if message.role == "assistant" %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- set reasoning_content = '' %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].split('<think>')[-1] %}
+ {%- set content = content.split('</think>')[-1] %}
+ {%- endif %}
+ {%- endif %}
+ {%- if (keep_all_reasoning or loop.index0 > ns.last_user_index) and reasoning_content -%}
+ {{- '<|im_start|>' + message.role + '\n<think>' + reasoning_content + '</think>' + content }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n<function=' + tool_call.name + '>' }}
+ {%- if tool_call.arguments is defined %}
+ {%- for args_name, args_value in tool_call.arguments|items %}
+ {{- '<parameter=' + args_name + '>' }}
+ {%- set args_value = args_value | tojson | safe if args_value is mapping or (args_value is sequence and args_value is not string) else args_value | string %}
+ {{- args_value }}
+ {{- '</parameter>\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '</function>\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>' }}
+ {%- elif message.role == "user" or message.role == "system"%}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
+ {{- '<|im_start|>tool\n' }}
+ {%- endif %}
+ {{- '<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>\n' }}
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
+ {{- '<|im_end|>' }}
+ {%- elif loop.last %}
+ {{- '<|im_end|>' }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' }}
+ {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if not enable_thinking -%}
+ {{- '<think></think>' -}}
+ {%- else -%}
+ {{- '<think>' -}}
+ {%- endif -%}
+{%- endif %}
\ No newline at end of file
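
The template above defaults `add_generation_prompt`, `enable_thinking`, and `keep_all_reasoning`, renders tool schemas as XML, and opens a `<think>` block when thinking is enabled. A hedged sketch of exercising it through `transformers` (the tool definition is hypothetical; newer transformers versions pick up a repo-level chat_template.jinja automatically):

```python
# Hedged sketch, not part of this commit: rendering the chat template with a
# hypothetical tool. Assumes the repo is cloned locally at ".".
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical example tool
        "description": "Look up the current weather.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    },
}]

messages = [{"role": "user", "content": "What's the weather in Beijing?"}]

# add_generation_prompt / enable_thinking / keep_all_reasoning are the
# variables the template defaults at its top; extra kwargs reach the template.
prompt = tokenizer.apply_chat_template(
    messages,
    tools=tools,
    add_generation_prompt=True,
    enable_thinking=True,
    tokenize=False,
)
print(prompt)  # ends with "<|im_start|>assistant\n<think>" when thinking is on
```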
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4e8bcb7f7afef1af3e45036107f90a9214160f63
--- /dev/null
+++ b/config.json
@@ -0,0 +1,162 @@
+{
+ "add_full_attention_sink_bias": false,
+ "add_swa_attention_sink_bias": true,
+ "architectures": [
+ "MiMoV2FlashForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_chunk_size": 128,
+ "attention_dropout": 0.0,
+ "attention_value_scale": 0.707,
+ "auto_map": {
+ "AutoConfig": "configuration_mimo_v2_flash.MiMoV2FlashConfig",
+ "AutoModel": "modeling_mimo_v2_flash.MiMoV2FlashModel",
+ "AutoModelForCausalLM": "modeling_mimo_v2_flash.MiMoV2FlashForCausalLM"
+ },
+ "head_dim": 192,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "hybrid_layer_pattern": [
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0
+ ],
+ "initializer_range": 0.02,
+ "intermediate_size": 16384,
+ "layernorm_epsilon": 1e-05,
+ "max_position_embeddings": 262144,
+ "model_type": "mimo_v2_flash",
+ "moe_intermediate_size": 2048,
+ "moe_layer_freq": [
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1
+ ],
+ "n_group": 1,
+ "n_routed_experts": 256,
+ "n_shared_experts": null,
+ "norm_topk_prob": true,
+ "num_attention_heads": 64,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 4,
+ "partial_rotary_factor": 0.334,
+ "quantization": {
+ "group_size": 32,
+ "bits": 8,
+ "mode": "affine"
+ },
+ "quantization_config": {
+ "group_size": 32,
+ "bits": 8,
+ "mode": "affine"
+ },
+ "rope_theta": 5000000,
+ "routed_scaling_factor": null,
+ "scoring_func": "sigmoid",
+ "sliding_window": 128,
+ "sliding_window_size": 128,
+ "swa_head_dim": 192,
+ "swa_num_attention_heads": 64,
+ "swa_num_key_value_heads": 8,
+ "swa_rope_theta": 10000,
+ "swa_v_head_dim": 128,
+ "tie_word_embeddings": false,
+ "topk_group": 1,
+ "topk_method": "noaux_tc",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.1",
+ "use_cache": true,
+ "v_head_dim": 128,
+ "vocab_size": 152576
+}
\ No newline at end of file
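
Two details of this config are worth decoding; the sketch below is an inference from the checkpoint itself, not from documentation:

```python
# Hedged sketch, not part of this commit. Field meanings are inferred:
# in hybrid_layer_pattern, 0 appears to mark full-attention layers (those
# layers carry no attention_sink_bias in the weight index, consistent with
# add_full_attention_sink_bias=false) and 1 the sliding-window layers.
import json

with open("config.json") as f:
    cfg = json.load(f)

full_attn = [i for i, v in enumerate(cfg["hybrid_layer_pattern"]) if v == 0]
print(full_attn)  # [0, 5, 11, 17, 23, 29, 35, 41, 47] -> one global layer per block

# Rough storage estimate for 8-bit affine quantization with group_size=32:
# 1 byte per weight plus one fp16 scale and one fp16 bias per 32-weight group.
q = cfg["quantization"]
bytes_per_param = q["bits"] / 8 + (2 + 2) / q["group_size"]  # = 1.125
print(f"{308_778_778_368 * bytes_per_param / 1e9:.0f} GB")
# ~347 GB, close to the 347,419,635,584-byte total in model.safetensors.index.json
# (the small gap comes from unquantized tensors such as the layernorm weights).
```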
diff --git a/configuration_mimo_v2_flash.py b/configuration_mimo_v2_flash.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc3011a63160318fac7b277685bc8829a7e57f7b
--- /dev/null
+++ b/configuration_mimo_v2_flash.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+#
+# Copyright 2025 Xiaomi Corporation.
+# Copyright 2025 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.modeling_rope_utils import rope_config_validation
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MiMoV2FlashConfig(PretrainedConfig):
+
+ model_type = ""
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ # Default tensor parallel plan for base model `Hybrid`
+ base_model_tp_plan = {
+ "layers.*.self_attn.q_proj": "colwise",
+ "layers.*.self_attn.k_proj": "colwise",
+ "layers.*.self_attn.v_proj": "colwise",
+ "layers.*.self_attn.o_proj": "rowwise",
+ "layers.*.mlp.gate_proj": "colwise",
+ "layers.*.mlp.up_proj": "colwise",
+ "layers.*.mlp.down_proj": "rowwise",
+ }
+ base_model_pp_plan = {
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+ "norm": (["hidden_states"], ["hidden_states"]),
+ }
+
+ attribute_map = {
+ "num_local_experts": "n_routed_experts",
+ }
+
+ def __init__(
+ self,
+ vocab_size=151936,
+ hidden_size=4096,
+ intermediate_size=22016,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=32,
+ hidden_act="silu",
+ max_position_embeddings=32768,
+ initializer_range=0.02,
+ layernorm_epsilon=1e-6,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ attention_dropout=0.0,
+ hybrid_block_size=None,
+ hybrid_layer_pattern=None,
+ partial_rotary_factor=1.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layernorm_epsilon = layernorm_epsilon
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self.attention_dropout = attention_dropout
+
+ if hybrid_block_size is not None and hybrid_layer_pattern is None:
+ hybrid_layer_pattern = [0 if ((i + 1) % hybrid_block_size == 0) else 1 for i in range(num_hidden_layers)]
+ self.hybrid_block_size = hybrid_block_size
+ self.hybrid_layer_pattern = hybrid_layer_pattern
+
+ self.partial_rotary_factor = partial_rotary_factor
+
+ # Validate the correctness of rotary position embeddings parameters
+ # BC: if there is a 'type' field, move it to 'rope_type'.
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+ rope_config_validation(self)
+
+ super().__init__(
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
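
A quick usage sketch of the class above, showing how `hybrid_block_size` expands into `hybrid_layer_pattern` (toy sizes, not the shipped 48-layer checkpoint):

```python
# Sketch, not part of this commit: hybrid_block_size -> hybrid_layer_pattern,
# per the list comprehension in __init__ above.
from configuration_mimo_v2_flash import MiMoV2FlashConfig

cfg = MiMoV2FlashConfig(num_hidden_layers=12, hybrid_block_size=6)
print(cfg.hybrid_layer_pattern)
# [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0] -- every 6th layer ((i + 1) % 6 == 0) is 0
# The shipped config.json instead sets hybrid_layer_pattern explicitly, which
# also lets it mark layer 0 as 0 (a dense, full-attention first layer).
```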
diff --git a/model-00001-of-00072.safetensors b/model-00001-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ff0c2e0a9cb57124b62ccde8856a1f80042f1ae3
--- /dev/null
+++ b/model-00001-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3564f9fb2fa153684f59d170902f870a4398def669ed3b329c84dc7c36acfc59
+size 3551941449
diff --git a/model-00002-of-00072.safetensors b/model-00002-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5e290e41123609be6a8e2e2cb9a746ce0e745f70
--- /dev/null
+++ b/model-00002-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c70868b4f68fb7483385bc1172e891420b868f4143be6fcd284023be2df33791
+size 4940123906
diff --git a/model-00003-of-00072.safetensors b/model-00003-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..441052df0e38d43f2d8f49bfc2a9f18df523c487
--- /dev/null
+++ b/model-00003-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51ef8b2a9c3b05549c434d9bfdcdabb3131f01142c225ee456993267ac6d387f
+size 4831839000
diff --git a/model-00004-of-00072.safetensors b/model-00004-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9579a544b4ad3d47b9e5b21a82d86d257ae59324
--- /dev/null
+++ b/model-00004-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3d03e2757f13268285c0723cd617837a7d02dd1b150369c9590a6e23254fd14
+size 4940123964
diff --git a/model-00005-of-00072.safetensors b/model-00005-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..df36b89794463b620cb41d3c200b726e4bfe5cfe
--- /dev/null
+++ b/model-00005-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22d04914ea8fe70e072a2d24c19bd231f6d8694c33c11591928a872732192c75
+size 4940123918
diff --git a/model-00006-of-00072.safetensors b/model-00006-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dc0c39e9ad658fd3c5da94fbfd61c9124da24880
--- /dev/null
+++ b/model-00006-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ae2cfa90e1ef163725c757bffedc0ac57f8d3f67adf8db42eb54e0125ad5f6a
+size 4831839000
diff --git a/model-00007-of-00072.safetensors b/model-00007-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..87a96bff084824ae887b8a30743bf0f7d7469ad9
--- /dev/null
+++ b/model-00007-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27651a81025612c476f40d5bbfe0717cf683f94a4150f3c7aa9d281338e2efcd
+size 4934225474
diff --git a/model-00008-of-00072.safetensors b/model-00008-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f864742097ba53a0c99ba3bb29b8daefdce62505
--- /dev/null
+++ b/model-00008-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:297afe818c9101461e956e41c6ffcea64c6e4c269eb186bd89a1adb6df134e24
+size 4940123922
diff --git a/model-00009-of-00072.safetensors b/model-00009-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..18706478c3690d6eebe756b04027d5a4e3787eaf
--- /dev/null
+++ b/model-00009-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2778d597af0aacb7d7452bc07b1a558cddc728526de67c4aa5333c165a476bbc
+size 4831838996
diff --git a/model-00010-of-00072.safetensors b/model-00010-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a95ea1337c102314752dc6f1ecce5bc4762e2e79
--- /dev/null
+++ b/model-00010-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:167546337c5ab9986933f64167ee4dc210a2d76c89bea71b1953d6bfcb93683b
+size 4940123950
diff --git a/model-00011-of-00072.safetensors b/model-00011-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..69857ca10023dee3397ad4ade76a99d5111f71c6
--- /dev/null
+++ b/model-00011-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae596f3609aac53475670b8e8bdede26fe2cd8e07419d274803838471ae319e9
+size 4940123922
diff --git a/model-00012-of-00072.safetensors b/model-00012-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..908e0a2abb24b7ea07479de3f2e250e0f5f74757
--- /dev/null
+++ b/model-00012-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fca6f0353431e1699ddc176393464d6fb3a4e372f04c82adebb6c699447f1f9
+size 4831839000
diff --git a/model-00013-of-00072.safetensors b/model-00013-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5c12b197bde3efb923718205cddfacf2359b93f4
--- /dev/null
+++ b/model-00013-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b02f14977b040f491a622141940e197466a63c58d1270211971374cbbe950aa
+size 4940123982
diff --git a/model-00014-of-00072.safetensors b/model-00014-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..36fc011de187d1dccb83eaf7d5a483cbbfb11fc0
--- /dev/null
+++ b/model-00014-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60bd23bf51198ce852ccd4f9e0faacf5aa6e327a888395b6f35ee09da1c84f1e
+size 4940123921
diff --git a/model-00015-of-00072.safetensors b/model-00015-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5f235ca6ed38acdd95e0e19462dfe7f465bfa7cb
--- /dev/null
+++ b/model-00015-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e43e7806b443749a05ab2131006b8fa74afb37a00ddb3a062479e95c282c58d9
+size 4831839006
diff --git a/model-00016-of-00072.safetensors b/model-00016-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..044128cf3ba60ca482a5a7705d2a4efc1e743c77
--- /dev/null
+++ b/model-00016-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:927bafbce8c7106c322f0f039010ba2e9a79a169ee6390c9edf04de9ea1a05b4
+size 4934225512
diff --git a/model-00017-of-00072.safetensors b/model-00017-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..67c2bb90d6bdc1b93391dc649934458ad19f8b21
--- /dev/null
+++ b/model-00017-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8549fed8b5ec21b6f2de38c4b9483104086f13d95f8ac2e7d8ac5e72427198eb
+size 4940123949
diff --git a/model-00018-of-00072.safetensors b/model-00018-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..201e10c5bee9f65d2d0f6386ffbef6ebb84471fd
--- /dev/null
+++ b/model-00018-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d5403b03d56858389edb00a7402d65d9c4bbb18bb1db26d83bb01b1a9432cdc
+size 4831839006
diff --git a/model-00019-of-00072.safetensors b/model-00019-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9d4b73f702b8e5af70d41d22f23add58c6343ab8
--- /dev/null
+++ b/model-00019-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69c2e22e17f5af0af37ebccfdf7a064ec1c501c7335868c038686de7f575967
+size 4940123989
diff --git a/model-00020-of-00072.safetensors b/model-00020-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..41f213706053944ce37bc8726c349222973a1c12
--- /dev/null
+++ b/model-00020-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b41151e4269f6c6abd7810fb4832afb7c31c80f08bc4c1c4fb4e8d11370f986e
+size 4940123939
diff --git a/model-00021-of-00072.safetensors b/model-00021-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dde0dedb9f3ece36145f52db70ba6c3de350b414
--- /dev/null
+++ b/model-00021-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e4badd5fbeb9d18231a57f5c562f94da456aed4056962c93daaa9a4d232fefb
+size 4831839002
diff --git a/model-00022-of-00072.safetensors b/model-00022-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..73d5d3985b5ddbd3e13e82349dabea96706e3535
--- /dev/null
+++ b/model-00022-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3fc2c4b22396b24beef8e4bad859065aeb423f532a6bdc8c138996ba117b1c6
+size 4940124005
diff --git a/model-00023-of-00072.safetensors b/model-00023-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4011aaf0aad36668f3caf39b28e29b4ba057833e
--- /dev/null
+++ b/model-00023-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c986b086f7ac18916db4c6686292dccc47f57814ef8958d812d2b3762f7a5966
+size 4940123949
diff --git a/model-00024-of-00072.safetensors b/model-00024-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2272387fa9144729087521a1be97607288c215a3
--- /dev/null
+++ b/model-00024-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5178afc1af5fc10c4da438276c92cb02165c2e1cb268d537ac47a1f4c899e2a
+size 4831839006
diff --git a/model-00025-of-00072.safetensors b/model-00025-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d2f062065c5301d2dd5840aea606d3423741d548
--- /dev/null
+++ b/model-00025-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c052c3d7e91a91f0b2dab2bc4cef8b40c8864279efdd96b2ef4091c56a6e4a4c
+size 4934225508
diff --git a/model-00026-of-00072.safetensors b/model-00026-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e51a5348c600bdc6da51a8fedfc81f0fdac48cf0
--- /dev/null
+++ b/model-00026-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb1f2063520ab25f301a6e5f878478775b2af879e1bcb926955350fedd84f02e
+size 4940123957
diff --git a/model-00027-of-00072.safetensors b/model-00027-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4372ac8ca93614140d65a4df9036855c1adbc462
--- /dev/null
+++ b/model-00027-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:304b3010a5577bea4407a50159ff91ffc80de7b4ae6f189f7104b0490887945a
+size 4831839008
diff --git a/model-00028-of-00072.safetensors b/model-00028-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5c96e60647a8cdb63ef769bd689f0aaa2ce88a93
--- /dev/null
+++ b/model-00028-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80b3efd35db14d53020a41e9534a8216c2c8259186e021c3e7cd6796b9287a9b
+size 4940124003
diff --git a/model-00029-of-00072.safetensors b/model-00029-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5390d7868cdeb19406f9b7d955acfd89449b1039
--- /dev/null
+++ b/model-00029-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bab7d8b396c84f586c5d38f7d9be903ff80f3f392bafcd7ee99b214cf455397d
+size 4940123947
diff --git a/model-00030-of-00072.safetensors b/model-00030-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ce1d00b6c0d915d9d5eb07d95bab2d87baf16c3b
--- /dev/null
+++ b/model-00030-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e25e2c2041443a60156b160c75670bfabb982e7514f6a45679539342647ef02b
+size 4831839002
diff --git a/model-00031-of-00072.safetensors b/model-00031-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..801a9183fc12f69b44e879d9058762a9272fbcec
--- /dev/null
+++ b/model-00031-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6121c44ab3509c7787f33601ddef916c217d6fedd238b7c13f6d8c3f96094954
+size 4940124003
diff --git a/model-00032-of-00072.safetensors b/model-00032-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c464d4fed51a810a4845d9a23ffbad0164164019
--- /dev/null
+++ b/model-00032-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c79b19ad7c4a8d4f136add9fdb1fa2a3e4b56ff2a822f2520bb8ba35f41638f
+size 4940123935
diff --git a/model-00033-of-00072.safetensors b/model-00033-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fda1ea5ebd330089dc3adfa91621d4cb7a63f9b3
--- /dev/null
+++ b/model-00033-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65e739e529e6e3924a4084df384f168b7491640e9615f6e72c707d41558426f0
+size 4831839004
diff --git a/model-00034-of-00072.safetensors b/model-00034-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..203e921bee7b4775a86edf227a883dc8e2f836fb
--- /dev/null
+++ b/model-00034-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:912778eba874a4f2ce33a320a3f224d7c36bd907f7440bb9d51f5e8002e71255
+size 4934225514
diff --git a/model-00035-of-00072.safetensors b/model-00035-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0e545063391dcccb23bd01e34098c01e570f7541
--- /dev/null
+++ b/model-00035-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa3a6d658e6ae5d863f7331a115af2ff9a4a8263d7efb1675ac245407929d710
+size 4940123937
diff --git a/model-00036-of-00072.safetensors b/model-00036-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5c20d30aeb4a4631cd0f0e4cf8aeff983a79292b
--- /dev/null
+++ b/model-00036-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0a4f7214ae8bea3dd6af704268aa82fab93dd57ffe990e1d86340dae162962a
+size 4831839000
diff --git a/model-00037-of-00072.safetensors b/model-00037-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0a483970532185f46b8be5f54c199c69459a3280
--- /dev/null
+++ b/model-00037-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7729a68882abf56b6d9ec561f8b32bb6ac713f4f3f63142a0a4e08554cc2f965
+size 4940124001
diff --git a/model-00038-of-00072.safetensors b/model-00038-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..285a9ab691fe0e2e156bf51ba8562b535d682440
--- /dev/null
+++ b/model-00038-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05f8f17a0c58090dda6d28028969a2a22abab57cfd9821aafabeb4d07035355e
+size 4940123941
diff --git a/model-00039-of-00072.safetensors b/model-00039-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..303567f463a134fea7e5a165d6101c1b7e1c6158
--- /dev/null
+++ b/model-00039-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d9cc35fd4ec45e2e6fb6d2b78d4646b653323e49ecda4cb375ac7d0691213ef
+size 4831839006
diff --git a/model-00040-of-00072.safetensors b/model-00040-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d457dd7af2c87b7a2577aa548f1ef7f5922943c6
--- /dev/null
+++ b/model-00040-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3e30e6a71b5abeff02db6b924685fb0e7d777c4cacc0d53994aa0f9735936ea
+size 4940124003
diff --git a/model-00041-of-00072.safetensors b/model-00041-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1258e220f39bbeb92b4ed7819ebe86aa340cdfc0
--- /dev/null
+++ b/model-00041-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:226592038f40014d8f58d3738315f7f0dfd4381ffa36c2b31307161ce1eea94a
+size 4940123941
diff --git a/model-00042-of-00072.safetensors b/model-00042-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f85145e915a2b8e0504f34a9f6924376d0798fe9
--- /dev/null
+++ b/model-00042-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53c3adf5c3dbee1bd61e31a7d61c593be2ab8290c387d7dd73548db4687804d0
+size 4831839002
diff --git a/model-00043-of-00072.safetensors b/model-00043-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..034904bbf138a7afd2a635a68bf26e1d587fa5e6
--- /dev/null
+++ b/model-00043-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aed27333b8c5713e963d1f25d2f0c0c5894de02737d000bcabec21875744594f
+size 4934225510
diff --git a/model-00044-of-00072.safetensors b/model-00044-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ce56d5a8c52b8e68049756aab8d64cac3628cffb
--- /dev/null
+++ b/model-00044-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24a262a7a7e2f7316ff73d94e520ed7c67c597c031842e546a26b952501e629c
+size 4940123947
diff --git a/model-00045-of-00072.safetensors b/model-00045-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..54c75baf83a7c23b9d49fe7cf2fdf93a5185eaf8
--- /dev/null
+++ b/model-00045-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74cfc7925b332a5d57e92805b8263d7211735c25e3f21dd19c5072602bb9f12c
+size 4831839006
diff --git a/model-00046-of-00072.safetensors b/model-00046-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c4e8e6332ed47868a20813589a1606152e33833d
--- /dev/null
+++ b/model-00046-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2db647c5c1b655b8bdfe0a9a4aad91095fb15c9216798f288f96aa91278b1176
+size 4940123981
diff --git a/model-00047-of-00072.safetensors b/model-00047-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3dd518b9bc1747d09e872f3f629a75faae27e2d4
--- /dev/null
+++ b/model-00047-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:480c38471988cc830d4279a27af0cab04919b7c28079d1a6572aa447661056b7
+size 4940123963
diff --git a/model-00048-of-00072.safetensors b/model-00048-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..89d4165f0f2fc506e1f36038bbf45e8a68ed272f
--- /dev/null
+++ b/model-00048-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d9d20439fc9ca61ca48deaf3d03b17d7bce22e126983346bdc3edc659dc6b49
+size 4831839006
diff --git a/model-00049-of-00072.safetensors b/model-00049-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bba419d6e4aa5409e65133426c053fc310b6f533
--- /dev/null
+++ b/model-00049-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2893d69887a8fb418e74b686400610c2ba09f2e8b0a4212e7110678192f74c1e
+size 4940124005
diff --git a/model-00050-of-00072.safetensors b/model-00050-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b2cc707223a1c607a03b37e054ff6453693070b8
--- /dev/null
+++ b/model-00050-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d034d7e1d74f391153fa8a4b0c410a6a22029b18410e33e973389d55a8e733b0
+size 4940123947
diff --git a/model-00051-of-00072.safetensors b/model-00051-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9bb0fdd1da00ad997cc0f49db14857893d137b1d
--- /dev/null
+++ b/model-00051-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96ee9263178dce43f606d3c39890aaf29b6f9dd1820319fe1f8f2b6412c47e75
+size 4831839006
diff --git a/model-00052-of-00072.safetensors b/model-00052-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b42cff075d8edaa53548476037d1384fc4a30671
--- /dev/null
+++ b/model-00052-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dad34352bff15554cdb7850f08347fd05b0471e0229bcbcbf2425199a7449c92
+size 4934225478
diff --git a/model-00053-of-00072.safetensors b/model-00053-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7b6a5a62fefbc277c243e1e5c313cb49415804eb
--- /dev/null
+++ b/model-00053-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:994be7f8cc88786f6999ac4cf9a1625a81eee821c10a6fad4c704e3dc9eb3161
+size 4940123933
diff --git a/model-00054-of-00072.safetensors b/model-00054-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1857da5b4c60de980d24c223048a9e264d5eb20d
--- /dev/null
+++ b/model-00054-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7818d83a9ff866acff6f21bf9c1f25d33db0ce6ec55ebeade8f94b538f9b7799
+size 4831839008
diff --git a/model-00055-of-00072.safetensors b/model-00055-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6a5d325aa8141550eaff1e7433d761f5b13c9da9
--- /dev/null
+++ b/model-00055-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19950175c459666adbfabae35f30750d3d76b92ad6022e141c5a7766d4f4c41a
+size 4940123997
diff --git a/model-00056-of-00072.safetensors b/model-00056-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1f1ce6a05d2f79eac3f3c6ca6262bbee293daa62
--- /dev/null
+++ b/model-00056-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:491aebd362f1ab48a768d83da40f10e7d383c7b3691e6f87745be3bfb80a0434
+size 4940123929
diff --git a/model-00057-of-00072.safetensors b/model-00057-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2b4d68074808baac8287e682ea1516668cf61f68
--- /dev/null
+++ b/model-00057-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0df948325e18b7262b10dfef05c2885ef625f677b3aa072e0411db8b5e5dc62a
+size 4831839004
diff --git a/model-00058-of-00072.safetensors b/model-00058-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..311cc9a7f8e3e2199be45a01820c1646e5cd065e
--- /dev/null
+++ b/model-00058-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2332f0e4bedcc9ed7b6193e6f34b5def15aeb8d0e957a70ab5fa446a88bf2e0d
+size 4940123965
diff --git a/model-00059-of-00072.safetensors b/model-00059-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..18965b4e132c1106dade2b850909475742c2435e
--- /dev/null
+++ b/model-00059-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ceac2faa7b295d7f02032f0b805734aec755d3ca4368ca7cb6bb7eb9c1a4666
+size 4940123935
diff --git a/model-00060-of-00072.safetensors b/model-00060-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9aee41b6ea8db7afe3555e38dc71caa2d075ae7e
--- /dev/null
+++ b/model-00060-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2031516d8d063d15a5cff9c6f1f012442c84654ff0f7608a835defa88236d715
+size 4831839006
diff --git a/model-00061-of-00072.safetensors b/model-00061-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0a07218745e0fdef1679ff672e201650d06cdb26
--- /dev/null
+++ b/model-00061-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f569d63fb70077d301ad250eb1e2f61dd05d3b9849c8b90030991d0c68bd41a9
+size 4934225510
diff --git a/model-00062-of-00072.safetensors b/model-00062-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d9b84605931ca6f6f21f690bd3c5f6c7bd556d57
--- /dev/null
+++ b/model-00062-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f9dc91fcbaa33fbd7a6967d6d13058ac4df22b85879c13f16a81ac24639a3f4
+size 4940123941
diff --git a/model-00063-of-00072.safetensors b/model-00063-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9ddf6a47249addce08e5905359c965c7d02b97f4
--- /dev/null
+++ b/model-00063-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39ae2beb0287a2fec907fea494d9110e9d0d6df739311e86bf717834dab3b32d
+size 4831839006
diff --git a/model-00064-of-00072.safetensors b/model-00064-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b6586ed5b0ceea4d4ae10ecc01714c35dac72c48
--- /dev/null
+++ b/model-00064-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31a5b18e5eea9d2d4a6172326270731e08cb0792aadc1e573aed1787f922eef1
+size 4940123965
diff --git a/model-00065-of-00072.safetensors b/model-00065-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cc7e38a05b1abdc73b4704acd7194df5219849e9
--- /dev/null
+++ b/model-00065-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d22377b4f70190e2536dd817ada712fd2f4d46d8ed3952b02e3ceb64f76219e
+size 4940123917
diff --git a/model-00066-of-00072.safetensors b/model-00066-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..affb0b6e63e59ad15eaba5e171abaa690354c6b3
--- /dev/null
+++ b/model-00066-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81e5fd0f5773923d37323f42efef79cf970026201039bb6b6d6c0d6cdf355913
+size 4831839006
diff --git a/model-00067-of-00072.safetensors b/model-00067-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..60ace09ce0caf634aff8f003b67f3bc13aca870c
--- /dev/null
+++ b/model-00067-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac81dfc5dfb38658a3368a0e7a77e18a52006494e448f7a3de7e0b45352d1cfe
+size 4940124005
diff --git a/model-00068-of-00072.safetensors b/model-00068-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e4bd1f1ee732440e014768677652824e29d12364
--- /dev/null
+++ b/model-00068-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a78e33de5bf9839d21712f98bb4a558eb18d75ad5238ac63a3e3f5fb4e5c9d7c
+size 4940123931
diff --git a/model-00069-of-00072.safetensors b/model-00069-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..803e39d53b450ae15ce559de1ca0d2fd2db6e6a7
--- /dev/null
+++ b/model-00069-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b2f86eeb9dbf98610a982b4e074fda0b33c1d277de9c8d0d42cbf55b4dff58e
+size 4831839000
diff --git a/model-00070-of-00072.safetensors b/model-00070-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ca9b6a033c97ebe613aaf146b3b34c5f92deee8e
--- /dev/null
+++ b/model-00070-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:918f875a671f589bac28494ad9beb0a49c80b03950c7201eb08590c24ef0ffb9
+size 4934225488
diff --git a/model-00071-of-00072.safetensors b/model-00071-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d084b61c7609f036a220f8d36397bda420849bf0
--- /dev/null
+++ b/model-00071-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3ec4c1b38eb45a41e17b6df1cd20ec6df2f13176ed62a86ef8c3f4a4f2e24f6
+size 4833962256
diff --git a/model-00072-of-00072.safetensors b/model-00072-of-00072.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ce1c6b4267de4f5a802759a8a431c9507866690c
--- /dev/null
+++ b/model-00072-of-00072.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df5199871274a30e3653b1ea8d685c307a13ee3f957e37677613e3885fc2a657
+size 703070513
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..ede727a62b948a108948cf1b508cca1be04883d7
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,1252 @@
+{
+ "metadata": {
+ "total_size": 347419635584,
+ "total_parameters": 308778778368
+ },
+ "weight_map": {
+ "lm_head.biases": "model-00072-of-00072.safetensors",
+ "lm_head.scales": "model-00072-of-00072.safetensors",
+ "lm_head.weight": "model-00072-of-00072.safetensors",
+ "model.embed_tokens.biases": "model-00001-of-00072.safetensors",
+ "model.embed_tokens.scales": "model-00001-of-00072.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.down_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.down_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.gate_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.up_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.up_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.k_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.q_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.v_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.gate.e_score_correction_bias": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.gate.weight": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.down_proj.biases": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.down_proj.scales": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.down_proj.weight": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.gate_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.gate_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.gate_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.up_proj.biases": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.up_proj.scales": "model-00002-of-00072.safetensors",
+ "model.layers.1.mlp.switch_mlp.up_proj.weight": "model-00002-of-00072.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00002-of-00072.safetensors",
+ "model.layers.1.self_attn.attention_sink_bias": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.k_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.q_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.v_proj.biases": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00072.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00072.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00016-of-00072.safetensors",
+ "model.layers.10.mlp.gate.e_score_correction_bias": "model-00016-of-00072.safetensors",
+ "model.layers.10.mlp.gate.weight": "model-00016-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.down_proj.biases": "model-00016-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.down_proj.scales": "model-00016-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.down_proj.weight": "model-00016-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.gate_proj.biases": "model-00015-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.gate_proj.scales": "model-00015-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.gate_proj.weight": "model-00015-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.up_proj.biases": "model-00015-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.up_proj.scales": "model-00015-of-00072.safetensors",
+ "model.layers.10.mlp.switch_mlp.up_proj.weight": "model-00015-of-00072.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00016-of-00072.safetensors",
+ "model.layers.10.self_attn.attention_sink_bias": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.k_proj.biases": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.k_proj.scales": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.o_proj.biases": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.o_proj.scales": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.q_proj.biases": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.q_proj.scales": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.v_proj.biases": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.v_proj.scales": "model-00014-of-00072.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00014-of-00072.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.gate.e_score_correction_bias": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.gate.weight": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.down_proj.biases": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.down_proj.scales": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.down_proj.weight": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.gate_proj.biases": "model-00016-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.gate_proj.scales": "model-00016-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.gate_proj.weight": "model-00016-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.up_proj.biases": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.up_proj.scales": "model-00017-of-00072.safetensors",
+ "model.layers.11.mlp.switch_mlp.up_proj.weight": "model-00017-of-00072.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00017-of-00072.safetensors",
+ "model.layers.11.self_attn.k_proj.biases": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.k_proj.scales": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.o_proj.biases": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.o_proj.scales": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.q_proj.biases": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.q_proj.scales": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.v_proj.biases": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.v_proj.scales": "model-00016-of-00072.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00016-of-00072.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00019-of-00072.safetensors",
+ "model.layers.12.mlp.gate.e_score_correction_bias": "model-00019-of-00072.safetensors",
+ "model.layers.12.mlp.gate.weight": "model-00019-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.down_proj.biases": "model-00019-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.down_proj.scales": "model-00019-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.down_proj.weight": "model-00019-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.gate_proj.biases": "model-00018-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.gate_proj.scales": "model-00018-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.gate_proj.weight": "model-00018-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.up_proj.biases": "model-00018-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.up_proj.scales": "model-00018-of-00072.safetensors",
+ "model.layers.12.mlp.switch_mlp.up_proj.weight": "model-00018-of-00072.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00019-of-00072.safetensors",
+ "model.layers.12.self_attn.attention_sink_bias": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.k_proj.biases": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.k_proj.scales": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.o_proj.biases": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.o_proj.scales": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.q_proj.biases": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.q_proj.scales": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.v_proj.biases": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.v_proj.scales": "model-00017-of-00072.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00017-of-00072.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.gate.e_score_correction_bias": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.gate.weight": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.down_proj.biases": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.down_proj.scales": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.down_proj.weight": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.gate_proj.biases": "model-00019-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.gate_proj.scales": "model-00019-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.gate_proj.weight": "model-00019-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.up_proj.biases": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.up_proj.scales": "model-00020-of-00072.safetensors",
+ "model.layers.13.mlp.switch_mlp.up_proj.weight": "model-00020-of-00072.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00020-of-00072.safetensors",
+ "model.layers.13.self_attn.attention_sink_bias": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.k_proj.biases": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.k_proj.scales": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.o_proj.biases": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.o_proj.scales": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.q_proj.biases": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.q_proj.scales": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.v_proj.biases": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.v_proj.scales": "model-00019-of-00072.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00019-of-00072.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00022-of-00072.safetensors",
+ "model.layers.14.mlp.gate.e_score_correction_bias": "model-00022-of-00072.safetensors",
+ "model.layers.14.mlp.gate.weight": "model-00022-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.down_proj.biases": "model-00022-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.down_proj.scales": "model-00022-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.down_proj.weight": "model-00022-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.gate_proj.biases": "model-00021-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.gate_proj.scales": "model-00021-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.gate_proj.weight": "model-00021-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.up_proj.biases": "model-00021-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.up_proj.scales": "model-00021-of-00072.safetensors",
+ "model.layers.14.mlp.switch_mlp.up_proj.weight": "model-00021-of-00072.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00022-of-00072.safetensors",
+ "model.layers.14.self_attn.attention_sink_bias": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.k_proj.biases": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.k_proj.scales": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.o_proj.biases": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.o_proj.scales": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.q_proj.biases": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.q_proj.scales": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.v_proj.biases": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.v_proj.scales": "model-00020-of-00072.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00020-of-00072.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.gate.e_score_correction_bias": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.gate.weight": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.down_proj.biases": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.down_proj.scales": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.down_proj.weight": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.gate_proj.biases": "model-00022-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.gate_proj.scales": "model-00022-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.gate_proj.weight": "model-00022-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.up_proj.biases": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.up_proj.scales": "model-00023-of-00072.safetensors",
+ "model.layers.15.mlp.switch_mlp.up_proj.weight": "model-00023-of-00072.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00023-of-00072.safetensors",
+ "model.layers.15.self_attn.attention_sink_bias": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.k_proj.biases": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.k_proj.scales": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.o_proj.biases": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.o_proj.scales": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.q_proj.biases": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.q_proj.scales": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.v_proj.biases": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.v_proj.scales": "model-00022-of-00072.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00022-of-00072.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00025-of-00072.safetensors",
+ "model.layers.16.mlp.gate.e_score_correction_bias": "model-00025-of-00072.safetensors",
+ "model.layers.16.mlp.gate.weight": "model-00025-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.down_proj.biases": "model-00025-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.down_proj.scales": "model-00025-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.down_proj.weight": "model-00025-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.gate_proj.biases": "model-00024-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.gate_proj.scales": "model-00024-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.gate_proj.weight": "model-00024-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.up_proj.biases": "model-00024-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.up_proj.scales": "model-00024-of-00072.safetensors",
+ "model.layers.16.mlp.switch_mlp.up_proj.weight": "model-00024-of-00072.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00025-of-00072.safetensors",
+ "model.layers.16.self_attn.attention_sink_bias": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.k_proj.biases": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.k_proj.scales": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.o_proj.biases": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.o_proj.scales": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.q_proj.biases": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.q_proj.scales": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.v_proj.biases": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.v_proj.scales": "model-00023-of-00072.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00023-of-00072.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.gate.e_score_correction_bias": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.gate.weight": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.down_proj.biases": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.down_proj.scales": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.down_proj.weight": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.gate_proj.biases": "model-00025-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.gate_proj.scales": "model-00025-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.gate_proj.weight": "model-00025-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.up_proj.biases": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.up_proj.scales": "model-00026-of-00072.safetensors",
+ "model.layers.17.mlp.switch_mlp.up_proj.weight": "model-00026-of-00072.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00026-of-00072.safetensors",
+ "model.layers.17.self_attn.k_proj.biases": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.k_proj.scales": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.o_proj.biases": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.o_proj.scales": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.q_proj.biases": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.q_proj.scales": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.v_proj.biases": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.v_proj.scales": "model-00025-of-00072.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00025-of-00072.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00028-of-00072.safetensors",
+ "model.layers.18.mlp.gate.e_score_correction_bias": "model-00028-of-00072.safetensors",
+ "model.layers.18.mlp.gate.weight": "model-00028-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.down_proj.biases": "model-00028-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.down_proj.scales": "model-00028-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.down_proj.weight": "model-00028-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.gate_proj.biases": "model-00027-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.gate_proj.scales": "model-00027-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.gate_proj.weight": "model-00027-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.up_proj.biases": "model-00027-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.up_proj.scales": "model-00027-of-00072.safetensors",
+ "model.layers.18.mlp.switch_mlp.up_proj.weight": "model-00027-of-00072.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00028-of-00072.safetensors",
+ "model.layers.18.self_attn.attention_sink_bias": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.k_proj.biases": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.k_proj.scales": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.o_proj.biases": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.o_proj.scales": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.q_proj.biases": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.q_proj.scales": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.v_proj.biases": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.v_proj.scales": "model-00026-of-00072.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00026-of-00072.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.gate.e_score_correction_bias": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.gate.weight": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.down_proj.biases": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.down_proj.scales": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.down_proj.weight": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.gate_proj.biases": "model-00028-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.gate_proj.scales": "model-00028-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.gate_proj.weight": "model-00028-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.up_proj.biases": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.up_proj.scales": "model-00029-of-00072.safetensors",
+ "model.layers.19.mlp.switch_mlp.up_proj.weight": "model-00029-of-00072.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00029-of-00072.safetensors",
+ "model.layers.19.self_attn.attention_sink_bias": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.k_proj.biases": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.k_proj.scales": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.o_proj.biases": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.o_proj.scales": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.q_proj.biases": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.q_proj.scales": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.v_proj.biases": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.v_proj.scales": "model-00028-of-00072.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00028-of-00072.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00004-of-00072.safetensors",
+ "model.layers.2.mlp.gate.e_score_correction_bias": "model-00004-of-00072.safetensors",
+ "model.layers.2.mlp.gate.weight": "model-00004-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.down_proj.biases": "model-00004-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.down_proj.scales": "model-00004-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.down_proj.weight": "model-00004-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.gate_proj.biases": "model-00003-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.gate_proj.scales": "model-00003-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.gate_proj.weight": "model-00003-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.up_proj.biases": "model-00003-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.up_proj.scales": "model-00003-of-00072.safetensors",
+ "model.layers.2.mlp.switch_mlp.up_proj.weight": "model-00003-of-00072.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00004-of-00072.safetensors",
+ "model.layers.2.self_attn.attention_sink_bias": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.k_proj.biases": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.k_proj.scales": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.o_proj.biases": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.o_proj.scales": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.q_proj.biases": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.q_proj.scales": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.v_proj.biases": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.v_proj.scales": "model-00002-of-00072.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00072.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00031-of-00072.safetensors",
+ "model.layers.20.mlp.gate.e_score_correction_bias": "model-00031-of-00072.safetensors",
+ "model.layers.20.mlp.gate.weight": "model-00031-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.down_proj.biases": "model-00031-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.down_proj.scales": "model-00031-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.down_proj.weight": "model-00031-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.gate_proj.biases": "model-00030-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.gate_proj.scales": "model-00030-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.gate_proj.weight": "model-00030-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.up_proj.biases": "model-00030-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.up_proj.scales": "model-00030-of-00072.safetensors",
+ "model.layers.20.mlp.switch_mlp.up_proj.weight": "model-00030-of-00072.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00031-of-00072.safetensors",
+ "model.layers.20.self_attn.attention_sink_bias": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.k_proj.biases": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.k_proj.scales": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.o_proj.biases": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.o_proj.scales": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.q_proj.biases": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.q_proj.scales": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.v_proj.biases": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.v_proj.scales": "model-00029-of-00072.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00029-of-00072.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.gate.e_score_correction_bias": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.gate.weight": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.down_proj.biases": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.down_proj.scales": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.down_proj.weight": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.gate_proj.biases": "model-00031-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.gate_proj.scales": "model-00031-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.gate_proj.weight": "model-00031-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.up_proj.biases": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.up_proj.scales": "model-00032-of-00072.safetensors",
+ "model.layers.21.mlp.switch_mlp.up_proj.weight": "model-00032-of-00072.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00032-of-00072.safetensors",
+ "model.layers.21.self_attn.attention_sink_bias": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.k_proj.biases": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.k_proj.scales": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.o_proj.biases": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.o_proj.scales": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.q_proj.biases": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.q_proj.scales": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.v_proj.biases": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.v_proj.scales": "model-00031-of-00072.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00031-of-00072.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00034-of-00072.safetensors",
+ "model.layers.22.mlp.gate.e_score_correction_bias": "model-00034-of-00072.safetensors",
+ "model.layers.22.mlp.gate.weight": "model-00034-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.down_proj.biases": "model-00034-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.down_proj.scales": "model-00034-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.down_proj.weight": "model-00034-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.gate_proj.biases": "model-00033-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.gate_proj.scales": "model-00033-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.gate_proj.weight": "model-00033-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.up_proj.biases": "model-00033-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.up_proj.scales": "model-00033-of-00072.safetensors",
+ "model.layers.22.mlp.switch_mlp.up_proj.weight": "model-00033-of-00072.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00034-of-00072.safetensors",
+ "model.layers.22.self_attn.attention_sink_bias": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.k_proj.biases": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.k_proj.scales": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.o_proj.biases": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.o_proj.scales": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.q_proj.biases": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.q_proj.scales": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.v_proj.biases": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.v_proj.scales": "model-00032-of-00072.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00032-of-00072.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.gate.e_score_correction_bias": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.gate.weight": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.down_proj.biases": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.down_proj.scales": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.down_proj.weight": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.gate_proj.biases": "model-00034-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.gate_proj.scales": "model-00034-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.gate_proj.weight": "model-00034-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.up_proj.biases": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.up_proj.scales": "model-00035-of-00072.safetensors",
+ "model.layers.23.mlp.switch_mlp.up_proj.weight": "model-00035-of-00072.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00035-of-00072.safetensors",
+ "model.layers.23.self_attn.k_proj.biases": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.k_proj.scales": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.o_proj.biases": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.o_proj.scales": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.q_proj.biases": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.q_proj.scales": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.v_proj.biases": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.v_proj.scales": "model-00034-of-00072.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00034-of-00072.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00037-of-00072.safetensors",
+ "model.layers.24.mlp.gate.e_score_correction_bias": "model-00037-of-00072.safetensors",
+ "model.layers.24.mlp.gate.weight": "model-00037-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.down_proj.biases": "model-00037-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.down_proj.scales": "model-00037-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.down_proj.weight": "model-00037-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.gate_proj.biases": "model-00036-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.gate_proj.scales": "model-00036-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.gate_proj.weight": "model-00036-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.up_proj.biases": "model-00036-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.up_proj.scales": "model-00036-of-00072.safetensors",
+ "model.layers.24.mlp.switch_mlp.up_proj.weight": "model-00036-of-00072.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00037-of-00072.safetensors",
+ "model.layers.24.self_attn.attention_sink_bias": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.k_proj.biases": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.k_proj.scales": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.o_proj.biases": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.o_proj.scales": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.q_proj.biases": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.q_proj.scales": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.v_proj.biases": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.v_proj.scales": "model-00035-of-00072.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00035-of-00072.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.gate.e_score_correction_bias": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.gate.weight": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.down_proj.biases": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.down_proj.scales": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.down_proj.weight": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.gate_proj.biases": "model-00037-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.gate_proj.scales": "model-00037-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.gate_proj.weight": "model-00037-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.up_proj.biases": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.up_proj.scales": "model-00038-of-00072.safetensors",
+ "model.layers.25.mlp.switch_mlp.up_proj.weight": "model-00038-of-00072.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00038-of-00072.safetensors",
+ "model.layers.25.self_attn.attention_sink_bias": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.k_proj.biases": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.k_proj.scales": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.o_proj.biases": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.o_proj.scales": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.q_proj.biases": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.q_proj.scales": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.v_proj.biases": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.v_proj.scales": "model-00037-of-00072.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00037-of-00072.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00040-of-00072.safetensors",
+ "model.layers.26.mlp.gate.e_score_correction_bias": "model-00040-of-00072.safetensors",
+ "model.layers.26.mlp.gate.weight": "model-00040-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.down_proj.biases": "model-00040-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.down_proj.scales": "model-00040-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.down_proj.weight": "model-00040-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.gate_proj.biases": "model-00039-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.gate_proj.scales": "model-00039-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.gate_proj.weight": "model-00039-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.up_proj.biases": "model-00039-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.up_proj.scales": "model-00039-of-00072.safetensors",
+ "model.layers.26.mlp.switch_mlp.up_proj.weight": "model-00039-of-00072.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00040-of-00072.safetensors",
+ "model.layers.26.self_attn.attention_sink_bias": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.k_proj.biases": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.k_proj.scales": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.o_proj.biases": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.o_proj.scales": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.q_proj.biases": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.q_proj.scales": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.v_proj.biases": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.v_proj.scales": "model-00038-of-00072.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00038-of-00072.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.gate.e_score_correction_bias": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.gate.weight": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.down_proj.biases": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.down_proj.scales": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.down_proj.weight": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.gate_proj.biases": "model-00040-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.gate_proj.scales": "model-00040-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.gate_proj.weight": "model-00040-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.up_proj.biases": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.up_proj.scales": "model-00041-of-00072.safetensors",
+ "model.layers.27.mlp.switch_mlp.up_proj.weight": "model-00041-of-00072.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00041-of-00072.safetensors",
+ "model.layers.27.self_attn.attention_sink_bias": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.k_proj.biases": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.k_proj.scales": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.o_proj.biases": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.o_proj.scales": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.q_proj.biases": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.q_proj.scales": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.v_proj.biases": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.v_proj.scales": "model-00040-of-00072.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00040-of-00072.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00043-of-00072.safetensors",
+ "model.layers.28.mlp.gate.e_score_correction_bias": "model-00043-of-00072.safetensors",
+ "model.layers.28.mlp.gate.weight": "model-00043-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.down_proj.biases": "model-00043-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.down_proj.scales": "model-00043-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.down_proj.weight": "model-00043-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.gate_proj.biases": "model-00042-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.gate_proj.scales": "model-00042-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.gate_proj.weight": "model-00042-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.up_proj.biases": "model-00042-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.up_proj.scales": "model-00042-of-00072.safetensors",
+ "model.layers.28.mlp.switch_mlp.up_proj.weight": "model-00042-of-00072.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00043-of-00072.safetensors",
+ "model.layers.28.self_attn.attention_sink_bias": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.k_proj.biases": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.k_proj.scales": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.o_proj.biases": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.o_proj.scales": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.q_proj.biases": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.q_proj.scales": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.v_proj.biases": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.v_proj.scales": "model-00041-of-00072.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00041-of-00072.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.gate.e_score_correction_bias": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.gate.weight": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.down_proj.biases": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.down_proj.scales": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.down_proj.weight": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.gate_proj.biases": "model-00043-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.gate_proj.scales": "model-00043-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.gate_proj.weight": "model-00043-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.up_proj.biases": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.up_proj.scales": "model-00044-of-00072.safetensors",
+ "model.layers.29.mlp.switch_mlp.up_proj.weight": "model-00044-of-00072.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00044-of-00072.safetensors",
+ "model.layers.29.self_attn.k_proj.biases": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.k_proj.scales": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.o_proj.biases": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.o_proj.scales": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.q_proj.biases": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.q_proj.scales": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.v_proj.biases": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.v_proj.scales": "model-00043-of-00072.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00043-of-00072.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.gate.e_score_correction_bias": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.gate.weight": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.down_proj.biases": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.down_proj.scales": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.down_proj.weight": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.gate_proj.biases": "model-00004-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.gate_proj.scales": "model-00004-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.gate_proj.weight": "model-00004-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.up_proj.biases": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.up_proj.scales": "model-00005-of-00072.safetensors",
+ "model.layers.3.mlp.switch_mlp.up_proj.weight": "model-00005-of-00072.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00005-of-00072.safetensors",
+ "model.layers.3.self_attn.attention_sink_bias": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.k_proj.biases": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.k_proj.scales": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.o_proj.biases": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.o_proj.scales": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.q_proj.biases": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.q_proj.scales": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.v_proj.biases": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.v_proj.scales": "model-00004-of-00072.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00004-of-00072.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00046-of-00072.safetensors",
+ "model.layers.30.mlp.gate.e_score_correction_bias": "model-00046-of-00072.safetensors",
+ "model.layers.30.mlp.gate.weight": "model-00046-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.down_proj.biases": "model-00046-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.down_proj.scales": "model-00046-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.down_proj.weight": "model-00046-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.gate_proj.biases": "model-00045-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.gate_proj.scales": "model-00045-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.gate_proj.weight": "model-00045-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.up_proj.biases": "model-00045-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.up_proj.scales": "model-00045-of-00072.safetensors",
+ "model.layers.30.mlp.switch_mlp.up_proj.weight": "model-00045-of-00072.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00046-of-00072.safetensors",
+ "model.layers.30.self_attn.attention_sink_bias": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.k_proj.biases": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.k_proj.scales": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.o_proj.biases": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.o_proj.scales": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.q_proj.biases": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.q_proj.scales": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.v_proj.biases": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.v_proj.scales": "model-00044-of-00072.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00044-of-00072.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.gate.e_score_correction_bias": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.gate.weight": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.down_proj.biases": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.down_proj.scales": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.down_proj.weight": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.gate_proj.biases": "model-00046-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.gate_proj.scales": "model-00046-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.gate_proj.weight": "model-00046-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.up_proj.biases": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.up_proj.scales": "model-00047-of-00072.safetensors",
+ "model.layers.31.mlp.switch_mlp.up_proj.weight": "model-00047-of-00072.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00047-of-00072.safetensors",
+ "model.layers.31.self_attn.attention_sink_bias": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.k_proj.biases": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.k_proj.scales": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.o_proj.biases": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.o_proj.scales": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.q_proj.biases": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.q_proj.scales": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.v_proj.biases": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.v_proj.scales": "model-00046-of-00072.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00046-of-00072.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00049-of-00072.safetensors",
+ "model.layers.32.mlp.gate.e_score_correction_bias": "model-00049-of-00072.safetensors",
+ "model.layers.32.mlp.gate.weight": "model-00049-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.down_proj.biases": "model-00049-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.down_proj.scales": "model-00049-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.down_proj.weight": "model-00049-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.gate_proj.biases": "model-00048-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.gate_proj.scales": "model-00048-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.gate_proj.weight": "model-00048-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.up_proj.biases": "model-00048-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.up_proj.scales": "model-00048-of-00072.safetensors",
+ "model.layers.32.mlp.switch_mlp.up_proj.weight": "model-00048-of-00072.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00049-of-00072.safetensors",
+ "model.layers.32.self_attn.attention_sink_bias": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.k_proj.biases": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.k_proj.scales": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.o_proj.biases": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.o_proj.scales": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.q_proj.biases": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.q_proj.scales": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.v_proj.biases": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.v_proj.scales": "model-00047-of-00072.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00047-of-00072.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.gate.e_score_correction_bias": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.gate.weight": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.down_proj.biases": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.down_proj.scales": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.down_proj.weight": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.gate_proj.biases": "model-00049-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.gate_proj.scales": "model-00049-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.gate_proj.weight": "model-00049-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.up_proj.biases": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.up_proj.scales": "model-00050-of-00072.safetensors",
+ "model.layers.33.mlp.switch_mlp.up_proj.weight": "model-00050-of-00072.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00050-of-00072.safetensors",
+ "model.layers.33.self_attn.attention_sink_bias": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.k_proj.biases": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.k_proj.scales": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.o_proj.biases": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.o_proj.scales": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.q_proj.biases": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.q_proj.scales": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.v_proj.biases": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.v_proj.scales": "model-00049-of-00072.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00049-of-00072.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00052-of-00072.safetensors",
+ "model.layers.34.mlp.gate.e_score_correction_bias": "model-00052-of-00072.safetensors",
+ "model.layers.34.mlp.gate.weight": "model-00052-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.down_proj.biases": "model-00052-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.down_proj.scales": "model-00052-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.down_proj.weight": "model-00052-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.gate_proj.biases": "model-00051-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.gate_proj.scales": "model-00051-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.gate_proj.weight": "model-00051-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.up_proj.biases": "model-00051-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.up_proj.scales": "model-00051-of-00072.safetensors",
+ "model.layers.34.mlp.switch_mlp.up_proj.weight": "model-00051-of-00072.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00052-of-00072.safetensors",
+ "model.layers.34.self_attn.attention_sink_bias": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.k_proj.biases": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.k_proj.scales": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.o_proj.biases": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.o_proj.scales": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.q_proj.biases": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.q_proj.scales": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.v_proj.biases": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.v_proj.scales": "model-00050-of-00072.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00050-of-00072.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.gate.e_score_correction_bias": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.gate.weight": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.down_proj.biases": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.down_proj.scales": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.down_proj.weight": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.gate_proj.biases": "model-00052-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.gate_proj.scales": "model-00052-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.gate_proj.weight": "model-00052-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.up_proj.biases": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.up_proj.scales": "model-00053-of-00072.safetensors",
+ "model.layers.35.mlp.switch_mlp.up_proj.weight": "model-00053-of-00072.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00053-of-00072.safetensors",
+ "model.layers.35.self_attn.k_proj.biases": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.k_proj.scales": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.o_proj.biases": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.o_proj.scales": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.q_proj.biases": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.q_proj.scales": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.v_proj.biases": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.v_proj.scales": "model-00052-of-00072.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00052-of-00072.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00055-of-00072.safetensors",
+ "model.layers.36.mlp.gate.e_score_correction_bias": "model-00055-of-00072.safetensors",
+ "model.layers.36.mlp.gate.weight": "model-00055-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.down_proj.biases": "model-00055-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.down_proj.scales": "model-00055-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.down_proj.weight": "model-00055-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.gate_proj.biases": "model-00054-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.gate_proj.scales": "model-00054-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.gate_proj.weight": "model-00054-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.up_proj.biases": "model-00054-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.up_proj.scales": "model-00054-of-00072.safetensors",
+ "model.layers.36.mlp.switch_mlp.up_proj.weight": "model-00054-of-00072.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00055-of-00072.safetensors",
+ "model.layers.36.self_attn.attention_sink_bias": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.k_proj.biases": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.k_proj.scales": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.o_proj.biases": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.o_proj.scales": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.q_proj.biases": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.q_proj.scales": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.v_proj.biases": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.v_proj.scales": "model-00053-of-00072.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00053-of-00072.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.gate.e_score_correction_bias": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.gate.weight": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.down_proj.biases": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.down_proj.scales": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.down_proj.weight": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.gate_proj.biases": "model-00055-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.gate_proj.scales": "model-00055-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.gate_proj.weight": "model-00055-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.up_proj.biases": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.up_proj.scales": "model-00056-of-00072.safetensors",
+ "model.layers.37.mlp.switch_mlp.up_proj.weight": "model-00056-of-00072.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00056-of-00072.safetensors",
+ "model.layers.37.self_attn.attention_sink_bias": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.k_proj.biases": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.k_proj.scales": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.o_proj.biases": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.o_proj.scales": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.q_proj.biases": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.q_proj.scales": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.v_proj.biases": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.v_proj.scales": "model-00055-of-00072.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00055-of-00072.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00058-of-00072.safetensors",
+ "model.layers.38.mlp.gate.e_score_correction_bias": "model-00058-of-00072.safetensors",
+ "model.layers.38.mlp.gate.weight": "model-00058-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.down_proj.biases": "model-00058-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.down_proj.scales": "model-00058-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.down_proj.weight": "model-00058-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.gate_proj.biases": "model-00057-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.gate_proj.scales": "model-00057-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.gate_proj.weight": "model-00057-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.up_proj.biases": "model-00057-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.up_proj.scales": "model-00057-of-00072.safetensors",
+ "model.layers.38.mlp.switch_mlp.up_proj.weight": "model-00057-of-00072.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00058-of-00072.safetensors",
+ "model.layers.38.self_attn.attention_sink_bias": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.k_proj.biases": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.k_proj.scales": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.o_proj.biases": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.o_proj.scales": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.q_proj.biases": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.q_proj.scales": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.v_proj.biases": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.v_proj.scales": "model-00056-of-00072.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00056-of-00072.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.gate.e_score_correction_bias": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.gate.weight": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.down_proj.biases": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.down_proj.scales": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.down_proj.weight": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.gate_proj.biases": "model-00058-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.gate_proj.scales": "model-00058-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.gate_proj.weight": "model-00058-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.up_proj.biases": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.up_proj.scales": "model-00059-of-00072.safetensors",
+ "model.layers.39.mlp.switch_mlp.up_proj.weight": "model-00059-of-00072.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00059-of-00072.safetensors",
+ "model.layers.39.self_attn.attention_sink_bias": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.k_proj.biases": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.k_proj.scales": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.o_proj.biases": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.o_proj.scales": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.q_proj.biases": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.q_proj.scales": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.v_proj.biases": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.v_proj.scales": "model-00058-of-00072.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00058-of-00072.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00007-of-00072.safetensors",
+ "model.layers.4.mlp.gate.e_score_correction_bias": "model-00007-of-00072.safetensors",
+ "model.layers.4.mlp.gate.weight": "model-00007-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.down_proj.biases": "model-00007-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.down_proj.scales": "model-00007-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.down_proj.weight": "model-00007-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.gate_proj.biases": "model-00006-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.gate_proj.scales": "model-00006-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.gate_proj.weight": "model-00006-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.up_proj.biases": "model-00006-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.up_proj.scales": "model-00006-of-00072.safetensors",
+ "model.layers.4.mlp.switch_mlp.up_proj.weight": "model-00006-of-00072.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00007-of-00072.safetensors",
+ "model.layers.4.self_attn.attention_sink_bias": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.k_proj.biases": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.k_proj.scales": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.o_proj.biases": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.o_proj.scales": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.q_proj.biases": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.q_proj.scales": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.v_proj.biases": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.v_proj.scales": "model-00005-of-00072.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00005-of-00072.safetensors",
+ "model.layers.40.input_layernorm.weight": "model-00061-of-00072.safetensors",
+ "model.layers.40.mlp.gate.e_score_correction_bias": "model-00061-of-00072.safetensors",
+ "model.layers.40.mlp.gate.weight": "model-00061-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.down_proj.biases": "model-00061-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.down_proj.scales": "model-00061-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.down_proj.weight": "model-00061-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.gate_proj.biases": "model-00060-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.gate_proj.scales": "model-00060-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.gate_proj.weight": "model-00060-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.up_proj.biases": "model-00060-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.up_proj.scales": "model-00060-of-00072.safetensors",
+ "model.layers.40.mlp.switch_mlp.up_proj.weight": "model-00060-of-00072.safetensors",
+ "model.layers.40.post_attention_layernorm.weight": "model-00061-of-00072.safetensors",
+ "model.layers.40.self_attn.attention_sink_bias": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.k_proj.biases": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.k_proj.scales": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.k_proj.weight": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.o_proj.biases": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.o_proj.scales": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.o_proj.weight": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.q_proj.biases": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.q_proj.scales": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.q_proj.weight": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.v_proj.biases": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.v_proj.scales": "model-00059-of-00072.safetensors",
+ "model.layers.40.self_attn.v_proj.weight": "model-00059-of-00072.safetensors",
+ "model.layers.41.input_layernorm.weight": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.gate.e_score_correction_bias": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.gate.weight": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.down_proj.biases": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.down_proj.scales": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.down_proj.weight": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.gate_proj.biases": "model-00061-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.gate_proj.scales": "model-00061-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.gate_proj.weight": "model-00061-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.up_proj.biases": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.up_proj.scales": "model-00062-of-00072.safetensors",
+ "model.layers.41.mlp.switch_mlp.up_proj.weight": "model-00062-of-00072.safetensors",
+ "model.layers.41.post_attention_layernorm.weight": "model-00062-of-00072.safetensors",
+ "model.layers.41.self_attn.k_proj.biases": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.k_proj.scales": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.k_proj.weight": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.o_proj.biases": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.o_proj.scales": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.o_proj.weight": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.q_proj.biases": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.q_proj.scales": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.q_proj.weight": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.v_proj.biases": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.v_proj.scales": "model-00061-of-00072.safetensors",
+ "model.layers.41.self_attn.v_proj.weight": "model-00061-of-00072.safetensors",
+ "model.layers.42.input_layernorm.weight": "model-00064-of-00072.safetensors",
+ "model.layers.42.mlp.gate.e_score_correction_bias": "model-00064-of-00072.safetensors",
+ "model.layers.42.mlp.gate.weight": "model-00064-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.down_proj.biases": "model-00064-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.down_proj.scales": "model-00064-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.down_proj.weight": "model-00064-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.gate_proj.biases": "model-00063-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.gate_proj.scales": "model-00063-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.gate_proj.weight": "model-00063-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.up_proj.biases": "model-00063-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.up_proj.scales": "model-00063-of-00072.safetensors",
+ "model.layers.42.mlp.switch_mlp.up_proj.weight": "model-00063-of-00072.safetensors",
+ "model.layers.42.post_attention_layernorm.weight": "model-00064-of-00072.safetensors",
+ "model.layers.42.self_attn.attention_sink_bias": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.k_proj.biases": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.k_proj.scales": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.k_proj.weight": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.o_proj.biases": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.o_proj.scales": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.o_proj.weight": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.q_proj.biases": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.q_proj.scales": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.q_proj.weight": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.v_proj.biases": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.v_proj.scales": "model-00062-of-00072.safetensors",
+ "model.layers.42.self_attn.v_proj.weight": "model-00062-of-00072.safetensors",
+ "model.layers.43.input_layernorm.weight": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.gate.e_score_correction_bias": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.gate.weight": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.down_proj.biases": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.down_proj.scales": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.down_proj.weight": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.gate_proj.biases": "model-00064-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.gate_proj.scales": "model-00064-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.gate_proj.weight": "model-00064-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.up_proj.biases": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.up_proj.scales": "model-00065-of-00072.safetensors",
+ "model.layers.43.mlp.switch_mlp.up_proj.weight": "model-00065-of-00072.safetensors",
+ "model.layers.43.post_attention_layernorm.weight": "model-00065-of-00072.safetensors",
+ "model.layers.43.self_attn.attention_sink_bias": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.k_proj.biases": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.k_proj.scales": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.k_proj.weight": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.o_proj.biases": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.o_proj.scales": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.o_proj.weight": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.q_proj.biases": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.q_proj.scales": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.q_proj.weight": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.v_proj.biases": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.v_proj.scales": "model-00064-of-00072.safetensors",
+ "model.layers.43.self_attn.v_proj.weight": "model-00064-of-00072.safetensors",
+ "model.layers.44.input_layernorm.weight": "model-00067-of-00072.safetensors",
+ "model.layers.44.mlp.gate.e_score_correction_bias": "model-00067-of-00072.safetensors",
+ "model.layers.44.mlp.gate.weight": "model-00067-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.down_proj.biases": "model-00067-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.down_proj.scales": "model-00067-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.down_proj.weight": "model-00067-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.gate_proj.biases": "model-00066-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.gate_proj.scales": "model-00066-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.gate_proj.weight": "model-00066-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.up_proj.biases": "model-00066-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.up_proj.scales": "model-00066-of-00072.safetensors",
+ "model.layers.44.mlp.switch_mlp.up_proj.weight": "model-00066-of-00072.safetensors",
+ "model.layers.44.post_attention_layernorm.weight": "model-00067-of-00072.safetensors",
+ "model.layers.44.self_attn.attention_sink_bias": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.k_proj.biases": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.k_proj.scales": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.k_proj.weight": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.o_proj.biases": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.o_proj.scales": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.o_proj.weight": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.q_proj.biases": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.q_proj.scales": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.q_proj.weight": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.v_proj.biases": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.v_proj.scales": "model-00065-of-00072.safetensors",
+ "model.layers.44.self_attn.v_proj.weight": "model-00065-of-00072.safetensors",
+ "model.layers.45.input_layernorm.weight": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.gate.e_score_correction_bias": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.gate.weight": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.down_proj.biases": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.down_proj.scales": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.down_proj.weight": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.gate_proj.biases": "model-00067-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.gate_proj.scales": "model-00067-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.gate_proj.weight": "model-00067-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.up_proj.biases": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.up_proj.scales": "model-00068-of-00072.safetensors",
+ "model.layers.45.mlp.switch_mlp.up_proj.weight": "model-00068-of-00072.safetensors",
+ "model.layers.45.post_attention_layernorm.weight": "model-00068-of-00072.safetensors",
+ "model.layers.45.self_attn.attention_sink_bias": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.k_proj.biases": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.k_proj.scales": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.k_proj.weight": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.o_proj.biases": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.o_proj.scales": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.o_proj.weight": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.q_proj.biases": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.q_proj.scales": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.q_proj.weight": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.v_proj.biases": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.v_proj.scales": "model-00067-of-00072.safetensors",
+ "model.layers.45.self_attn.v_proj.weight": "model-00067-of-00072.safetensors",
+ "model.layers.46.input_layernorm.weight": "model-00070-of-00072.safetensors",
+ "model.layers.46.mlp.gate.e_score_correction_bias": "model-00070-of-00072.safetensors",
+ "model.layers.46.mlp.gate.weight": "model-00070-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.down_proj.biases": "model-00070-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.down_proj.scales": "model-00070-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.down_proj.weight": "model-00070-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.gate_proj.biases": "model-00069-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.gate_proj.scales": "model-00069-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.gate_proj.weight": "model-00069-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.up_proj.biases": "model-00069-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.up_proj.scales": "model-00069-of-00072.safetensors",
+ "model.layers.46.mlp.switch_mlp.up_proj.weight": "model-00069-of-00072.safetensors",
+ "model.layers.46.post_attention_layernorm.weight": "model-00070-of-00072.safetensors",
+ "model.layers.46.self_attn.attention_sink_bias": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.k_proj.biases": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.k_proj.scales": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.k_proj.weight": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.o_proj.biases": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.o_proj.scales": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.o_proj.weight": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.q_proj.biases": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.q_proj.scales": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.q_proj.weight": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.v_proj.biases": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.v_proj.scales": "model-00068-of-00072.safetensors",
+ "model.layers.46.self_attn.v_proj.weight": "model-00068-of-00072.safetensors",
+ "model.layers.47.input_layernorm.weight": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.gate.e_score_correction_bias": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.gate.weight": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.down_proj.biases": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.down_proj.scales": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.down_proj.weight": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.gate_proj.biases": "model-00070-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.gate_proj.scales": "model-00070-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.gate_proj.weight": "model-00070-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.up_proj.biases": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.up_proj.scales": "model-00071-of-00072.safetensors",
+ "model.layers.47.mlp.switch_mlp.up_proj.weight": "model-00071-of-00072.safetensors",
+ "model.layers.47.post_attention_layernorm.weight": "model-00071-of-00072.safetensors",
+ "model.layers.47.self_attn.k_proj.biases": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.k_proj.scales": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.k_proj.weight": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.o_proj.biases": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.o_proj.scales": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.o_proj.weight": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.q_proj.biases": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.q_proj.scales": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.q_proj.weight": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.v_proj.biases": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.v_proj.scales": "model-00070-of-00072.safetensors",
+ "model.layers.47.self_attn.v_proj.weight": "model-00070-of-00072.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.gate.e_score_correction_bias": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.gate.weight": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.down_proj.biases": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.down_proj.scales": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.down_proj.weight": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.gate_proj.biases": "model-00007-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.gate_proj.scales": "model-00007-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.gate_proj.weight": "model-00007-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.up_proj.biases": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.up_proj.scales": "model-00008-of-00072.safetensors",
+ "model.layers.5.mlp.switch_mlp.up_proj.weight": "model-00008-of-00072.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00008-of-00072.safetensors",
+ "model.layers.5.self_attn.k_proj.biases": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.k_proj.scales": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.o_proj.biases": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.o_proj.scales": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.q_proj.biases": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.q_proj.scales": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.v_proj.biases": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.v_proj.scales": "model-00007-of-00072.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00007-of-00072.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00010-of-00072.safetensors",
+ "model.layers.6.mlp.gate.e_score_correction_bias": "model-00010-of-00072.safetensors",
+ "model.layers.6.mlp.gate.weight": "model-00010-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.down_proj.biases": "model-00010-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.down_proj.scales": "model-00010-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.down_proj.weight": "model-00010-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.gate_proj.biases": "model-00009-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.gate_proj.scales": "model-00009-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.gate_proj.weight": "model-00009-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.up_proj.biases": "model-00009-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.up_proj.scales": "model-00009-of-00072.safetensors",
+ "model.layers.6.mlp.switch_mlp.up_proj.weight": "model-00009-of-00072.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00010-of-00072.safetensors",
+ "model.layers.6.self_attn.attention_sink_bias": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.k_proj.biases": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.k_proj.scales": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.o_proj.biases": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.o_proj.scales": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.q_proj.biases": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.q_proj.scales": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.v_proj.biases": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.v_proj.scales": "model-00008-of-00072.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00008-of-00072.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.gate.e_score_correction_bias": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.gate.weight": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.down_proj.biases": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.down_proj.scales": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.down_proj.weight": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.gate_proj.biases": "model-00010-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.gate_proj.scales": "model-00010-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.gate_proj.weight": "model-00010-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.up_proj.biases": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.up_proj.scales": "model-00011-of-00072.safetensors",
+ "model.layers.7.mlp.switch_mlp.up_proj.weight": "model-00011-of-00072.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00011-of-00072.safetensors",
+ "model.layers.7.self_attn.attention_sink_bias": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.k_proj.biases": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.k_proj.scales": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.o_proj.biases": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.o_proj.scales": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.q_proj.biases": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.q_proj.scales": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.v_proj.biases": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.v_proj.scales": "model-00010-of-00072.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00010-of-00072.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00013-of-00072.safetensors",
+ "model.layers.8.mlp.gate.e_score_correction_bias": "model-00013-of-00072.safetensors",
+ "model.layers.8.mlp.gate.weight": "model-00013-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.down_proj.biases": "model-00013-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.down_proj.scales": "model-00013-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.down_proj.weight": "model-00013-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.gate_proj.biases": "model-00012-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.gate_proj.scales": "model-00012-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.gate_proj.weight": "model-00012-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.up_proj.biases": "model-00012-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.up_proj.scales": "model-00012-of-00072.safetensors",
+ "model.layers.8.mlp.switch_mlp.up_proj.weight": "model-00012-of-00072.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00013-of-00072.safetensors",
+ "model.layers.8.self_attn.attention_sink_bias": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.k_proj.biases": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.k_proj.scales": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.o_proj.biases": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.o_proj.scales": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.q_proj.biases": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.q_proj.scales": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.v_proj.biases": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.v_proj.scales": "model-00011-of-00072.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00011-of-00072.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.gate.e_score_correction_bias": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.gate.weight": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.down_proj.biases": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.down_proj.scales": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.down_proj.weight": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.gate_proj.biases": "model-00013-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.gate_proj.scales": "model-00013-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.gate_proj.weight": "model-00013-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.up_proj.biases": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.up_proj.scales": "model-00014-of-00072.safetensors",
+ "model.layers.9.mlp.switch_mlp.up_proj.weight": "model-00014-of-00072.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00014-of-00072.safetensors",
+ "model.layers.9.self_attn.attention_sink_bias": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.k_proj.biases": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.k_proj.scales": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.o_proj.biases": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.o_proj.scales": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.q_proj.biases": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.q_proj.scales": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.v_proj.biases": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.v_proj.scales": "model-00013-of-00072.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00013-of-00072.safetensors",
+ "model.norm.weight": "model-00071-of-00072.safetensors"
+ }
+}
\ No newline at end of file
diff --git a/modeling_mimo_v2_flash.py b/modeling_mimo_v2_flash.py
new file mode 100644
index 0000000000000000000000000000000000000000..e13fa11b83fade578d33b6dd7941365deafcbea9
--- /dev/null
+++ b/modeling_mimo_v2_flash.py
@@ -0,0 +1,664 @@
+# coding=utf-8
+#
+# Copyright 2025 Xiaomi Corporation.
+# Copyright 2025 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from typing import Callable, Optional, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from transformers.generation import GenerationMixin
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.integrations import use_kernel_forward_from_hub
+
+from transformers.modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+)
+
+from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
+from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from transformers.processing_utils import Unpack
+from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
+
+from .configuration_mimo_v2_flash import MiMoV2FlashConfig
+
+logger = logging.get_logger(__name__)
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2:]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+        `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
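+
+# Illustrative note (editor's sketch, numbers are made up): with 8 query heads
+# and 2 KV heads, n_rep = 8 // 2 = 4, so
+#   kv = torch.randn(1, 2, 16, 64)   # (batch, kv_heads, seq_len, head_dim)
+#   repeat_kv(kv, 4).shape           # -> torch.Size([1, 8, 16, 64])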
+
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+    sinks: Optional[torch.Tensor] = None,
+    **kwargs,  # absorbs extra kwargs (e.g. position_ids) passed by the attention dispatch
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ if sinks is not None:
+        sinks = sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
+ attn_weights = torch.cat([attn_weights, sinks], dim=-1)
+
+ attn_weights = attn_weights - attn_weights.max(dim=-1, keepdim=True).values
+ probs = F.softmax(attn_weights, dim=-1, dtype=attn_weights.dtype)
+
+ if sinks is not None:
+ probs = probs[..., :-1] # we drop the sink here
+
+ attn_weights = nn.functional.dropout(probs, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ return attn_output, attn_weights
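+
+# How the sink column behaves (editor's note with made-up numbers): the learned
+# per-head sink logit is appended as one extra "virtual" key before softmax and
+# dropped afterwards, so it absorbs probability mass without contributing a
+# value vector. For row logits [2.0, 1.0] and sink bias 3.0,
+# softmax([2.0, 1.0, 3.0]) is roughly [0.24, 0.09, 0.67]; after dropping the
+# sink, the real tokens keep weights summing to ~0.33 instead of 1.0.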
+
+
+@use_kernel_forward_from_hub("RMSNorm")
+class MiMoV2RMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ MiMoV2RMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
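+
+# Editor's note on MiMoV2RMSNorm: unlike LayerNorm there is no mean subtraction
+# and no bias; each vector is scaled by 1/sqrt(mean(x**2) + eps) in float32, then
+# multiplied by the learned per-channel weight and cast back to the input dtype.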
+
+
+class MiMoV2MLP(nn.Module):
+ """MiMoV2MLP matching the gate, up, and down projection layers."""
+
+ def __init__(self, config: MiMoV2FlashConfig, intermediate_size=None):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states):
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
+ return down_proj
+
+
+class MiMoV2MoEGate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.top_k = config.num_experts_per_tok
+ self.n_routed_experts = config.n_routed_experts
+ self.routed_scaling_factor = (
+ config.routed_scaling_factor
+ if config.routed_scaling_factor is not None
+ else 1.0
+ )
+ self.scoring_func = config.scoring_func
+ self.topk_method = config.topk_method
+ self.n_group = config.n_group
+ self.topk_group = config.topk_group
+
+ # topk selection algorithm
+ self.norm_topk_prob = config.norm_topk_prob
+ self.gating_dim = config.hidden_size
+ self.weight = nn.Parameter(
+ torch.empty((self.n_routed_experts, self.gating_dim))
+ )
+ if self.topk_method == "noaux_tc":
+ self.e_score_correction_bias = nn.Parameter(
+                torch.empty(self.n_routed_experts)
+ )
+
+ def forward(self, hidden_states):
+ bsz, seq_len, h = hidden_states.shape
+ ### compute gating score
+ hidden_states = hidden_states.view(-1, h)
+ logits = F.linear(
+ hidden_states.type(torch.float32), self.weight.type(torch.float32), None
+ )
+ if self.scoring_func == "sigmoid":
+ scores = logits.sigmoid()
+ else:
+            raise NotImplementedError(
+                f"Unsupported scoring function for MoE gating: {self.scoring_func}"
+            )
+
+ ### select top-k experts
+ if self.topk_method == "noaux_tc":
+ assert not self.training
+ scores_for_choice = scores.view(bsz * seq_len, -1) + self.e_score_correction_bias.unsqueeze(0)
+            group_scores = (
+                scores_for_choice.view(bsz * seq_len, self.n_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
+            )  # [n, n_group]
+            group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]  # [n, topk_group]
+            group_mask = torch.zeros_like(group_scores)  # [n, n_group]
+            group_mask.scatter_(1, group_idx, 1)  # [n, n_group]
+            score_mask = (
+                group_mask.unsqueeze(-1)
+                .expand(bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group)
+                .reshape(bsz * seq_len, -1)
+            )  # [n, e]
+            tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf"))  # [n, e]
+            _, topk_idx = torch.topk(tmp_scores, k=self.top_k, dim=-1, sorted=False)
+ topk_weight = scores.gather(1, topk_idx)
+ else:
+            raise NotImplementedError(
+                f"Unsupported top-k method for MoE gating: {self.topk_method}"
+            )
+
+ ### norm gate to sum 1
+ if self.top_k > 1 and self.norm_topk_prob:
+ denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
+ topk_weight = topk_weight / denominator
+ topk_weight = topk_weight * self.routed_scaling_factor # must multiply the scaling factor
+
+ return topk_idx, topk_weight
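+
+# Worked routing example ("noaux_tc", illustrative numbers): with 8 routed
+# experts in n_group=4 groups of 2, each group is scored by the sum of its two
+# largest bias-corrected sigmoid scores; only the topk_group best groups stay
+# eligible, and the final top_k experts are picked inside them. The returned
+# weights come from the *uncorrected* scores, so e_score_correction_bias steers
+# which experts are chosen without distorting the mixture weights.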
+
+
+class MiMoV2MoE(nn.Module):
+ """
+ A mixed expert module containing shared experts.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.experts = nn.ModuleList(
+ [
+ MiMoV2MLP(config, intermediate_size=config.moe_intermediate_size)
+ for _ in range(config.n_routed_experts)
+ ]
+ )
+ self.gate = MiMoV2MoEGate(config)
+
+ def moe(self, hidden_states: torch.Tensor, topk_indices: torch.Tensor, topk_weights: torch.Tensor):
+ r"""
+ CALL FOR CONTRIBUTION! I don't have time to optimise this right now, but expert weights need to be fused
+ to not have to do a loop here (deepseek has 256 experts soooo yeah).
+ """
+ final_hidden_states = torch.zeros_like(hidden_states, dtype=topk_weights.dtype)
+ expert_mask = torch.nn.functional.one_hot(topk_indices, num_classes=len(self.experts))
+ expert_mask = expert_mask.permute(2, 0, 1)
+
+ for expert_idx in range(len(self.experts)):
+ expert = self.experts[expert_idx]
+ mask = expert_mask[expert_idx]
+ token_indices, weight_indices = torch.where(mask)
+
+ if token_indices.numel() > 0:
+ expert_weights = topk_weights[token_indices, weight_indices]
+ expert_input = hidden_states[token_indices]
+ expert_output = expert(expert_input)
+ weighted_output = expert_output * expert_weights.unsqueeze(-1)
+ final_hidden_states.index_add_(0, token_indices, weighted_output)
+
+        # In the original DeepSeek implementation, the expert outputs are gathered
+        # once we leave this module; the MoE module is itself an IsolatedParallel
+        # module, and all experts are "local": we shard but do not gather.
+ return final_hidden_states.type(hidden_states.dtype)
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ orig_shape = hidden_states.shape
+ topk_indices, topk_weights = self.gate(hidden_states)
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+ hidden_states = self.moe(hidden_states, topk_indices, topk_weights).view(*orig_shape)
+
+ return hidden_states
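+
+# Dispatch sketch (editor's note): one_hot(topk_indices) has shape
+# (tokens, top_k, n_experts); after permute(2, 0, 1), expert_mask[e] marks the
+# (token, slot) pairs routed to expert e, so each expert runs one dense forward
+# over only its tokens and index_add_ scatters the weighted outputs back.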
+
+
+class MiMoV2Attention(nn.Module):
+ """MiMoV2 Global Attention (pattern == 0) and Sliding Window Attention (pattern == 1)."""
+
+ def __init__(self, config: MiMoV2FlashConfig, is_swa: bool, layer_idx: int):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+
+ if is_swa:
+ self.head_dim = config.swa_head_dim
+ self.v_head_dim = config.swa_v_head_dim
+ self.num_attention_heads = config.swa_num_attention_heads
+ self.num_key_value_heads = config.swa_num_key_value_heads
+ else:
+ self.head_dim = config.head_dim
+ self.v_head_dim = config.v_head_dim
+ self.num_attention_heads = config.num_attention_heads
+ self.num_key_value_heads = config.num_key_value_heads
+
+ self.rope_dim = int(self.head_dim * config.partial_rotary_factor)
+ self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
+ self.attention_bias = config.attention_bias
+ self.attention_dropout: float = config.attention_dropout
+ self.scaling = self.head_dim ** -0.5
+
+ # These dimensions are for the attention layers
+ q_hidden_size = self.num_attention_heads * self.head_dim
+ k_hidden_size = self.num_key_value_heads * self.head_dim
+ v_hidden_size = self.num_key_value_heads * self.v_head_dim
+ o_hidden_size = self.num_attention_heads * self.v_head_dim
+
+ self.q_proj = nn.Linear(config.hidden_size, q_hidden_size, bias=self.attention_bias)
+ self.k_proj = nn.Linear(config.hidden_size, k_hidden_size, bias=self.attention_bias)
+ self.v_proj = nn.Linear(config.hidden_size, v_hidden_size, bias=self.attention_bias)
+ self.o_proj = nn.Linear(o_hidden_size, config.hidden_size, bias=False)
+
+ self.attention_sink_bias = (
+            torch.nn.Parameter(torch.empty(self.num_attention_heads), requires_grad=False)
+ if (config.add_full_attention_sink_bias and not is_swa) or (config.add_swa_attention_sink_bias and is_swa)
+ else None
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_values: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ input_shape = hidden_states.shape[:-1]
+ qk_hidden_shape = (*input_shape, -1, self.head_dim)
+ v_hidden_shape = (*input_shape, -1, self.v_head_dim)
+
+ query_states = self.q_proj(hidden_states).view(qk_hidden_shape).transpose(1, 2)
+ key_states = self.k_proj(hidden_states).view(qk_hidden_shape).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(v_hidden_shape).transpose(1, 2)
+
+ cos, sin = position_embeddings
+
+ query_rope, query_nope = query_states.split([self.rope_dim, self.head_dim - self.rope_dim], dim=-1)
+ key_rope, key_nope = key_states.split([self.rope_dim, self.head_dim - self.rope_dim], dim=-1)
+
+ query_rope, key_rope = apply_rotary_pos_emb(query_rope, key_rope, cos, sin)
+
+ query_states = torch.cat([query_rope, query_nope], dim=-1)
+ key_states = torch.cat([key_rope, key_nope], dim=-1)
+
+ if past_key_values is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ position_ids=position_ids,
+ sinks=self.attention_sink_bias,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
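+
+# Partial-RoPE shape note (editor's sketch; e.g. head_dim=128 with
+# partial_rotary_factor=0.5 gives rope_dim=64): only the first rope_dim channels
+# of each query/key head are rotated, the remaining "nope" channels pass through
+# unrotated, and the two slices are re-concatenated before the cache update.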
+
+
+class MiMoV2DecoderLayer(nn.Module):
+ """
+ MiMoV2 Decoder Layer. It dynamically chooses the correct attention
+ module based on the layer index and the `hybrid_layer_pattern`.
+ """
+
+ def __init__(self, config: MiMoV2FlashConfig, layer_idx: int):
+ super().__init__()
+
+ # This is the key logic: choose the module based on the pattern
+ is_swa_layer = config.hybrid_layer_pattern[layer_idx] == 1
+ if is_swa_layer:
+ self.attention_type = "sliding_window_attention"
+ self.self_attn = MiMoV2Attention(config, True, layer_idx)
+ else:
+ self.attention_type = "full_attention"
+ self.self_attn = MiMoV2Attention(config, False, layer_idx)
+
+ self.mlp = (
+ MiMoV2MoE(config)
+ if (
+ getattr(config, 'n_routed_experts', None) is not None
+ and config.moe_layer_freq[layer_idx]
+ )
+ else MiMoV2MLP(config)
+ )
+
+ self.input_layernorm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
+ self.post_attention_layernorm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
+ self.hidden_size = config.hidden_size
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> torch.Tensor:
+ residual = hidden_states
+ hidden_states = self.input_layernorm(hidden_states)
+ # Self Attention
+ hidden_states, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # MLP or MOE
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+ return hidden_states
+
+class MiMoV2FlashRotaryEmbedding(nn.Module):
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
+
+ def __init__(self, config: MiMoV2FlashConfig, is_swa, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+        if is_swa:
+            # Copy the config so SWA-specific RoPE settings don't mutate the shared object.
+            self.config = copy.deepcopy(config)
+            self.config.rope_theta = config.swa_rope_theta
+            self.config.head_dim = config.swa_head_dim
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ @torch.no_grad()
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
+ def forward(self, x, position_ids):
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+ position_ids_expanded = position_ids[:, None, :].float()
+
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos() * self.attention_scaling
+ sin = emb.sin() * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+@auto_docstring
+class MiMoV2Model(PreTrainedModel):
+ """The main 'model' block, corresponding to `model.` in the weight map."""
+ config_class = MiMoV2FlashConfig
+
+ def __init__(self, config: MiMoV2FlashConfig):
+ super().__init__(config)
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
+ self.layers = nn.ModuleList(
+ [MiMoV2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = MiMoV2RMSNorm(config.hidden_size, eps=config.layernorm_epsilon)
+ self.rotary_emb = MiMoV2FlashRotaryEmbedding(config=config, is_swa=False)
+ self.swa_rotary_emb = MiMoV2FlashRotaryEmbedding(config=config, is_swa=True)
+
+ self.has_sliding_layers = any(
+ pattern == 1 for pattern in config.hybrid_layer_pattern
+ )
+
+ # For Huggingface DynamicCache compatibility
+ self.config.layer_types = [
+ "sliding_attention" if config.hybrid_layer_pattern[i] == 1 else "full_attention"
+ for i in range(config.num_hidden_layers)
+ ]
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+    ) -> BaseModelOutputWithPast:
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache(config=self.config)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ # It may already have been prepared by e.g. `generate`
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
+ # Prepare mask arguments
+ mask_kwargs = {
+ "config": self.config,
+ "input_embeds": inputs_embeds,
+ "attention_mask": attention_mask,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "position_ids": position_ids,
+ }
+ # Create the masks
+ causal_mask_mapping = {
+ "full_attention": create_causal_mask(**mask_kwargs),
+ }
+ # The sliding window alternating layers are not always activated depending on the config
+ if self.has_sliding_layers:
+ causal_mask_mapping["sliding_window_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
+
+ hidden_states = inputs_embeds
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+ swa_position_embeddings = self.swa_rotary_emb(hidden_states, position_ids)
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ hidden_states = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask_mapping[decoder_layer.attention_type],
+ position_embeddings=(
+ position_embeddings
+ if decoder_layer.attention_type == "full_attention"
+ else swa_position_embeddings
+ ),
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = self.norm(hidden_states)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ )
+
+
+@auto_docstring
+class MiMoV2FlashForCausalLM(PreTrainedModel, GenerationMixin):
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+ _tp_plan = {"lm_head": "colwise_rep"}
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+ config_class = MiMoV2FlashConfig
+ _keys_to_ignore_on_load_unexpected = [r"model.layers\.\d+\.self_attn\.rotary_emb\.inv_freq"]
+
+ def __init__(self, config: MiMoV2FlashConfig):
+ super().__init__(config)
+ self.model = MiMoV2Model(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @can_return_tuple
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ logits_to_keep: Union[int, torch.Tensor] = 0,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> CausalLMOutputWithPast:
+
+ outputs: BaseModelOutputWithPast = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs.last_hidden_state
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
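+# Note on logits_to_keep (editor's sketch): during generation only the last
+# position's logits are needed, so logits_to_keep=1 slices hidden_states to
+# hidden_states[:, -1:, :] before the lm_head projection, avoiding a full-vocab
+# matmul over the entire prompt.
+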
+__all__ = [
+ "MiMoV2FlashForCausalLM"
+]
\ No newline at end of file
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..d4ba2b245534db7e662222ae107e959930ee4dae
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05d47c87966b4db779200053de490f89936ed529f8ab889244e271630715fcfe
+size 11422638
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f0509bd4d964cb8ad33d4f0f7305fe3553e8eb40
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_prefix_space": false,
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "backend": "tokenizers",
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "is_local": true,
+ "model_max_length": 262144,
+ "model_specific_special_tokens": {},
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+}