HectorHe committed
Commit 23b80e9 · verified · 1 Parent(s): a454669

Training in progress, step 100

chat_template.jinja ADDED
@@ -0,0 +1,5 @@
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '
+
+ ' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '
+
+ ' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}
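The template above renders a plain "User:" / "Assistant:" transcript: BOS first, system content followed by a blank line, each user turn followed by a blank line, and EOS closing each assistant turn. A minimal sketch (not part of this commit) of rendering it with transformers; the checkpoint id is an assumption:

```python
# Hedged example: render the chat template above via transformers.
# The checkpoint id is an assumption; any tokenizer shipping this
# chat_template.jinja behaves the same way.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", trust_remote_code=True
)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# add_generation_prompt=True appends the trailing 'Assistant:' cue
# defined in the template's final {% if %} branch.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```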
config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "architectures": [
+     "DeepseekV2ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--configuration_deepseek.DeepseekV2Config",
+     "AutoModel": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2Model",
+     "AutoModelForCausalLM": "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct--modeling_deepseek.DeepseekV2ForCausalLM"
+   },
+   "aux_loss_alpha": 0.001,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "ep_size": 1,
+   "first_k_dense_replace": 1,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 10944,
+   "kv_lora_rank": 512,
+   "max_position_embeddings": 163840,
+   "model_type": "deepseek_v2",
+   "moe_intermediate_size": 1408,
+   "moe_layer_freq": 1,
+   "n_group": 1,
+   "n_routed_experts": 64,
+   "n_shared_experts": 2,
+   "norm_topk_prob": false,
+   "num_attention_heads": 16,
+   "num_experts_per_tok": 6,
+   "num_hidden_layers": 27,
+   "num_key_value_heads": 16,
+   "pretraining_tp": 1,
+   "q_lora_rank": null,
+   "qk_nope_head_dim": 128,
+   "qk_rope_head_dim": 64,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "beta_fast": 32,
+     "beta_slow": 1,
+     "factor": 40,
+     "mscale": 0.707,
+     "mscale_all_dim": 0.707,
+     "original_max_position_embeddings": 4096,
+     "type": "yarn"
+   },
+   "rope_theta": 10000,
+   "routed_scaling_factor": 1.0,
+   "scoring_func": "softmax",
+   "seq_aux": true,
+   "tie_word_embeddings": false,
+   "topk_group": 1,
+   "topk_method": "greedy",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.0.dev0",
+   "use_cache": true,
+   "v_head_dim": 128,
+   "vocab_size": 102400
+ }
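A few of these fields pin down the MoE shape: `first_k_dense_replace: 1` keeps layer 0 a dense MLP, while each of the remaining 26 layers routes every token to `num_experts_per_tok: 6` of `n_routed_experts: 64` routed experts plus `n_shared_experts: 2` always-on shared experts, and YaRN rope scaling stretches the 4096-token pretraining window to 163840. A hedged sketch of inspecting the config with transformers (`trust_remote_code` resolves the `auto_map` classes; the local path is a placeholder):

```python
# Sketch, not part of the commit: read the config fields above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)
print(cfg.model_type)             # deepseek_v2
print(cfg.first_k_dense_replace)  # 1 -> layer 0 is a dense MLP
print(cfg.n_routed_experts)       # 64 routed experts per MoE layer
print(cfg.num_experts_per_tok)    # 6 routed experts active per token
print(cfg.n_shared_experts)       # 2 shared experts always active
print(cfg.rope_scaling["type"])   # yarn
```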
expert_selection.log ADDED
@@ -0,0 +1,194 @@
+ 2025-07-11 01:07:44 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+ 2025-07-11 01:07:44 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+ 2025-07-11 01:07:44 - INFO - __main__ - Training parameters EfficientDistillationConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ alpha=0.5,
+ auto_find_batch_size=False,
+ average_tokens_across_devices=False,
+ batch_eval_metrics=False,
+ benchmarks=[],
+ bf16=True,
+ bf16_full_eval=False,
+ callbacks=[],
+ ce_loss_scale=1.0,
+ chars_per_token=<CHARS_PER_TOKEN>,
+ chat_template=None,
+ completion_only_loss=None,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_batch_size=None,
+ dataset_kwargs=None,
+ dataset_num_proc=None,
+ dataset_text_field=text,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800000000,
+ debug=[],
+ deepspeed=None,
+ disable_dropout=True,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eos_token=<EOS_TOKEN>,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_packing=None,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.NO,
+ eval_use_gather_object=False,
+ expert_num=6,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
+ hub_model_revision=main,
+ hub_private_repo=None,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_for_metrics=[],
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ kl_loss_scale=1.0,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=length,
+ lmbda=0.0,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_01-07-43_ip-172-31-67-236,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=1,
+ logging_strategy=IntervalStrategy.STEPS,
+ loss_type=token_specific,
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+ max_grad_norm=1.0,
+ max_length=8192,
+ max_new_tokens=1024,
+ max_seq_length=None,
+ max_steps=-1,
+ metric_for_best_model=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_of_sequences=None,
+ num_train_epochs=3,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
+ overwrite_hub_revision=False,
+ overwrite_output_dir=True,
+ packing=False,
+ pad_to_multiple_of=None,
+ pad_token=<PAD_TOKEN>,
+ padding_free=False,
+ past_index=-1,
+ per_device_eval_batch_size=16,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_revision=False,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ reduction=sum,
+ remove_unused_columns=True,
+ report_to=['wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=100,
+ save_strategy=SaveStrategy.STEPS,
+ save_total_limit=1,
+ seed=1234,
+ skip_memory_metrics=True,
+ system_prompt=None,
+ teacher_model_init_kwargs=None,
+ teacher_model_name_or_path=None,
+ temperature=0.9,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_liger=False,
+ use_liger_kernel=False,
+ use_mps_device=False,
+ wandb_entity=None,
+ wandb_project=None,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ weight_decay=0.0,
+ )
+ 2025-07-11 01:07:45 - INFO - __main__ - *** Initializing model kwargs ***
+ 2025-07-11 01:07:45 - INFO - __main__ - Model memory in step 1, before model initialization (0):Memory allocated: 0.0
+ Memory reserved: 0.0
+ 2025-07-11 01:08:08 - INFO - __main__ - Model memory in step 1, after model initialization:Memory allocated: 4836.39697265625
+ Memory reserved: 7322.0
+ 2025-07-11 01:08:08 - INFO - __main__ - Model memory in step 2, before data collator initialization:Memory allocated: 4836.39697265625
+ Memory reserved: 6442.0
+ 2025-07-11 01:08:08 - INFO - __main__ - Model memory in step 2, after data collator initialization:Memory allocated: 4836.39697265625
+ Memory reserved: 6442.0
+ 2025-07-11 01:08:08 - INFO - __main__ - Model memory in step 4, before trainer initialization:Memory allocated: 4836.39697265625
+ Memory reserved: 6442.0
+ 2025-07-11 01:08:09 - INFO - __main__ - Model memory in step 4, after trainer initialization:Memory allocated: 4836.3974609375
+ Memory reserved: 6442.0
+ 2025-07-11 01:08:09 - INFO - __main__ - Model memory in step 5, before prediction:Memory allocated: 4836.3974609375
+ Memory reserved: 6442.0
+ 2025-07-11 01:08:09 - INFO - __main__ - Running prediction on test subset to record expert activations...
+ 2025-07-11 01:08:20 - INFO - __main__ - Model memory in step 5, after prediction:Memory allocated: 3789.373046875
+ Memory reserved: 16034.0
+ 2025-07-11 01:08:20 - INFO - __main__ - Top k experts selected: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
+ 2025-07-11 01:08:20 - INFO - __main__ - Top k experts saved to: /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json
+ 2025-07-11 01:08:20 - INFO - __main__ - Model memory before cleanup:Memory allocated: 3789.373046875
+ Memory reserved: 16034.0
+ 2025-07-11 01:08:22 - INFO - __main__ - Model memory after cleanup:Memory allocated: 3788.38427734375
+ Memory reserved: 5794.0
+ 2025-07-11 01:08:22 - INFO - __main__ - Expert selection completed successfully. Run part 2 for training.
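The "record expert activations" step the log describes boils down to counting how often the router picks each expert on the probe split and keeping the six most-used experts per MoE layer. The sketch below is an illustrative reconstruction, not the actual training script: it assumes the DeepseekV2 MoE gate lives at `*.mlp.gate` and returns a tuple whose first element is the selected expert indices.

```python
# Hypothetical reconstruction of the top-k expert selection step.
import json
from collections import Counter, defaultdict

counts = defaultdict(Counter)  # "model.layers.N.mlp" -> expert id -> hits

def make_hook(layer_name):
    def hook(module, inputs, output):
        # Assumption: the gate returns (topk_idx, topk_weight, aux_loss),
        # as in the DeepseekV2 modeling code; only the indices matter here.
        topk_idx = output[0]
        counts[layer_name].update(topk_idx.flatten().tolist())
    return hook

def register_gate_hooks(model):
    handles = []
    for name, module in model.named_modules():
        if name.endswith(".mlp.gate"):  # MoE gates only; layer 0 is dense
            handles.append(module.register_forward_hook(make_hook(name[: -len(".gate")])))
    return handles

def top_k_experts(k=6):
    return {layer: [e for e, _ in c.most_common(k)] for layer, c in counts.items()}

def save_top_k(path, k=6):
    # After running prediction on the probe split, persist the selection.
    with open(path, "w") as f:
        json.dump(top_k_experts(k), f)
```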
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35d9f29300f400bab994c596de8b655c13652c0f0f1de2c8e3e2a35cdf54c0c3
+ size 4902968072
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9560e61b86eb9b7b8c25a028a9e1c79798cc7d34b9b4e87ecb337d1a053823e9
+ size 419430528
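Both entries above are Git LFS pointers: the repository stores only the `oid`/`size` stub, and the actual shard is fetched by content hash. A small sketch (assumed local filenames, not part of the commit) for checking a downloaded shard against the recorded `oid`:

```python
# Verify a downloaded shard against the sha256 recorded in its LFS pointer.
import hashlib

def sha256_of(path, chunk=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "35d9f29300f400bab994c596de8b655c13652c0f0f1de2c8e3e2a35cdf54c0c3"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```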
model.safetensors.index.json ADDED
@@ -0,0 +1,774 @@
+ {
+   "metadata": {
+     "total_size": 5322300416
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00002-of-00002.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
483
+ "model.layers.24.mlp.gate.weight": "model-00001-of-00002.safetensors",
484
+ "model.layers.24.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
485
+ "model.layers.24.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
486
+ "model.layers.24.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
487
+ "model.layers.24.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
488
+ "model.layers.24.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
489
+ "model.layers.24.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
490
+ "model.layers.24.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
491
+ "model.layers.24.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
492
+ "model.layers.24.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
493
+ "model.layers.24.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
494
+ "model.layers.24.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
495
+ "model.layers.24.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
496
+ "model.layers.24.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
497
+ "model.layers.24.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
498
+ "model.layers.24.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
499
+ "model.layers.24.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
500
+ "model.layers.24.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
501
+ "model.layers.24.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
502
+ "model.layers.24.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
503
+ "model.layers.24.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
504
+ "model.layers.24.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
505
+ "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
506
+ "model.layers.24.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
507
+ "model.layers.24.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
508
+ "model.layers.24.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
509
+ "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
510
+ "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
511
+ "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors",
512
+ "model.layers.25.mlp.gate.weight": "model-00001-of-00002.safetensors",
513
+ "model.layers.25.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
514
+ "model.layers.25.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
515
+ "model.layers.25.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
516
+ "model.layers.25.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
517
+ "model.layers.25.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
518
+ "model.layers.25.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
519
+ "model.layers.25.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
520
+ "model.layers.25.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
521
+ "model.layers.25.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
522
+ "model.layers.25.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
523
+ "model.layers.25.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
524
+ "model.layers.25.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
525
+ "model.layers.25.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
526
+ "model.layers.25.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
527
+ "model.layers.25.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
528
+ "model.layers.25.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
529
+ "model.layers.25.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
530
+ "model.layers.25.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
531
+ "model.layers.25.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
532
+ "model.layers.25.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
533
+ "model.layers.25.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
534
+ "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
535
+ "model.layers.25.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
536
+ "model.layers.25.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
537
+ "model.layers.25.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
538
+ "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
539
+ "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
540
+ "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors",
541
+ "model.layers.26.mlp.gate.weight": "model-00001-of-00002.safetensors",
542
+ "model.layers.26.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
543
+ "model.layers.26.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
544
+ "model.layers.26.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
545
+ "model.layers.26.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
546
+ "model.layers.26.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
547
+ "model.layers.26.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
548
+ "model.layers.26.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
549
+ "model.layers.26.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
550
+ "model.layers.26.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
551
+ "model.layers.26.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
552
+ "model.layers.26.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
553
+ "model.layers.26.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
554
+ "model.layers.26.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
555
+ "model.layers.26.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
556
+ "model.layers.26.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
557
+ "model.layers.26.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
558
+ "model.layers.26.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
559
+ "model.layers.26.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
560
+ "model.layers.26.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
561
+ "model.layers.26.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
562
+ "model.layers.26.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
563
+ "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
564
+ "model.layers.26.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
565
+ "model.layers.26.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
566
+ "model.layers.26.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
567
+ "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
568
+ "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
569
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
570
+ "model.layers.3.mlp.gate.weight": "model-00001-of-00002.safetensors",
571
+ "model.layers.3.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
572
+ "model.layers.3.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
573
+ "model.layers.3.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
574
+ "model.layers.3.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
575
+ "model.layers.3.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
576
+ "model.layers.3.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
577
+ "model.layers.3.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
578
+ "model.layers.3.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
579
+ "model.layers.3.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
580
+ "model.layers.3.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
581
+ "model.layers.3.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
582
+ "model.layers.3.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
583
+ "model.layers.3.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
584
+ "model.layers.3.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
585
+ "model.layers.3.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
586
+ "model.layers.3.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
587
+ "model.layers.3.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
588
+ "model.layers.3.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
589
+ "model.layers.3.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
590
+ "model.layers.3.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
591
+ "model.layers.3.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
592
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
593
+ "model.layers.3.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
594
+ "model.layers.3.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
595
+ "model.layers.3.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
596
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
597
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
598
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
599
+ "model.layers.4.mlp.gate.weight": "model-00001-of-00002.safetensors",
600
+ "model.layers.4.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
601
+ "model.layers.4.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
602
+ "model.layers.4.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
603
+ "model.layers.4.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
604
+ "model.layers.4.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
605
+ "model.layers.4.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
606
+ "model.layers.4.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
607
+ "model.layers.4.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
608
+ "model.layers.4.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
609
+ "model.layers.4.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
610
+ "model.layers.4.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
611
+ "model.layers.4.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
612
+ "model.layers.4.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
613
+ "model.layers.4.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
614
+ "model.layers.4.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
615
+ "model.layers.4.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
616
+ "model.layers.4.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
617
+ "model.layers.4.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
618
+ "model.layers.4.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
619
+ "model.layers.4.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
620
+ "model.layers.4.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
621
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
622
+ "model.layers.4.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
623
+ "model.layers.4.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
624
+ "model.layers.4.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
625
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
626
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
627
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
628
+ "model.layers.5.mlp.gate.weight": "model-00001-of-00002.safetensors",
629
+ "model.layers.5.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
630
+ "model.layers.5.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
631
+ "model.layers.5.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
632
+ "model.layers.5.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
633
+ "model.layers.5.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
634
+ "model.layers.5.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
635
+ "model.layers.5.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
636
+ "model.layers.5.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
637
+ "model.layers.5.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
638
+ "model.layers.5.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
639
+ "model.layers.5.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
640
+ "model.layers.5.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
641
+ "model.layers.5.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
642
+ "model.layers.5.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
643
+ "model.layers.5.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
644
+ "model.layers.5.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
645
+ "model.layers.5.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
646
+ "model.layers.5.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
647
+ "model.layers.5.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
648
+ "model.layers.5.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
649
+ "model.layers.5.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
650
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
651
+ "model.layers.5.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
652
+ "model.layers.5.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
653
+ "model.layers.5.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
654
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
655
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
656
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
657
+ "model.layers.6.mlp.gate.weight": "model-00001-of-00002.safetensors",
658
+ "model.layers.6.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
659
+ "model.layers.6.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
660
+ "model.layers.6.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
661
+ "model.layers.6.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
662
+ "model.layers.6.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
663
+ "model.layers.6.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
664
+ "model.layers.6.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
665
+ "model.layers.6.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
666
+ "model.layers.6.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
667
+ "model.layers.6.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
668
+ "model.layers.6.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
669
+ "model.layers.6.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
670
+ "model.layers.6.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
671
+ "model.layers.6.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
672
+ "model.layers.6.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
673
+ "model.layers.6.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
674
+ "model.layers.6.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
675
+ "model.layers.6.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
676
+ "model.layers.6.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
677
+ "model.layers.6.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
678
+ "model.layers.6.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
679
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
680
+ "model.layers.6.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
681
+ "model.layers.6.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
682
+ "model.layers.6.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
683
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
684
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
685
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
686
+ "model.layers.7.mlp.gate.weight": "model-00001-of-00002.safetensors",
687
+ "model.layers.7.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
688
+ "model.layers.7.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
689
+ "model.layers.7.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
690
+ "model.layers.7.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
691
+ "model.layers.7.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
692
+ "model.layers.7.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
693
+ "model.layers.7.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
694
+ "model.layers.7.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
695
+ "model.layers.7.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
696
+ "model.layers.7.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
697
+ "model.layers.7.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
698
+ "model.layers.7.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
699
+ "model.layers.7.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
700
+ "model.layers.7.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
701
+ "model.layers.7.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
702
+ "model.layers.7.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
703
+ "model.layers.7.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
704
+ "model.layers.7.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
705
+ "model.layers.7.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
706
+ "model.layers.7.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
707
+ "model.layers.7.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
708
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
709
+ "model.layers.7.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
710
+ "model.layers.7.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
711
+ "model.layers.7.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
712
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
713
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
714
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
715
+ "model.layers.8.mlp.gate.weight": "model-00001-of-00002.safetensors",
716
+ "model.layers.8.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
717
+ "model.layers.8.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
718
+ "model.layers.8.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
719
+ "model.layers.8.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
720
+ "model.layers.8.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
721
+ "model.layers.8.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
722
+ "model.layers.8.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
723
+ "model.layers.8.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
724
+ "model.layers.8.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
725
+ "model.layers.8.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
726
+ "model.layers.8.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
727
+ "model.layers.8.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
728
+ "model.layers.8.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
729
+ "model.layers.8.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
730
+ "model.layers.8.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
731
+ "model.layers.8.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
732
+ "model.layers.8.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
733
+ "model.layers.8.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
734
+ "model.layers.8.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
735
+ "model.layers.8.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
736
+ "model.layers.8.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
737
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
738
+ "model.layers.8.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
739
+ "model.layers.8.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
740
+ "model.layers.8.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
741
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
742
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
743
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
744
+ "model.layers.9.mlp.gate.weight": "model-00001-of-00002.safetensors",
745
+ "model.layers.9.mlp.selected_experts.0.down_proj.weight": "model-00001-of-00002.safetensors",
746
+ "model.layers.9.mlp.selected_experts.0.gate_proj.weight": "model-00001-of-00002.safetensors",
747
+ "model.layers.9.mlp.selected_experts.0.up_proj.weight": "model-00001-of-00002.safetensors",
748
+ "model.layers.9.mlp.selected_experts.1.down_proj.weight": "model-00001-of-00002.safetensors",
749
+ "model.layers.9.mlp.selected_experts.1.gate_proj.weight": "model-00001-of-00002.safetensors",
750
+ "model.layers.9.mlp.selected_experts.1.up_proj.weight": "model-00001-of-00002.safetensors",
751
+ "model.layers.9.mlp.selected_experts.2.down_proj.weight": "model-00001-of-00002.safetensors",
752
+ "model.layers.9.mlp.selected_experts.2.gate_proj.weight": "model-00001-of-00002.safetensors",
753
+ "model.layers.9.mlp.selected_experts.2.up_proj.weight": "model-00001-of-00002.safetensors",
754
+ "model.layers.9.mlp.selected_experts.3.down_proj.weight": "model-00001-of-00002.safetensors",
755
+ "model.layers.9.mlp.selected_experts.3.gate_proj.weight": "model-00001-of-00002.safetensors",
756
+ "model.layers.9.mlp.selected_experts.3.up_proj.weight": "model-00001-of-00002.safetensors",
757
+ "model.layers.9.mlp.selected_experts.4.down_proj.weight": "model-00001-of-00002.safetensors",
758
+ "model.layers.9.mlp.selected_experts.4.gate_proj.weight": "model-00001-of-00002.safetensors",
759
+ "model.layers.9.mlp.selected_experts.4.up_proj.weight": "model-00001-of-00002.safetensors",
760
+ "model.layers.9.mlp.selected_experts.5.down_proj.weight": "model-00001-of-00002.safetensors",
761
+ "model.layers.9.mlp.selected_experts.5.gate_proj.weight": "model-00001-of-00002.safetensors",
762
+ "model.layers.9.mlp.selected_experts.5.up_proj.weight": "model-00001-of-00002.safetensors",
763
+ "model.layers.9.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
764
+ "model.layers.9.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
765
+ "model.layers.9.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
766
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
767
+ "model.layers.9.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
768
+ "model.layers.9.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
769
+ "model.layers.9.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
770
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
771
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
772
+ "model.norm.weight": "model-00001-of-00002.safetensors"
773
+ }
774
+ }
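The block above closes the safetensors sharded-checkpoint index: its weight_map sends every tensor name to the shard file that stores it (all of the tensors listed here live in model-00001-of-00002.safetensors). As a minimal sketch, assuming a local checkout of this repo and the usual model.safetensors.index.json filename, a single tensor can be read through the index without loading the whole shard:

    import json
    from safetensors import safe_open

    with open("model.safetensors.index.json") as f:
        index = json.load(f)

    name = "model.layers.21.self_attn.q_proj.weight"  # any key from the weight_map above
    shard = index["weight_map"][name]                 # -> "model-00001-of-00002.safetensors"
    with safe_open(shard, framework="pt") as f:
        tensor = f.get_tensor(name)                   # reads only this tensor from the shard
    print(tensor.shape)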
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|end▁of▁sentence|>"
+ }
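special_tokens_map.json pins the BOS and EOS strings and reuses the EOS token as the pad token, a common choice for models pretrained without a dedicated padding token. A quick sanity check of how transformers resolves these entries (a sketch, assuming the checkpoint is in the current directory):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(".")
    print(tok.bos_token, tok.eos_token, tok.pad_token)
    # pad and eos share one id here, so loss masking must come from labels/attention_mask
    assert tok.pad_token_id == tok.eos_token_id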
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,163 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "100000": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100001": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100002": {
+ "content": "<|fim▁hole|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100003": {
+ "content": "<|fim▁begin|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100004": {
+ "content": "<|fim▁end|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100005": {
+ "content": "<|completion|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100006": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100007": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100008": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100009": {
+ "content": "<|tool▁calls▁begin|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100010": {
+ "content": "<|tool▁calls▁end|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100011": {
+ "content": "<|tool▁call▁begin|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100012": {
+ "content": "<|tool▁call▁end|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100013": {
+ "content": "<|tool▁outputs▁begin|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100014": {
+ "content": "<|tool▁outputs▁end|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100015": {
+ "content": "<|tool▁output▁begin|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100016": {
+ "content": "<|tool▁output▁end|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "100017": {
+ "content": "<|tool▁sep|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "fast_tokenizer": true,
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizerFast",
+ "unk_token": null,
+ "use_default_system_prompt": false
+ }
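tokenizer_config.json registers ids 100000-100017 in added_tokens_decoder; only BOS, EOS, and <|EOT|> are flagged "special", so the fill-in-the-middle and tool-calling markers survive decoding with skip_special_tokens=True. A small sketch of inspecting that behavior, again assuming a local checkout of this checkpoint:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(".")
    print(tok.convert_tokens_to_ids("<|fim▁begin|>"))              # 100003
    print(tok.decode([100003, 100002, 100004], skip_special_tokens=True))
    # the FIM markers are kept, while ids 100000/100001 would be stripped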
top_6_experts_lmms-lab_Math10K.json ADDED
@@ -0,0 +1 @@
+ {"model.layers.1.mlp": [51, 61, 44, 45, 14, 22], "model.layers.2.mlp": [27, 25, 18, 13, 3, 23], "model.layers.3.mlp": [54, 25, 41, 23, 28, 57], "model.layers.4.mlp": [37, 21, 33, 49, 11, 14], "model.layers.5.mlp": [54, 47, 35, 20, 52, 9], "model.layers.6.mlp": [22, 1, 13, 45, 42, 47], "model.layers.7.mlp": [58, 43, 24, 18, 44, 62], "model.layers.8.mlp": [47, 39, 56, 30, 54, 58], "model.layers.9.mlp": [31, 13, 22, 24, 12, 32], "model.layers.10.mlp": [47, 19, 42, 2, 13, 22], "model.layers.11.mlp": [29, 11, 17, 10, 59, 22], "model.layers.12.mlp": [5, 56, 3, 59, 4, 26], "model.layers.13.mlp": [10, 42, 58, 14, 47, 17], "model.layers.14.mlp": [51, 7, 27, 18, 31, 61], "model.layers.15.mlp": [24, 55, 5, 17, 14, 41], "model.layers.16.mlp": [61, 33, 63, 49, 19, 9], "model.layers.17.mlp": [0, 26, 43, 32, 27, 29], "model.layers.18.mlp": [5, 56, 42, 36, 2, 1], "model.layers.19.mlp": [2, 23, 24, 36, 40, 0], "model.layers.20.mlp": [1, 56, 38, 20, 48, 58], "model.layers.21.mlp": [5, 13, 15, 28, 19, 10], "model.layers.22.mlp": [58, 32, 31, 3, 45, 14], "model.layers.23.mlp": [20, 0, 58, 45, 33, 42], "model.layers.24.mlp": [62, 7, 42, 47, 10, 63], "model.layers.25.mlp": [45, 48, 39, 11, 46, 38], "model.layers.26.mlp": [46, 49, 6, 13, 11, 57]}
training.log ADDED
@@ -0,0 +1,1609 @@
+ 2025-07-11 01:10:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+ 2025-07-11 01:10:36 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+ 2025-07-11 01:10:36 - INFO - __main__ - Training parameters EfficientDistillationConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ alpha=0.5,
+ auto_find_batch_size=False,
+ average_tokens_across_devices=False,
+ batch_eval_metrics=False,
+ benchmarks=[],
+ bf16=True,
+ bf16_full_eval=False,
+ callbacks=[],
+ ce_loss_scale=1.0,
+ chars_per_token=<CHARS_PER_TOKEN>,
+ chat_template=None,
+ completion_only_loss=None,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_batch_size=None,
+ dataset_kwargs=None,
+ dataset_num_proc=None,
+ dataset_text_field=text,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800000000,
+ debug=[],
+ deepspeed=None,
+ disable_dropout=True,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eos_token=<EOS_TOKEN>,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_packing=None,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.NO,
+ eval_use_gather_object=False,
+ expert_num=6,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
+ hub_model_revision=main,
+ hub_private_repo=None,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_for_metrics=[],
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ kl_loss_scale=1.0,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=length,
+ lmbda=0.0,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_01-10-35_ip-172-31-67-236,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=1,
+ logging_strategy=IntervalStrategy.STEPS,
+ loss_type=token_specific,
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+ max_grad_norm=1.0,
+ max_length=8192,
+ max_new_tokens=1024,
+ max_seq_length=None,
+ max_steps=-1,
+ metric_for_best_model=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_of_sequences=None,
+ num_train_epochs=3,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
+ overwrite_hub_revision=False,
+ overwrite_output_dir=True,
+ packing=False,
+ pad_to_multiple_of=None,
+ pad_token=<PAD_TOKEN>,
+ padding_free=False,
+ past_index=-1,
+ per_device_eval_batch_size=16,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_revision=False,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ reduction=sum,
+ remove_unused_columns=True,
+ report_to=['wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=100,
+ save_strategy=SaveStrategy.STEPS,
+ save_total_limit=1,
+ seed=1234,
+ skip_memory_metrics=True,
+ system_prompt=None,
+ teacher_model_init_kwargs=None,
+ teacher_model_name_or_path=None,
+ temperature=0.9,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_liger=False,
+ use_liger_kernel=False,
+ use_mps_device=False,
+ wandb_entity=None,
+ wandb_project=None,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ weight_decay=0.0,
+ )
+ 2025-07-11 01:12:32 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+ 2025-07-11 01:12:32 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+ 2025-07-11 01:12:32 - INFO - __main__ - Training parameters EfficientDistillationConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ alpha=0.5,
+ auto_find_batch_size=False,
+ average_tokens_across_devices=False,
+ batch_eval_metrics=False,
+ benchmarks=[],
+ bf16=True,
+ bf16_full_eval=False,
+ callbacks=[],
+ ce_loss_scale=1.0,
+ chars_per_token=<CHARS_PER_TOKEN>,
+ chat_template=None,
+ completion_only_loss=None,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_batch_size=None,
+ dataset_kwargs=None,
+ dataset_num_proc=None,
+ dataset_text_field=text,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800000000,
+ debug=[],
+ deepspeed=None,
+ disable_dropout=True,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eos_token=<EOS_TOKEN>,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_packing=None,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.NO,
+ eval_use_gather_object=False,
+ expert_num=6,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
+ hub_model_revision=main,
+ hub_private_repo=None,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_for_metrics=[],
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ kl_loss_scale=1.0,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=length,
+ lmbda=0.0,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_01-12-32_ip-172-31-67-236,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=1,
+ logging_strategy=IntervalStrategy.STEPS,
+ loss_type=token_specific,
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+ max_grad_norm=1.0,
+ max_length=8192,
+ max_new_tokens=1024,
+ max_seq_length=None,
+ max_steps=-1,
+ metric_for_best_model=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_of_sequences=None,
+ num_train_epochs=3,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
+ overwrite_hub_revision=False,
+ overwrite_output_dir=True,
+ packing=False,
+ pad_to_multiple_of=None,
+ pad_token=<PAD_TOKEN>,
+ padding_free=False,
+ past_index=-1,
+ per_device_eval_batch_size=16,
+ per_device_train_batch_size=4,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_revision=False,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ reduction=sum,
+ remove_unused_columns=True,
+ report_to=['wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=100,
+ save_strategy=SaveStrategy.STEPS,
+ save_total_limit=1,
+ seed=1234,
+ skip_memory_metrics=True,
+ system_prompt=None,
+ teacher_model_init_kwargs=None,
+ teacher_model_name_or_path=None,
+ temperature=0.9,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_liger=False,
+ use_liger_kernel=False,
+ use_mps_device=False,
+ wandb_entity=None,
+ wandb_project=None,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ weight_decay=0.0,
+ )
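The EfficientDistillationConfig dumps name the knobs of the distillation objective: alpha=0.5, temperature=0.9, ce_loss_scale, kl_loss_scale, and loss_type=token_specific. The trainer's code is not part of this commit, so the following is only a generic sketch of how such knobs usually combine a label cross-entropy with a temperature-scaled KL term against teacher logits; the exact mixing in EfficientDistillationTrainer may differ (for example, its reduction=sum setting):

    import torch.nn.functional as F

    def distill_loss(student_logits, teacher_logits, labels,
                     alpha=0.5, temperature=0.9, ce_loss_scale=1.0, kl_loss_scale=1.0):
        vocab = student_logits.size(-1)
        # Cross-entropy on ground-truth tokens; -100 masks prompt/padding positions
        ce = F.cross_entropy(student_logits.view(-1, vocab), labels.view(-1),
                             ignore_index=-100)
        # Forward KL between temperature-softened teacher and student distributions
        kl = F.kl_div(F.log_softmax(student_logits / temperature, dim=-1),
                      F.softmax(teacher_logits / temperature, dim=-1),
                      reduction="batchmean") * temperature ** 2
        return alpha * ce_loss_scale * ce + (1 - alpha) * kl_loss_scale * kl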
339
+ 2025-07-11 01:12:33 - INFO - __main__ - *** Initializing model kwargs ***
340
+ 2025-07-11 01:12:33 - INFO - __main__ - Loaded top k experts from /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
341
+ 2025-07-11 01:12:33 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
342
+ Memory reserved: 0.0
343
+ 2025-07-11 01:12:44 - INFO - __main__ - Model memory after loading model:Memory allocated: 0.0
344
+ Memory reserved: 0.0
345
+ 2025-07-11 01:12:44 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
346
+ 2025-07-11 01:12:58 - INFO - __main__ - MoE layers replaced with Dense MLP layers
347
+ 2025-07-11 01:12:58 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 0.0
348
+ Memory reserved: 0.0
349
+ 2025-07-11 01:12:58 - INFO - __main__ - Initializing EfficientDistillationTrainer...
350
+ 2025-07-11 01:13:36 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 31126.0048828125
351
+ Memory reserved: 36896.0
352
+ 2025-07-11 01:13:36 - INFO - __main__ - *** Starting training ***
353
+ 2025-07-11 01:13:36 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
354
+ (model): DeepseekV2Model(
355
+ (embed_tokens): Embedding(102400, 2048)
356
+ (layers): ModuleList(
357
+ (0): DeepseekV2DecoderLayer(
358
+ (self_attn): DeepseekV2FlashAttention2(
359
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
360
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
361
+ (kv_a_layernorm): DeepseekV2RMSNorm()
362
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
363
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
364
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
365
+ )
366
+ (mlp): DeepseekV2MLP(
367
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
368
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
369
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
370
+ (act_fn): SiLU()
371
+ )
372
+ (input_layernorm): DeepseekV2RMSNorm()
373
+ (post_attention_layernorm): DeepseekV2RMSNorm()
374
+ )
375
+ (1-26): 26 x DeepseekV2DecoderLayer(
376
+ (self_attn): DeepseekV2FlashAttention2(
377
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
378
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
379
+ (kv_a_layernorm): DeepseekV2RMSNorm()
380
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
381
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
382
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
383
+ )
384
+ (mlp): DeepseekV2MoE(
385
+ (gate): MoEGate()
386
+ (shared_experts): DeepseekV2MLP(
387
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
388
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
389
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
390
+ (act_fn): SiLU()
391
+ )
392
+ (selected_experts): ModuleList(
393
+ (0-5): 6 x DeepseekV2MLP(
394
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
395
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
396
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
397
+ (act_fn): SiLU()
398
+ )
399
+ )
400
+ (experts): ModuleList()
401
+ )
402
+ (input_layernorm): DeepseekV2RMSNorm()
403
+ (post_attention_layernorm): DeepseekV2RMSNorm()
404
+ )
405
+ )
406
+ (norm): DeepseekV2RMSNorm()
407
+ )
408
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
409
+ )
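
The run above starts by loading a per-layer map of the six most-used routed experts from top_6_experts_lmms-lab_Math10K.json. A minimal sketch of loading and validating such a map, assuming the JSON shape shown in the log (the helper name is hypothetical; the 64-expert bound is inferred from the logged indices, which top out at 63):

import json

def load_top_k_experts(path, expected_k=6, n_routed_experts=64):
    """Load a {module name: [expert indices]} map like the one logged above."""
    with open(path) as f:
        top_k = json.load(f)
    for module_name, experts in top_k.items():
        assert len(experts) == expected_k, f"{module_name}: want {expected_k} experts"
        assert all(0 <= e < n_routed_experts for e in experts), module_name
    return top_k

The keys follow the module paths in the printed architecture ('model.layers.1.mlp' through 'model.layers.26.mlp'); layer 0 is already a dense DeepseekV2MLP and has no entry.
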
410
+ 2025-07-11 06:48:58 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
411
+ 2025-07-11 06:48:58 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
412
+ 2025-07-11 06:48:58 - INFO - __main__ - Training parameters EfficientDistillationConfig(
413
+ _n_gpu=1,
414
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
415
+ adafactor=False,
416
+ adam_beta1=0.9,
417
+ adam_beta2=0.999,
418
+ adam_epsilon=1e-08,
419
+ alpha=0.5,
420
+ auto_find_batch_size=False,
421
+ average_tokens_across_devices=False,
422
+ batch_eval_metrics=False,
423
+ benchmarks=[],
424
+ bf16=True,
425
+ bf16_full_eval=False,
426
+ callbacks=[],
427
+ ce_loss_scale=1.0,
428
+ chars_per_token=<CHARS_PER_TOKEN>,
429
+ chat_template=None,
430
+ completion_only_loss=None,
431
+ data_seed=None,
432
+ dataloader_drop_last=False,
433
+ dataloader_num_workers=0,
434
+ dataloader_persistent_workers=False,
435
+ dataloader_pin_memory=True,
436
+ dataloader_prefetch_factor=None,
437
+ dataset_batch_size=None,
438
+ dataset_kwargs=None,
439
+ dataset_num_proc=None,
440
+ dataset_text_field=text,
441
+ ddp_backend=None,
442
+ ddp_broadcast_buffers=None,
443
+ ddp_bucket_cap_mb=None,
444
+ ddp_find_unused_parameters=None,
445
+ ddp_timeout=1800000000,
446
+ debug=[],
447
+ deepspeed=None,
448
+ disable_dropout=True,
449
+ disable_tqdm=False,
450
+ do_eval=True,
451
+ do_predict=False,
452
+ do_train=False,
453
+ eos_token=<EOS_TOKEN>,
454
+ eval_accumulation_steps=None,
455
+ eval_delay=0,
456
+ eval_do_concat_batches=True,
457
+ eval_on_start=False,
458
+ eval_packing=None,
459
+ eval_steps=None,
460
+ eval_strategy=IntervalStrategy.NO,
461
+ eval_use_gather_object=False,
462
+ expert_num=6,
463
+ fp16=False,
464
+ fp16_backend=auto,
465
+ fp16_full_eval=False,
466
+ fp16_opt_level=O1,
467
+ fsdp=[],
468
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
469
+ fsdp_min_num_params=0,
470
+ fsdp_transformer_layer_cls_to_wrap=None,
471
+ full_determinism=False,
472
+ gradient_accumulation_steps=1,
473
+ gradient_checkpointing=False,
474
+ gradient_checkpointing_kwargs={'use_reentrant': False},
475
+ greater_is_better=None,
476
+ group_by_length=False,
477
+ half_precision_backend=auto,
478
+ hub_always_push=False,
479
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
480
+ hub_model_revision=main,
481
+ hub_private_repo=None,
482
+ hub_strategy=HubStrategy.EVERY_SAVE,
483
+ hub_token=<HUB_TOKEN>,
484
+ ignore_data_skip=False,
485
+ include_for_metrics=[],
486
+ include_inputs_for_metrics=False,
487
+ include_num_input_tokens_seen=False,
488
+ include_tokens_per_second=False,
489
+ jit_mode_eval=False,
490
+ kl_loss_scale=1.0,
491
+ label_names=None,
492
+ label_smoothing_factor=0.0,
493
+ learning_rate=1e-05,
494
+ length_column_name=length,
495
+ lmbda=0.0,
496
+ load_best_model_at_end=False,
497
+ local_rank=0,
498
+ log_level=info,
499
+ log_level_replica=warning,
500
+ log_on_each_node=True,
501
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_06-48-57_ip-172-31-67-236,
502
+ logging_first_step=False,
503
+ logging_nan_inf_filter=True,
504
+ logging_steps=1,
505
+ logging_strategy=IntervalStrategy.STEPS,
506
+ loss_type=token_specific,
507
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
508
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
509
+ max_grad_norm=1.0,
510
+ max_length=4096,
511
+ max_new_tokens=1024,
512
+ max_seq_length=None,
513
+ max_steps=-1,
514
+ metric_for_best_model=None,
515
+ model_init_kwargs=None,
516
+ mp_parameters=,
517
+ neftune_noise_alpha=None,
518
+ no_cuda=False,
519
+ num_of_sequences=None,
520
+ num_train_epochs=3,
521
+ optim=OptimizerNames.ADAMW_TORCH,
522
+ optim_args=None,
523
+ optim_target_modules=None,
524
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
525
+ overwrite_hub_revision=False,
526
+ overwrite_output_dir=True,
527
+ packing=False,
528
+ pad_to_multiple_of=None,
529
+ pad_token=<PAD_TOKEN>,
530
+ padding_free=False,
531
+ past_index=-1,
532
+ per_device_eval_batch_size=16,
533
+ per_device_train_batch_size=4,
534
+ prediction_loss_only=False,
535
+ push_to_hub=True,
536
+ push_to_hub_model_id=None,
537
+ push_to_hub_organization=None,
538
+ push_to_hub_revision=False,
539
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
540
+ ray_scope=last,
541
+ reduction=sum,
542
+ remove_unused_columns=True,
543
+ report_to=['wandb'],
544
+ restore_callback_states_from_checkpoint=False,
545
+ resume_from_checkpoint=None,
546
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
547
+ save_on_each_node=False,
548
+ save_only_model=False,
549
+ save_safetensors=True,
550
+ save_steps=100,
551
+ save_strategy=SaveStrategy.STEPS,
552
+ save_total_limit=1,
553
+ seed=1234,
554
+ skip_memory_metrics=True,
555
+ system_prompt=None,
556
+ teacher_model_init_kwargs=None,
557
+ teacher_model_name_or_path=None,
558
+ temperature=0.9,
559
+ tf32=None,
560
+ torch_compile=False,
561
+ torch_compile_backend=None,
562
+ torch_compile_mode=None,
563
+ torch_empty_cache_steps=None,
564
+ torchdynamo=None,
565
+ tpu_metrics_debug=False,
566
+ tpu_num_cores=None,
567
+ use_cpu=False,
568
+ use_ipex=False,
569
+ use_legacy_prediction_loop=False,
570
+ use_liger=False,
571
+ use_liger_kernel=False,
572
+ use_mps_device=False,
573
+ wandb_entity=None,
574
+ wandb_project=None,
575
+ warmup_ratio=0.1,
576
+ warmup_steps=0,
577
+ weight_decay=0.0,
578
+ )
579
+ 2025-07-11 06:48:58 - INFO - __main__ - *** Initializing model kwargs ***
580
+ 2025-07-11 06:48:58 - INFO - __main__ - Loaded top k experts from /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
581
+ 2025-07-11 06:48:58 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
582
+ Memory reserved: 0.0
583
+ 2025-07-11 06:49:09 - INFO - __main__ - Model memory after loading model:Memory allocated: 0.0
584
+ Memory reserved: 0.0
585
+ 2025-07-11 06:49:09 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
586
+ 2025-07-11 06:49:22 - INFO - __main__ - MoE layers replaced with Dense MLP layers
587
+ 2025-07-11 06:49:22 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 0.0
588
+ Memory reserved: 0.0
589
+ 2025-07-11 06:49:22 - INFO - __main__ - Initializing EfficientDistillationTrainer...
590
+ 2025-07-11 06:49:50 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 31126.0048828125
591
+ Memory reserved: 36896.0
592
+ 2025-07-11 06:49:50 - INFO - __main__ - *** Starting training ***
593
+ 2025-07-11 06:49:50 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
594
+ (model): DeepseekV2Model(
595
+ (embed_tokens): Embedding(102400, 2048)
596
+ (layers): ModuleList(
597
+ (0): DeepseekV2DecoderLayer(
598
+ (self_attn): DeepseekV2FlashAttention2(
599
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
600
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
601
+ (kv_a_layernorm): DeepseekV2RMSNorm()
602
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
603
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
604
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
605
+ )
606
+ (mlp): DeepseekV2MLP(
607
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
608
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
609
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
610
+ (act_fn): SiLU()
611
+ )
612
+ (input_layernorm): DeepseekV2RMSNorm()
613
+ (post_attention_layernorm): DeepseekV2RMSNorm()
614
+ )
615
+ (1-26): 26 x DeepseekV2DecoderLayer(
616
+ (self_attn): DeepseekV2FlashAttention2(
617
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
618
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
619
+ (kv_a_layernorm): DeepseekV2RMSNorm()
620
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
621
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
622
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
623
+ )
624
+ (mlp): DeepseekV2MoE(
625
+ (gate): MoEGate()
626
+ (shared_experts): DeepseekV2MLP(
627
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
628
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
629
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
630
+ (act_fn): SiLU()
631
+ )
632
+ (selected_experts): ModuleList(
633
+ (0-5): 6 x DeepseekV2MLP(
634
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
635
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
636
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
637
+ (act_fn): SiLU()
638
+ )
639
+ )
640
+ (experts): ModuleList()
641
+ )
642
+ (input_layernorm): DeepseekV2RMSNorm()
643
+ (post_attention_layernorm): DeepseekV2RMSNorm()
644
+ )
645
+ )
646
+ (norm): DeepseekV2RMSNorm()
647
+ )
648
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
649
+ )
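
The "Replacing MoE layers with dense layers using selected experts" step matches the printed architecture: each DeepseekV2MoE in layers 1-26 ends up with a six-element selected_experts ModuleList and an empty experts ModuleList, while shared_experts is untouched. A rough sketch of that surgery, using the attribute names from the dump (the replacement logic itself is an assumption, not the script's exact code):

import torch.nn as nn

def keep_selected_experts(model, top_k):
    for name, module in model.named_modules():
        if name in top_k:  # e.g. "model.layers.1.mlp", a DeepseekV2MoE
            module.selected_experts = nn.ModuleList(
                module.experts[i] for i in top_k[name]
            )
            module.experts = nn.ModuleList()  # drop the other 58 routed experts
    return model

How the router is handled afterwards is not visible in the log; MoEGate still appears in the dump, so it may still be used to weight the retained experts.
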
650
+ 2025-07-11 06:57:54 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
651
+ 2025-07-11 06:57:54 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
652
+ 2025-07-11 06:57:54 - INFO - __main__ - Training parameters EfficientDistillationConfig(
653
+ _n_gpu=1,
654
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
655
+ adafactor=False,
656
+ adam_beta1=0.9,
657
+ adam_beta2=0.999,
658
+ adam_epsilon=1e-08,
659
+ alpha=0.5,
660
+ auto_find_batch_size=False,
661
+ average_tokens_across_devices=False,
662
+ batch_eval_metrics=False,
663
+ benchmarks=[],
664
+ bf16=True,
665
+ bf16_full_eval=False,
666
+ callbacks=[],
667
+ ce_loss_scale=1.0,
668
+ chars_per_token=<CHARS_PER_TOKEN>,
669
+ chat_template=None,
670
+ completion_only_loss=None,
671
+ data_seed=None,
672
+ dataloader_drop_last=False,
673
+ dataloader_num_workers=0,
674
+ dataloader_persistent_workers=False,
675
+ dataloader_pin_memory=True,
676
+ dataloader_prefetch_factor=None,
677
+ dataset_batch_size=None,
678
+ dataset_kwargs=None,
679
+ dataset_num_proc=None,
680
+ dataset_text_field=text,
681
+ ddp_backend=None,
682
+ ddp_broadcast_buffers=None,
683
+ ddp_bucket_cap_mb=None,
684
+ ddp_find_unused_parameters=None,
685
+ ddp_timeout=1800000000,
686
+ debug=[],
687
+ deepspeed=None,
688
+ disable_dropout=True,
689
+ disable_tqdm=False,
690
+ do_eval=True,
691
+ do_predict=False,
692
+ do_train=False,
693
+ eos_token=<EOS_TOKEN>,
694
+ eval_accumulation_steps=None,
695
+ eval_delay=0,
696
+ eval_do_concat_batches=True,
697
+ eval_on_start=False,
698
+ eval_packing=None,
699
+ eval_steps=None,
700
+ eval_strategy=IntervalStrategy.NO,
701
+ eval_use_gather_object=False,
702
+ expert_num=6,
703
+ fp16=False,
704
+ fp16_backend=auto,
705
+ fp16_full_eval=False,
706
+ fp16_opt_level=O1,
707
+ fsdp=[],
708
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
709
+ fsdp_min_num_params=0,
710
+ fsdp_transformer_layer_cls_to_wrap=None,
711
+ full_determinism=False,
712
+ gradient_accumulation_steps=1,
713
+ gradient_checkpointing=False,
714
+ gradient_checkpointing_kwargs={'use_reentrant': False},
715
+ greater_is_better=None,
716
+ group_by_length=False,
717
+ half_precision_backend=auto,
718
+ hub_always_push=False,
719
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
720
+ hub_model_revision=main,
721
+ hub_private_repo=None,
722
+ hub_strategy=HubStrategy.EVERY_SAVE,
723
+ hub_token=<HUB_TOKEN>,
724
+ ignore_data_skip=False,
725
+ include_for_metrics=[],
726
+ include_inputs_for_metrics=False,
727
+ include_num_input_tokens_seen=False,
728
+ include_tokens_per_second=False,
729
+ jit_mode_eval=False,
730
+ kl_loss_scale=1.0,
731
+ label_names=None,
732
+ label_smoothing_factor=0.0,
733
+ learning_rate=1e-05,
734
+ length_column_name=length,
735
+ lmbda=0.0,
736
+ load_best_model_at_end=False,
737
+ local_rank=0,
738
+ log_level=info,
739
+ log_level_replica=warning,
740
+ log_on_each_node=True,
741
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_06-57-54_ip-172-31-67-236,
742
+ logging_first_step=False,
743
+ logging_nan_inf_filter=True,
744
+ logging_steps=1,
745
+ logging_strategy=IntervalStrategy.STEPS,
746
+ loss_type=token_specific,
747
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
748
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
749
+ max_grad_norm=1.0,
750
+ max_length=4096,
751
+ max_new_tokens=1024,
752
+ max_seq_length=None,
753
+ max_steps=-1,
754
+ metric_for_best_model=None,
755
+ model_init_kwargs=None,
756
+ mp_parameters=,
757
+ neftune_noise_alpha=None,
758
+ no_cuda=False,
759
+ num_of_sequences=None,
760
+ num_train_epochs=3,
761
+ optim=OptimizerNames.ADAMW_TORCH,
762
+ optim_args=None,
763
+ optim_target_modules=None,
764
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
765
+ overwrite_hub_revision=False,
766
+ overwrite_output_dir=True,
767
+ packing=False,
768
+ pad_to_multiple_of=None,
769
+ pad_token=<PAD_TOKEN>,
770
+ padding_free=False,
771
+ past_index=-1,
772
+ per_device_eval_batch_size=16,
773
+ per_device_train_batch_size=4,
774
+ prediction_loss_only=False,
775
+ push_to_hub=True,
776
+ push_to_hub_model_id=None,
777
+ push_to_hub_organization=None,
778
+ push_to_hub_revision=False,
779
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
780
+ ray_scope=last,
781
+ reduction=sum,
782
+ remove_unused_columns=True,
783
+ report_to=['wandb'],
784
+ restore_callback_states_from_checkpoint=False,
785
+ resume_from_checkpoint=None,
786
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
787
+ save_on_each_node=False,
788
+ save_only_model=False,
789
+ save_safetensors=True,
790
+ save_steps=100,
791
+ save_strategy=SaveStrategy.STEPS,
792
+ save_total_limit=1,
793
+ seed=1234,
794
+ skip_memory_metrics=True,
795
+ system_prompt=None,
796
+ teacher_model_init_kwargs=None,
797
+ teacher_model_name_or_path=None,
798
+ temperature=0.9,
799
+ tf32=None,
800
+ torch_compile=False,
801
+ torch_compile_backend=None,
802
+ torch_compile_mode=None,
803
+ torch_empty_cache_steps=None,
804
+ torchdynamo=None,
805
+ tpu_metrics_debug=False,
806
+ tpu_num_cores=None,
807
+ use_cpu=False,
808
+ use_ipex=False,
809
+ use_legacy_prediction_loop=False,
810
+ use_liger=False,
811
+ use_liger_kernel=False,
812
+ use_mps_device=False,
813
+ wandb_entity=None,
814
+ wandb_project=None,
815
+ warmup_ratio=0.1,
816
+ warmup_steps=0,
817
+ weight_decay=0.0,
818
+ )
819
+ 2025-07-11 06:57:56 - INFO - __main__ - *** Initializing model kwargs ***
820
+ 2025-07-11 06:57:56 - INFO - __main__ - Loaded top k experts from /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
821
+ 2025-07-11 06:57:56 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
822
+ Memory reserved: 0.0
823
+ 2025-07-11 06:58:06 - INFO - __main__ - Model memory after loading model:Memory allocated: 0.0
824
+ Memory reserved: 0.0
825
+ 2025-07-11 06:58:06 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
826
+ 2025-07-11 06:58:19 - INFO - __main__ - MoE layers replaced with Dense MLP layers
827
+ 2025-07-11 06:58:19 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 0.0
828
+ Memory reserved: 0.0
829
+ 2025-07-11 06:58:19 - INFO - __main__ - Initializing EfficientDistillationTrainer...
830
+ 2025-07-11 06:58:47 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 31126.0048828125
831
+ Memory reserved: 36896.0
832
+ 2025-07-11 06:58:47 - INFO - __main__ - *** Starting training ***
833
+ 2025-07-11 06:58:47 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
834
+ (model): DeepseekV2Model(
835
+ (embed_tokens): Embedding(102400, 2048)
836
+ (layers): ModuleList(
837
+ (0): DeepseekV2DecoderLayer(
838
+ (self_attn): DeepseekV2FlashAttention2(
839
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
840
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
841
+ (kv_a_layernorm): DeepseekV2RMSNorm()
842
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
843
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
844
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
845
+ )
846
+ (mlp): DeepseekV2MLP(
847
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
848
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
849
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
850
+ (act_fn): SiLU()
851
+ )
852
+ (input_layernorm): DeepseekV2RMSNorm()
853
+ (post_attention_layernorm): DeepseekV2RMSNorm()
854
+ )
855
+ (1-26): 26 x DeepseekV2DecoderLayer(
856
+ (self_attn): DeepseekV2FlashAttention2(
857
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
858
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
859
+ (kv_a_layernorm): DeepseekV2RMSNorm()
860
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
861
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
862
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
863
+ )
864
+ (mlp): DeepseekV2MoE(
865
+ (gate): MoEGate()
866
+ (shared_experts): DeepseekV2MLP(
867
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
868
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
869
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
870
+ (act_fn): SiLU()
871
+ )
872
+ (selected_experts): ModuleList(
873
+ (0-5): 6 x DeepseekV2MLP(
874
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
875
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
876
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
877
+ (act_fn): SiLU()
878
+ )
879
+ )
880
+ (experts): ModuleList()
881
+ )
882
+ (input_layernorm): DeepseekV2RMSNorm()
883
+ (post_attention_layernorm): DeepseekV2RMSNorm()
884
+ )
885
+ )
886
+ (norm): DeepseekV2RMSNorm()
887
+ )
888
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
889
+ )
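
The config above carries the distillation knobs: alpha=0.5, temperature=0.9, ce_loss_scale=1.0, kl_loss_scale=1.0, reduction=sum. A common way such knobs combine, shown here as an illustration of the standard CE-plus-KL mixture rather than the exact token_specific loss that EfficientDistillationTrainer implements:

import torch.nn.functional as F

def distill_loss(student_logits, teacher_logits, labels,
                 alpha=0.5, temperature=0.9, ce_scale=1.0, kl_scale=1.0):
    # Hard-label cross-entropy on the student (summed, matching reduction=sum).
    ce = F.cross_entropy(student_logits.flatten(0, 1), labels.flatten(),
                         ignore_index=-100, reduction="sum")
    # Soft-label KL against the teacher at temperature T, with the usual T^2 factor.
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    kl = F.kl_div(log_p_student, p_teacher, reduction="sum") * temperature ** 2
    return alpha * ce_scale * ce + (1 - alpha) * kl_scale * kl

loss_type=token_specific suggests per-token weighting of these terms, which is omitted here. Note also teacher_model_name_or_path=None: the teacher is presumably the original un-pruned MoE model rather than a separately loaded checkpoint.
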
890
+ 2025-07-11 07:10:39 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
891
+ 2025-07-11 07:10:39 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
892
+ 2025-07-11 07:10:39 - INFO - __main__ - Training parameters EfficientDistillationConfig(
893
+ _n_gpu=1,
894
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
895
+ adafactor=False,
896
+ adam_beta1=0.9,
897
+ adam_beta2=0.999,
898
+ adam_epsilon=1e-08,
899
+ alpha=0.5,
900
+ auto_find_batch_size=False,
901
+ average_tokens_across_devices=False,
902
+ batch_eval_metrics=False,
903
+ benchmarks=[],
904
+ bf16=True,
905
+ bf16_full_eval=False,
906
+ callbacks=[],
907
+ ce_loss_scale=1.0,
908
+ chars_per_token=<CHARS_PER_TOKEN>,
909
+ chat_template=None,
910
+ completion_only_loss=None,
911
+ data_seed=None,
912
+ dataloader_drop_last=False,
913
+ dataloader_num_workers=0,
914
+ dataloader_persistent_workers=False,
915
+ dataloader_pin_memory=True,
916
+ dataloader_prefetch_factor=None,
917
+ dataset_batch_size=None,
918
+ dataset_kwargs=None,
919
+ dataset_num_proc=None,
920
+ dataset_text_field=text,
921
+ ddp_backend=None,
922
+ ddp_broadcast_buffers=None,
923
+ ddp_bucket_cap_mb=None,
924
+ ddp_find_unused_parameters=None,
925
+ ddp_timeout=1800000000,
926
+ debug=[],
927
+ deepspeed=None,
928
+ disable_dropout=True,
929
+ disable_tqdm=False,
930
+ do_eval=True,
931
+ do_predict=False,
932
+ do_train=False,
933
+ eos_token=<EOS_TOKEN>,
934
+ eval_accumulation_steps=None,
935
+ eval_delay=0,
936
+ eval_do_concat_batches=True,
937
+ eval_on_start=False,
938
+ eval_packing=None,
939
+ eval_steps=None,
940
+ eval_strategy=IntervalStrategy.NO,
941
+ eval_use_gather_object=False,
942
+ expert_num=6,
943
+ fp16=False,
944
+ fp16_backend=auto,
945
+ fp16_full_eval=False,
946
+ fp16_opt_level=O1,
947
+ fsdp=[],
948
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
949
+ fsdp_min_num_params=0,
950
+ fsdp_transformer_layer_cls_to_wrap=None,
951
+ full_determinism=False,
952
+ gradient_accumulation_steps=1,
953
+ gradient_checkpointing=False,
954
+ gradient_checkpointing_kwargs={'use_reentrant': False},
955
+ greater_is_better=None,
956
+ group_by_length=False,
957
+ half_precision_backend=auto,
958
+ hub_always_push=False,
959
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
960
+ hub_model_revision=main,
961
+ hub_private_repo=None,
962
+ hub_strategy=HubStrategy.EVERY_SAVE,
963
+ hub_token=<HUB_TOKEN>,
964
+ ignore_data_skip=False,
965
+ include_for_metrics=[],
966
+ include_inputs_for_metrics=False,
967
+ include_num_input_tokens_seen=False,
968
+ include_tokens_per_second=False,
969
+ jit_mode_eval=False,
970
+ kl_loss_scale=1.0,
971
+ label_names=None,
972
+ label_smoothing_factor=0.0,
973
+ learning_rate=1e-05,
974
+ length_column_name=length,
975
+ lmbda=0.0,
976
+ load_best_model_at_end=False,
977
+ local_rank=0,
978
+ log_level=info,
979
+ log_level_replica=warning,
980
+ log_on_each_node=True,
981
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_07-10-38_ip-172-31-67-236,
982
+ logging_first_step=False,
983
+ logging_nan_inf_filter=True,
984
+ logging_steps=1,
985
+ logging_strategy=IntervalStrategy.STEPS,
986
+ loss_type=token_specific,
987
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
988
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
989
+ max_grad_norm=1.0,
990
+ max_length=4096,
991
+ max_new_tokens=1024,
992
+ max_seq_length=None,
993
+ max_steps=-1,
994
+ metric_for_best_model=None,
995
+ model_init_kwargs=None,
996
+ mp_parameters=,
997
+ neftune_noise_alpha=None,
998
+ no_cuda=False,
999
+ num_of_sequences=None,
1000
+ num_train_epochs=3,
1001
+ optim=OptimizerNames.ADAMW_TORCH,
1002
+ optim_args=None,
1003
+ optim_target_modules=None,
1004
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
1005
+ overwrite_hub_revision=False,
1006
+ overwrite_output_dir=True,
1007
+ packing=False,
1008
+ pad_to_multiple_of=None,
1009
+ pad_token=<PAD_TOKEN>,
1010
+ padding_free=False,
1011
+ past_index=-1,
1012
+ per_device_eval_batch_size=16,
1013
+ per_device_train_batch_size=4,
1014
+ prediction_loss_only=False,
1015
+ push_to_hub=True,
1016
+ push_to_hub_model_id=None,
1017
+ push_to_hub_organization=None,
1018
+ push_to_hub_revision=False,
1019
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
1020
+ ray_scope=last,
1021
+ reduction=sum,
1022
+ remove_unused_columns=True,
1023
+ report_to=['wandb'],
1024
+ restore_callback_states_from_checkpoint=False,
1025
+ resume_from_checkpoint=None,
1026
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
1027
+ save_on_each_node=False,
1028
+ save_only_model=False,
1029
+ save_safetensors=True,
1030
+ save_steps=100,
1031
+ save_strategy=SaveStrategy.STEPS,
1032
+ save_total_limit=1,
1033
+ seed=1234,
1034
+ skip_memory_metrics=True,
1035
+ system_prompt=None,
1036
+ teacher_model_init_kwargs=None,
1037
+ teacher_model_name_or_path=None,
1038
+ temperature=0.9,
1039
+ tf32=None,
1040
+ torch_compile=False,
1041
+ torch_compile_backend=None,
1042
+ torch_compile_mode=None,
1043
+ torch_empty_cache_steps=None,
1044
+ torchdynamo=None,
1045
+ tpu_metrics_debug=False,
1046
+ tpu_num_cores=None,
1047
+ use_cpu=False,
1048
+ use_ipex=False,
1049
+ use_legacy_prediction_loop=False,
1050
+ use_liger=False,
1051
+ use_liger_kernel=False,
1052
+ use_mps_device=False,
1053
+ wandb_entity=None,
1054
+ wandb_project=None,
1055
+ warmup_ratio=0.1,
1056
+ warmup_steps=0,
1057
+ weight_decay=0.0,
1058
+ )
1059
+ 2025-07-11 07:10:40 - INFO - __main__ - *** Initializing model kwargs ***
1060
+ 2025-07-11 07:10:40 - INFO - __main__ - Loaded top k experts from /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
1061
+ 2025-07-11 07:10:40 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
1062
+ Memory reserved: 0.0
1063
+ 2025-07-11 07:10:51 - INFO - __main__ - Model memory after loading model:Memory allocated: 0.0
1064
+ Memory reserved: 0.0
1065
+ 2025-07-11 07:10:51 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
1066
+ 2025-07-11 07:11:03 - INFO - __main__ - MoE layers replaced with Dense MLP layers
1067
+ 2025-07-11 07:11:03 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 0.0
1068
+ Memory reserved: 0.0
1069
+ 2025-07-11 07:11:04 - INFO - __main__ - Initializing EfficientDistillationTrainer...
1070
+ 2025-07-11 07:11:33 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 31126.0048828125
1071
+ Memory reserved: 36896.0
1072
+ 2025-07-11 07:11:33 - INFO - __main__ - *** Starting training ***
1073
+ 2025-07-11 07:11:33 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
1074
+ (model): DeepseekV2Model(
1075
+ (embed_tokens): Embedding(102400, 2048)
1076
+ (layers): ModuleList(
1077
+ (0): DeepseekV2DecoderLayer(
1078
+ (self_attn): DeepseekV2FlashAttention2(
1079
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
1080
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
1081
+ (kv_a_layernorm): DeepseekV2RMSNorm()
1082
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
1083
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
1084
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
1085
+ )
1086
+ (mlp): DeepseekV2MLP(
1087
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
1088
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
1089
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
1090
+ (act_fn): SiLU()
1091
+ )
1092
+ (input_layernorm): DeepseekV2RMSNorm()
1093
+ (post_attention_layernorm): DeepseekV2RMSNorm()
1094
+ )
1095
+ (1-26): 26 x DeepseekV2DecoderLayer(
1096
+ (self_attn): DeepseekV2FlashAttention2(
1097
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
1098
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
1099
+ (kv_a_layernorm): DeepseekV2RMSNorm()
1100
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
1101
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
1102
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
1103
+ )
1104
+ (mlp): DeepseekV2MoE(
1105
+ (gate): MoEGate()
1106
+ (shared_experts): DeepseekV2MLP(
1107
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
1108
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
1109
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
1110
+ (act_fn): SiLU()
1111
+ )
1112
+ (selected_experts): ModuleList(
1113
+ (0-5): 6 x DeepseekV2MLP(
1114
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
1115
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
1116
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
1117
+ (act_fn): SiLU()
1118
+ )
1119
+ )
1120
+ (experts): ModuleList()
1121
+ )
1122
+ (input_layernorm): DeepseekV2RMSNorm()
1123
+ (post_attention_layernorm): DeepseekV2RMSNorm()
1124
+ )
1125
+ )
1126
+ (norm): DeepseekV2RMSNorm()
1127
+ )
1128
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
1129
+ )
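
The memory lines appear to be torch.cuda.memory_allocated()/memory_reserved() reported in MiB (31126 MiB is roughly 30.4 GiB after trainer initialization); the zeros before that point suggest the model is first materialized off-GPU. A small helper that would reproduce these lines, including the missing space after "model:" in the original format string (the helper itself is an assumption):

import logging
import torch

logger = logging.getLogger("__main__")

def log_gpu_memory(stage):
    mib = 1024 ** 2
    allocated = torch.cuda.memory_allocated() / mib if torch.cuda.is_available() else 0.0
    reserved = torch.cuda.memory_reserved() / mib if torch.cuda.is_available() else 0.0
    logger.info("Model memory %s:Memory allocated: %s\nMemory reserved: %s",
                stage, allocated, reserved)
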
1130
+ 2025-07-11 07:19:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
1131
+ 2025-07-11 07:19:36 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
1132
+ 2025-07-11 07:19:36 - INFO - __main__ - Training parameters EfficientDistillationConfig(
1133
+ _n_gpu=1,
1134
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
1135
+ adafactor=False,
1136
+ adam_beta1=0.9,
1137
+ adam_beta2=0.999,
1138
+ adam_epsilon=1e-08,
1139
+ alpha=0.5,
1140
+ auto_find_batch_size=False,
1141
+ average_tokens_across_devices=False,
1142
+ batch_eval_metrics=False,
1143
+ benchmarks=[],
1144
+ bf16=True,
1145
+ bf16_full_eval=False,
1146
+ callbacks=[],
1147
+ ce_loss_scale=1.0,
1148
+ chars_per_token=<CHARS_PER_TOKEN>,
1149
+ chat_template=None,
1150
+ completion_only_loss=None,
1151
+ data_seed=None,
1152
+ dataloader_drop_last=False,
1153
+ dataloader_num_workers=0,
1154
+ dataloader_persistent_workers=False,
1155
+ dataloader_pin_memory=True,
1156
+ dataloader_prefetch_factor=None,
1157
+ dataset_batch_size=None,
1158
+ dataset_kwargs=None,
1159
+ dataset_num_proc=None,
1160
+ dataset_text_field=text,
1161
+ ddp_backend=None,
1162
+ ddp_broadcast_buffers=None,
1163
+ ddp_bucket_cap_mb=None,
1164
+ ddp_find_unused_parameters=None,
1165
+ ddp_timeout=1800000000,
1166
+ debug=[],
1167
+ deepspeed=None,
1168
+ disable_dropout=True,
1169
+ disable_tqdm=False,
1170
+ do_eval=True,
1171
+ do_predict=False,
1172
+ do_train=False,
1173
+ eos_token=<EOS_TOKEN>,
1174
+ eval_accumulation_steps=None,
1175
+ eval_delay=0,
1176
+ eval_do_concat_batches=True,
1177
+ eval_on_start=False,
1178
+ eval_packing=None,
1179
+ eval_steps=None,
1180
+ eval_strategy=IntervalStrategy.NO,
1181
+ eval_use_gather_object=False,
1182
+ expert_num=6,
1183
+ fp16=False,
1184
+ fp16_backend=auto,
1185
+ fp16_full_eval=False,
1186
+ fp16_opt_level=O1,
1187
+ fsdp=[],
1188
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
1189
+ fsdp_min_num_params=0,
1190
+ fsdp_transformer_layer_cls_to_wrap=None,
1191
+ full_determinism=False,
1192
+ gradient_accumulation_steps=2,
1193
+ gradient_checkpointing=False,
1194
+ gradient_checkpointing_kwargs={'use_reentrant': False},
1195
+ greater_is_better=None,
1196
+ group_by_length=False,
1197
+ half_precision_backend=auto,
1198
+ hub_always_push=False,
1199
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
1200
+ hub_model_revision=main,
1201
+ hub_private_repo=None,
1202
+ hub_strategy=HubStrategy.EVERY_SAVE,
1203
+ hub_token=<HUB_TOKEN>,
1204
+ ignore_data_skip=False,
1205
+ include_for_metrics=[],
1206
+ include_inputs_for_metrics=False,
1207
+ include_num_input_tokens_seen=False,
1208
+ include_tokens_per_second=False,
1209
+ jit_mode_eval=False,
1210
+ kl_loss_scale=1.0,
1211
+ label_names=None,
1212
+ label_smoothing_factor=0.0,
1213
+ learning_rate=1e-05,
1214
+ length_column_name=length,
1215
+ lmbda=0.0,
1216
+ load_best_model_at_end=False,
1217
+ local_rank=0,
1218
+ log_level=info,
1219
+ log_level_replica=warning,
1220
+ log_on_each_node=True,
1221
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_07-19-36_ip-172-31-67-236,
1222
+ logging_first_step=False,
1223
+ logging_nan_inf_filter=True,
1224
+ logging_steps=1,
1225
+ logging_strategy=IntervalStrategy.STEPS,
1226
+ loss_type=token_specific,
1227
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
1228
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
1229
+ max_grad_norm=1.0,
1230
+ max_length=4096,
1231
+ max_new_tokens=1024,
1232
+ max_seq_length=None,
1233
+ max_steps=-1,
1234
+ metric_for_best_model=None,
1235
+ model_init_kwargs=None,
1236
+ mp_parameters=,
1237
+ neftune_noise_alpha=None,
1238
+ no_cuda=False,
1239
+ num_of_sequences=None,
1240
+ num_train_epochs=3,
1241
+ optim=OptimizerNames.ADAMW_TORCH,
1242
+ optim_args=None,
1243
+ optim_target_modules=None,
1244
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
1245
+ overwrite_hub_revision=False,
1246
+ overwrite_output_dir=True,
1247
+ packing=False,
1248
+ pad_to_multiple_of=None,
1249
+ pad_token=<PAD_TOKEN>,
1250
+ padding_free=False,
1251
+ past_index=-1,
1252
+ per_device_eval_batch_size=16,
1253
+ per_device_train_batch_size=2,
1254
+ prediction_loss_only=False,
1255
+ push_to_hub=True,
1256
+ push_to_hub_model_id=None,
1257
+ push_to_hub_organization=None,
1258
+ push_to_hub_revision=False,
1259
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
1260
+ ray_scope=last,
1261
+ reduction=sum,
1262
+ remove_unused_columns=True,
1263
+ report_to=['wandb'],
1264
+ restore_callback_states_from_checkpoint=False,
1265
+ resume_from_checkpoint=None,
1266
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
1267
+ save_on_each_node=False,
1268
+ save_only_model=False,
1269
+ save_safetensors=True,
1270
+ save_steps=100,
1271
+ save_strategy=SaveStrategy.STEPS,
1272
+ save_total_limit=1,
1273
+ seed=1234,
1274
+ skip_memory_metrics=True,
1275
+ system_prompt=None,
1276
+ teacher_model_init_kwargs=None,
1277
+ teacher_model_name_or_path=None,
1278
+ temperature=0.9,
1279
+ tf32=None,
1280
+ torch_compile=False,
1281
+ torch_compile_backend=None,
1282
+ torch_compile_mode=None,
1283
+ torch_empty_cache_steps=None,
1284
+ torchdynamo=None,
1285
+ tpu_metrics_debug=False,
1286
+ tpu_num_cores=None,
1287
+ use_cpu=False,
1288
+ use_ipex=False,
1289
+ use_legacy_prediction_loop=False,
1290
+ use_liger=False,
1291
+ use_liger_kernel=False,
1292
+ use_mps_device=False,
1293
+ wandb_entity=None,
1294
+ wandb_project=None,
1295
+ warmup_ratio=0.1,
1296
+ warmup_steps=0,
1297
+ weight_decay=0.0,
1298
+ )
1299
+ 2025-07-11 07:19:37 - INFO - __main__ - *** Initializing model kwargs ***
1300
+ 2025-07-11 07:19:37 - INFO - __main__ - Loaded top k experts from /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
1301
+ 2025-07-11 07:19:37 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
1302
+ Memory reserved: 0.0
1303
+ 2025-07-11 07:19:48 - INFO - __main__ - Model memory after loading model:Memory allocated: 0.0
1304
+ Memory reserved: 0.0
1305
+ 2025-07-11 07:19:48 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
1306
+ 2025-07-11 07:20:01 - INFO - __main__ - MoE layers replaced with Dense MLP layers
1307
+ 2025-07-11 07:20:01 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 0.0
1308
+ Memory reserved: 0.0
1309
+ 2025-07-11 07:20:01 - INFO - __main__ - Initializing EfficientDistillationTrainer...
1310
+ 2025-07-11 07:20:31 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 31126.0048828125
1311
+ Memory reserved: 36896.0
1312
+ 2025-07-11 07:20:31 - INFO - __main__ - *** Starting training ***
1313
+ 2025-07-11 07:20:31 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
1314
+ (model): DeepseekV2Model(
1315
+ (embed_tokens): Embedding(102400, 2048)
1316
+ (layers): ModuleList(
1317
+ (0): DeepseekV2DecoderLayer(
1318
+ (self_attn): DeepseekV2FlashAttention2(
1319
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
1320
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
1321
+ (kv_a_layernorm): DeepseekV2RMSNorm()
1322
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
1323
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
1324
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
1325
+ )
1326
+ (mlp): DeepseekV2MLP(
1327
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
1328
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
1329
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
1330
+ (act_fn): SiLU()
1331
+ )
1332
+ (input_layernorm): DeepseekV2RMSNorm()
1333
+ (post_attention_layernorm): DeepseekV2RMSNorm()
1334
+ )
1335
+ (1-26): 26 x DeepseekV2DecoderLayer(
1336
+ (self_attn): DeepseekV2FlashAttention2(
1337
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
1338
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
1339
+ (kv_a_layernorm): DeepseekV2RMSNorm()
1340
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
1341
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
1342
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
1343
+ )
1344
+ (mlp): DeepseekV2MoE(
1345
+ (gate): MoEGate()
1346
+ (shared_experts): DeepseekV2MLP(
1347
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
1348
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
1349
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
1350
+ (act_fn): SiLU()
1351
+ )
1352
+ (selected_experts): ModuleList(
1353
+ (0-5): 6 x DeepseekV2MLP(
1354
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
1355
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
1356
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
1357
+ (act_fn): SiLU()
1358
+ )
1359
+ )
1360
+ (experts): ModuleList()
1361
+ )
1362
+ (input_layernorm): DeepseekV2RMSNorm()
1363
+ (post_attention_layernorm): DeepseekV2RMSNorm()
1364
+ )
1365
+ )
1366
+ (norm): DeepseekV2RMSNorm()
1367
+ )
1368
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
1369
+ )
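
Between the 07:10 and 07:19 runs the batch settings change: per_device_train_batch_size drops from 4 to 2 while gradient_accumulation_steps rises from 1 to 2, so the effective batch per optimizer step stays at 4 sequences on the single GPU (_n_gpu=1) while peak activation memory roughly halves. The arithmetic:

def effective_batch_size(per_device, grad_accum_steps, n_gpus=1):
    return per_device * grad_accum_steps * n_gpus

assert effective_batch_size(4, 1) == effective_batch_size(2, 2) == 4
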
1370
+ 2025-07-11 17:48:02 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
1371
+ 2025-07-11 17:48:02 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='lmms-lab/Math10K', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
1372
+ 2025-07-11 17:48:02 - INFO - __main__ - Training parameters EfficientDistillationConfig(
1373
+ _n_gpu=1,
1374
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
1375
+ adafactor=False,
1376
+ adam_beta1=0.9,
1377
+ adam_beta2=0.999,
1378
+ adam_epsilon=1e-08,
1379
+ alpha=0.5,
1380
+ auto_find_batch_size=False,
1381
+ average_tokens_across_devices=False,
1382
+ batch_eval_metrics=False,
1383
+ benchmarks=[],
1384
+ bf16=True,
1385
+ bf16_full_eval=False,
1386
+ callbacks=[],
1387
+ ce_loss_scale=1.0,
1388
+ chars_per_token=<CHARS_PER_TOKEN>,
1389
+ chat_template=None,
1390
+ completion_only_loss=None,
1391
+ data_seed=None,
1392
+ dataloader_drop_last=False,
1393
+ dataloader_num_workers=0,
1394
+ dataloader_persistent_workers=False,
1395
+ dataloader_pin_memory=True,
1396
+ dataloader_prefetch_factor=None,
1397
+ dataset_batch_size=None,
1398
+ dataset_kwargs=None,
1399
+ dataset_num_proc=None,
1400
+ dataset_text_field=text,
1401
+ ddp_backend=None,
1402
+ ddp_broadcast_buffers=None,
1403
+ ddp_bucket_cap_mb=None,
1404
+ ddp_find_unused_parameters=None,
1405
+ ddp_timeout=1800000000,
1406
+ debug=[],
1407
+ deepspeed=None,
1408
+ disable_dropout=True,
1409
+ disable_tqdm=False,
1410
+ do_eval=True,
1411
+ do_predict=False,
1412
+ do_train=False,
1413
+ eos_token=<EOS_TOKEN>,
1414
+ eval_accumulation_steps=None,
1415
+ eval_delay=0,
1416
+ eval_do_concat_batches=True,
1417
+ eval_on_start=False,
1418
+ eval_packing=None,
1419
+ eval_steps=None,
1420
+ eval_strategy=IntervalStrategy.NO,
1421
+ eval_use_gather_object=False,
1422
+ expert_num=6,
1423
+ fp16=False,
1424
+ fp16_backend=auto,
1425
+ fp16_full_eval=False,
1426
+ fp16_opt_level=O1,
1427
+ fsdp=[],
1428
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
1429
+ fsdp_min_num_params=0,
1430
+ fsdp_transformer_layer_cls_to_wrap=None,
1431
+ full_determinism=False,
1432
+ gradient_accumulation_steps=2,
1433
+ gradient_checkpointing=False,
1434
+ gradient_checkpointing_kwargs={'use_reentrant': False},
1435
+ greater_is_better=None,
1436
+ group_by_length=False,
1437
+ half_precision_backend=auto,
1438
+ hub_always_push=False,
1439
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Math10K-Distill-6-experts-test-may,
1440
+ hub_model_revision=main,
1441
+ hub_private_repo=None,
1442
+ hub_strategy=HubStrategy.EVERY_SAVE,
1443
+ hub_token=<HUB_TOKEN>,
1444
+ ignore_data_skip=False,
1445
+ include_for_metrics=[],
1446
+ include_inputs_for_metrics=False,
1447
+ include_num_input_tokens_seen=False,
1448
+ include_tokens_per_second=False,
1449
+ jit_mode_eval=False,
1450
+ kl_loss_scale=1.0,
1451
+ label_names=None,
1452
+ label_smoothing_factor=0.0,
1453
+ learning_rate=1e-05,
1454
+ length_column_name=length,
1455
+ lmbda=0.0,
1456
+ load_best_model_at_end=False,
1457
+ local_rank=0,
1458
+ log_level=info,
1459
+ log_level_replica=warning,
1460
+ log_on_each_node=True,
1461
+ logging_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/runs/Jul11_17-48-02_ip-172-31-67-236,
1462
+ logging_first_step=False,
1463
+ logging_nan_inf_filter=True,
1464
+ logging_steps=1,
1465
+ logging_strategy=IntervalStrategy.STEPS,
1466
+ loss_type=token_specific,
1467
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
1468
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
1469
+ max_grad_norm=1.0,
1470
+ max_length=4096,
1471
+ max_new_tokens=1024,
1472
+ max_seq_length=None,
1473
+ max_steps=-1,
1474
+ metric_for_best_model=None,
1475
+ model_init_kwargs=None,
1476
+ mp_parameters=,
1477
+ neftune_noise_alpha=None,
1478
+ no_cuda=False,
1479
+ num_of_sequences=None,
1480
+ num_train_epochs=3,
1481
+ optim=OptimizerNames.ADAMW_TORCH,
1482
+ optim_args=None,
1483
+ optim_target_modules=None,
1484
+ output_dir=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
1485
+ overwrite_hub_revision=False,
1486
+ overwrite_output_dir=True,
1487
+ packing=False,
1488
+ pad_to_multiple_of=None,
1489
+ pad_token=<PAD_TOKEN>,
1490
+ padding_free=False,
1491
+ past_index=-1,
1492
+ per_device_eval_batch_size=16,
1493
+ per_device_train_batch_size=2,
1494
+ prediction_loss_only=False,
1495
+ push_to_hub=True,
1496
+ push_to_hub_model_id=None,
1497
+ push_to_hub_organization=None,
1498
+ push_to_hub_revision=False,
1499
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
1500
+ ray_scope=last,
1501
+ reduction=sum,
1502
+ remove_unused_columns=True,
1503
+ report_to=['wandb'],
1504
+ restore_callback_states_from_checkpoint=False,
1505
+ resume_from_checkpoint=None,
1506
+ run_name=/home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may,
1507
+ save_on_each_node=False,
1508
+ save_only_model=False,
1509
+ save_safetensors=True,
1510
+ save_steps=100,
1511
+ save_strategy=SaveStrategy.STEPS,
1512
+ save_total_limit=1,
1513
+ seed=1234,
1514
+ skip_memory_metrics=True,
1515
+ system_prompt=None,
1516
+ teacher_model_init_kwargs=None,
1517
+ teacher_model_name_or_path=None,
1518
+ temperature=0.9,
1519
+ tf32=None,
1520
+ torch_compile=False,
1521
+ torch_compile_backend=None,
1522
+ torch_compile_mode=None,
1523
+ torch_empty_cache_steps=None,
1524
+ torchdynamo=None,
1525
+ tpu_metrics_debug=False,
1526
+ tpu_num_cores=None,
1527
+ use_cpu=False,
1528
+ use_ipex=False,
1529
+ use_legacy_prediction_loop=False,
1530
+ use_liger=False,
1531
+ use_liger_kernel=False,
1532
+ use_mps_device=False,
1533
+ wandb_entity=None,
1534
+ wandb_project=None,
1535
+ warmup_ratio=0.1,
1536
+ warmup_steps=0,
1537
+ weight_decay=0.0,
1538
+ )
1539
+ 2025-07-11 17:48:03 - INFO - __main__ - *** Initializing model kwargs ***
1540
+ 2025-07-11 17:48:03 - INFO - __main__ - Loaded top k experts from /home/ubuntu/sft_models/data/DeepSeek-Coder-V2-Lite-Instruct/distill/math10K/6_experts_test_may/top_6_experts_lmms-lab_Math10K.json: {'model.layers.1.mlp': [51, 61, 44, 45, 14, 22], 'model.layers.2.mlp': [27, 25, 18, 13, 3, 23], 'model.layers.3.mlp': [54, 25, 41, 23, 28, 57], 'model.layers.4.mlp': [37, 21, 33, 49, 11, 14], 'model.layers.5.mlp': [54, 47, 35, 20, 52, 9], 'model.layers.6.mlp': [22, 1, 13, 45, 42, 47], 'model.layers.7.mlp': [58, 43, 24, 18, 44, 62], 'model.layers.8.mlp': [47, 39, 56, 30, 54, 58], 'model.layers.9.mlp': [31, 13, 22, 24, 12, 32], 'model.layers.10.mlp': [47, 19, 42, 2, 13, 22], 'model.layers.11.mlp': [29, 11, 17, 10, 59, 22], 'model.layers.12.mlp': [5, 56, 3, 59, 4, 26], 'model.layers.13.mlp': [10, 42, 58, 14, 47, 17], 'model.layers.14.mlp': [51, 7, 27, 18, 31, 61], 'model.layers.15.mlp': [24, 55, 5, 17, 14, 41], 'model.layers.16.mlp': [61, 33, 63, 49, 19, 9], 'model.layers.17.mlp': [0, 26, 43, 32, 27, 29], 'model.layers.18.mlp': [5, 56, 42, 36, 2, 1], 'model.layers.19.mlp': [2, 23, 24, 36, 40, 0], 'model.layers.20.mlp': [1, 56, 38, 20, 48, 58], 'model.layers.21.mlp': [5, 13, 15, 28, 19, 10], 'model.layers.22.mlp': [58, 32, 31, 3, 45, 14], 'model.layers.23.mlp': [20, 0, 58, 45, 33, 42], 'model.layers.24.mlp': [62, 7, 42, 47, 10, 63], 'model.layers.25.mlp': [45, 48, 39, 11, 46, 38], 'model.layers.26.mlp': [46, 49, 6, 13, 11, 57]}
1541
+ 2025-07-11 17:48:03 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0
1542
+ Memory reserved: 0.0
1543
+ 2025-07-11 17:48:13 - INFO - __main__ - Model memory after loading model:Memory allocated: 0.0
1544
+ Memory reserved: 0.0
1545
+ 2025-07-11 17:48:13 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts...
1546
+ 2025-07-11 17:48:26 - INFO - __main__ - MoE layers replaced with Dense MLP layers
1547
+ 2025-07-11 17:48:26 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 0.0
1548
+ Memory reserved: 0.0
1549
+ 2025-07-11 17:48:26 - INFO - __main__ - Initializing EfficientDistillationTrainer...
1550
+ 2025-07-11 17:48:53 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 31126.0048828125
1551
+ Memory reserved: 36896.0
1552
+ 2025-07-11 17:48:53 - INFO - __main__ - *** Starting training ***
1553
+ 2025-07-11 17:48:53 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM(
1554
+ (model): DeepseekV2Model(
1555
+ (embed_tokens): Embedding(102400, 2048)
1556
+ (layers): ModuleList(
1557
+ (0): DeepseekV2DecoderLayer(
1558
+ (self_attn): DeepseekV2FlashAttention2(
1559
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
1560
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
1561
+ (kv_a_layernorm): DeepseekV2RMSNorm()
1562
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
1563
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
1564
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
1565
+ )
1566
+ (mlp): DeepseekV2MLP(
1567
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
1568
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
1569
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
1570
+ (act_fn): SiLU()
1571
+ )
1572
+ (input_layernorm): DeepseekV2RMSNorm()
1573
+ (post_attention_layernorm): DeepseekV2RMSNorm()
1574
+ )
1575
+ (1-26): 26 x DeepseekV2DecoderLayer(
1576
+ (self_attn): DeepseekV2FlashAttention2(
1577
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
1578
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
1579
+ (kv_a_layernorm): DeepseekV2RMSNorm()
1580
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
1581
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
1582
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
1583
+ )
1584
+ (mlp): DeepseekV2MoE(
1585
+ (gate): MoEGate()
1586
+ (shared_experts): DeepseekV2MLP(
1587
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
1588
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
1589
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
1590
+ (act_fn): SiLU()
1591
+ )
1592
+ (selected_experts): ModuleList(
1593
+ (0-5): 6 x DeepseekV2MLP(
1594
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
1595
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
1596
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
1597
+ (act_fn): SiLU()
1598
+ )
1599
+ )
1600
+ (experts): ModuleList()
1601
+ )
1602
+ (input_layernorm): DeepseekV2RMSNorm()
1603
+ (post_attention_layernorm): DeepseekV2RMSNorm()
1604
+ )
1605
+ )
1606
+ (norm): DeepseekV2RMSNorm()
1607
+ )
1608
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
1609
+ )
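
Given the converted layout above (shared_experts plus six retained selected_experts and no routed experts left), a dense forward plausibly runs every retained expert on every token and sums the paths, collapsing top-k routing entirely. A sketch under that assumption; the real module may still use MoEGate to weight the six experts:

def dense_moe_forward(mlp, hidden_states):
    # Hypothetical forward for a converted layer: all six retained experts
    # fire on every token; no top-k dispatch remains.
    out = mlp.shared_experts(hidden_states)
    for expert in mlp.selected_experts:
        out = out + expert(hidden_states)
    return out
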
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d11f75edb31adcd5e06b84a9f8b40410ec6468eff322179e3166e266ad80038
3
+ size 8184
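
training_args.bin is committed as a Git LFS pointer, so only the oid and the 8184-byte size live in the repo. After a git lfs pull, the real file is the pickled EfficientDistillationConfig dumped in the log above; it can be inspected with torch.load, using weights_only=False because it is a pickled config object rather than a tensor checkpoint (the class defining it must be importable at load time):

import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.gradient_accumulation_steps)
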