diff --git "a/training.log" "b/training.log" --- "a/training.log" +++ "b/training.log" @@ -28089,3 +28089,6725 @@ Memory reserved: 7812.0 ) (lm_head): Linear(in_features=2048, out_features=102400, bias=False) ) +2025-05-01 01:58:14 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 01:58:14 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 01:58:14 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_01-58-13_q-h100, +logging_first_step=False, 
+logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 01:58:16 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 01:58:16 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 01:58:16 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 01:58:40 - INFO - __main__ 
- Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 01:58:40 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 01:58:53 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 01:58:53 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 01:58:53 - INFO - __main__ - Initializing EfficientDistillationTrainer... +2025-05-01 01:59:18 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 01:59:18 - INFO - __main__ - *** Starting training *** +2025-05-01 01:59:18 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:03:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:03:36 - INFO - 
__main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:03:36 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-03-35_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, 
+prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:03:38 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:03:38 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:03:38 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:03:52 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
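The first run above loads data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json, logs "Replacing MoE layers with dense layers using selected experts...", and the subsequent architecture dump shows each DeepseekV2MoE block with a 6-entry selected_experts list and an empty experts list. The following is a minimal sketch of how such a mapping could be applied; the function name and replacement strategy are assumptions rather than the script's actual code, and the gate/forward wiring that would have to be redirected to the kept experts is not shown.

```python
# Sketch only: prune DeepSeek-V2 MoE blocks down to the experts listed in a
# top_6_experts.json-style mapping. Names other than those visible in the log
# (e.g. `experts`, the dotted layer names) are assumptions about the real script.
import json
import torch.nn as nn

def replace_moe_with_selected_experts(model: nn.Module, mapping_path: str) -> None:
    with open(mapping_path) as f:
        top_experts = json.load(f)  # {"model.layers.1.mlp": [45, 51, 44, 61, 22, 14], ...}

    for module_name, expert_ids in top_experts.items():
        moe = model.get_submodule(module_name)                    # DeepseekV2MoE block
        # Keep only the selected experts, mirroring the (selected_experts) / empty
        # (experts) ModuleLists seen in the first architecture dump.
        moe.selected_experts = nn.ModuleList(moe.experts[i] for i in expert_ids)
        moe.experts = nn.ModuleList()                             # drop the unused experts
        # NOTE: the MoE forward / MoEGate routing would also need to be remapped to
        # `selected_experts`; that part is not sketched here.
```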
+2025-05-01 02:04:18 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 13120.0 +2025-05-01 02:04:18 - INFO - __main__ - *** Starting training *** +2025-05-01 02:04:18 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:08:06 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:08:06 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:08:06 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-08-06_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:08:09 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:08:09 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:08:09 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:09:08 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:09:08 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:09:08 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, 
+chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-09-07_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, 
+torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:09:12 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:09:12 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:09:12 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:09:12 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
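The "Memory allocated / Memory reserved" entries throughout the log appear to report CUDA memory in MiB (e.g. 9670.91943359375 / 13120.0 after trainer initialization). A hedged sketch of a helper that would produce lines in that format follows; the helper name and exact wording are assumptions, only the torch.cuda calls are standard.

```python
# Sketch only: reproduce the "Model memory <stage>:Memory allocated: ... /
# Memory reserved: ..." log lines, reporting CUDA memory in MiB.
import logging
import torch

logger = logging.getLogger(__name__)

def log_gpu_memory(stage: str) -> None:
    allocated_mib = torch.cuda.memory_allocated() / 1024**2
    reserved_mib = torch.cuda.memory_reserved() / 1024**2
    logger.info("Model memory %s:Memory allocated: %s\nMemory reserved: %s",
                stage, allocated_mib, reserved_mib)
```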
+2025-05-01 02:09:51 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 13120.0 +2025-05-01 02:09:51 - INFO - __main__ - *** Starting training *** +2025-05-01 02:09:51 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:15:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:15:36 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:15:36 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-15-36_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:15:38 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:15:38 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:15:38 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:15:38 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
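The config dumps above set loss_type=forward_kl, temperature=0.9 and reduction=sum. How EfficientDistillationTrainer actually combines teacher and student logits is not visible in the log; the sketch below is only a generic forward-KL distillation loss consistent with those settings, and the temperature-squared scaling is the usual distillation convention rather than something the log confirms.

```python
# Sketch only: a forward-KL (teacher || student) distillation loss matching
# loss_type=forward_kl, temperature=0.9, reduction=sum from the config above.
import torch
import torch.nn.functional as F

def forward_kl_loss(student_logits: torch.Tensor,
                    teacher_logits: torch.Tensor,
                    temperature: float = 0.9) -> torch.Tensor:
    # Forward KL pushes the student to cover the teacher's full distribution.
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(student_log_probs, teacher_probs, reduction="sum") * temperature**2
```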
+2025-05-01 02:16:20 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 13120.0 +2025-05-01 02:16:20 - INFO - __main__ - *** Starting training *** +2025-05-01 02:16:20 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:20:57 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:20:57 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:20:57 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-20-57_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:21:00 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:21:00 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:21:00 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:21:00 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
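The scheduler settings above (lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR with lr_scheduler_kwargs={'min_lr_rate': 0.1} and warmup_ratio=0.1) imply a linear warmup followed by a cosine decay that bottoms out at 10% of the peak learning_rate=5e-05. The sketch below shows that multiplier under the standard formulation; the transformers implementation of the schedule may differ in details.

```python
# Sketch only: LR multiplier for linear warmup + cosine decay to a 10% floor.
import math

def lr_multiplier(step: int, total_steps: int,
                  warmup_ratio: float = 0.1, min_lr_rate: float = 0.1) -> float:
    warmup_steps = int(total_steps * warmup_ratio)
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
    return min_lr_rate + (1.0 - min_lr_rate) * cosine

# With learning_rate=5e-05 the schedule would therefore floor at about 5e-06.
```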
+2025-05-01 02:21:41 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 13120.0 +2025-05-01 02:21:41 - INFO - __main__ - *** Starting training *** +2025-05-01 02:21:41 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:26:19 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:26:19 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:26:19 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-26-19_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:26:22 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:26:22 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:26:22 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:26:22 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 02:27:03 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 13120.0 +2025-05-01 02:27:03 - INFO - __main__ - *** Starting training *** +2025-05-01 02:27:03 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:27:50 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:27:50 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:27:50 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-27-49_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:27:55 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:27:55 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:27:55 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:27:55 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 02:28:36 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 13120.0 +2025-05-01 02:28:36 - INFO - __main__ - *** Starting training *** +2025-05-01 02:28:36 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 02:45:12 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:45:12 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:45:12 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-45-11_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:45:15 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:45:15 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:45:15 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:48:52 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:48:52 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:48:52 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, 
+chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-48-52_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, 
+torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:48:55 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:48:55 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:48:55 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:49:18 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 02:49:18 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... 
+2025-05-01 02:51:10 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:51:10 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:51:10 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-51-10_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, 
+max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:51:13 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:51:13 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:51:13 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:51:38 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 02:51:38 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... 
+2025-05-01 02:59:16 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 02:59:16 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 02:59:16 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_02-59-16_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, 
+max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 02:59:19 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 02:59:19 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 02:59:19 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 02:59:36 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 4836.39697265625 +Memory reserved: 6442.0 +2025-05-01 02:59:36 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 03:00:01 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 12800.0 +2025-05-01 03:00:01 - INFO - __main__ - *** Starting training *** +2025-05-01 03:00:01 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 03:09:31 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 03:09:31 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 03:09:31 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_03-09-30_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, 
+save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 03:09:33 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 03:09:33 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 03:09:33 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 03:09:48 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 4836.39697265625 +Memory reserved: 6442.0 +2025-05-01 03:09:49 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 03:10:15 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 9670.91943359375 +Memory reserved: 12800.0 +2025-05-01 03:10:15 - INFO - __main__ - *** Starting training *** +2025-05-01 03:10:15 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (experts): ModuleList( + (0-63): 64 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 03:22:33 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 03:22:33 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 03:22:33 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, 
+adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +bf16=True, +bf16_full_eval=False, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +generation_config=None, +generation_max_length=None, +generation_num_beams=None, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +ignore_unexpected_keys=True, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.1, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_03-22-33_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_new_tokens=900, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_output_dir=True, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +predict_with_generate=False, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_token=, +ray_scope=last, +reduction=batchmean, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +sortish_sampler=False, +split_batches=None, +teacher_model_init_kwargs=None, +temperature=1.0, +tf32=None, +torch_compile=False, 
+torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger_kernel=False, +use_mps_device=False, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 03:23:29 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 03:23:29 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 03:23:29 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, 
+logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_03-23-28_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 03:23:31 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 03:23:31 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 03:23:31 - INFO - __main__ - 
Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 03:23:55 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 03:23:55 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 03:24:07 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 03:24:07 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 03:24:07 - INFO - __main__ - Initializing EfficientDistillationTrainer... +2025-05-01 03:24:32 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 03:24:32 - INFO - __main__ - *** Starting training *** +2025-05-01 03:24:32 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 03:29:11 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, 
load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 03:29:11 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 03:29:11 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_03-29-10_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, 
+overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 03:29:14 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 03:29:14 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 03:29:14 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 03:29:37 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 03:29:37 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 03:29:50 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 03:29:50 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 03:29:50 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
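The "Loaded top k experts" entries above read a JSON file that maps each MoE block's module path (model.layers.1.mlp through model.layers.26.mlp) to the six expert indices retained for that layer. A minimal sketch of loading and inspecting such a file, assuming it is plain JSON as the .json path suggests; the loop is illustrative and not the script's own code:

import json

# Keys are module paths ("model.layers.1.mlp"); values are the 6 of 64 experts to keep.
with open("data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json") as f:
    top_k_experts = json.load(f)

for layer_name, expert_ids in top_k_experts.items():
    print(f"{layer_name}: keep experts {sorted(expert_ids)}")

Layer 0 is absent from the mapping, which is consistent with the printed architecture: only layers 1-26 use DeepseekV2MoE, while layer 0 keeps a dense DeepseekV2MLP.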
+2025-05-01 03:30:15 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 03:30:15 - INFO - __main__ - *** Starting training *** +2025-05-01 03:30:15 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 03:34:30 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 03:34:30 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 03:34:30 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_03-34-29_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 03:34:32 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 03:34:32 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 03:34:32 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 03:34:54 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 03:34:54 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 03:35:09 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 03:35:09 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 03:35:09 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
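The step logged as "Replacing MoE layers with dense layers using selected experts" produces the layout shown in the architecture dumps that follow each "*** Starting training ***" line: every DeepseekV2MoE keeps its gate and shared_experts, gains a selected_experts ModuleList holding the six chosen DeepseekV2MLP modules, and is left with an empty experts ModuleList. A sketch of how that pruning could be expressed, reusing the top_k_experts mapping from the JSON sketch above; the function name, and the routing changes the forward pass would also need, are assumptions rather than the script's implementation:

import torch.nn as nn

def prune_moe_experts(model, top_k_experts):
    # Move the selected experts into a new ModuleList and drop the rest,
    # mirroring the selected_experts / empty experts layout in the printed model.
    for layer_name, expert_ids in top_k_experts.items():
        moe = model.get_submodule(layer_name)        # DeepseekV2MoE block
        kept = [moe.experts[i] for i in expert_ids]  # the 6 experts chosen for this layer
        moe.selected_experts = nn.ModuleList(kept)
        moe.experts = nn.ModuleList()                # releases the remaining 58 experts
    return model

Dropping the unused experts is consistent with the lower "Memory allocated" reading logged after the replacement step.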
+2025-05-01 03:35:32 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 03:35:32 - INFO - __main__ - *** Starting training *** +2025-05-01 03:35:32 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 03:40:34 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 03:40:34 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 03:40:34 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_03-40-33_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 03:40:36 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 03:40:36 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 03:40:36 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 03:40:58 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 03:40:58 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 03:41:13 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 03:41:13 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 03:41:13 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
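The paired "Memory allocated / Memory reserved" readings appear before loading, after loading, after the MoE replacement, and after trainer initialization. A plausible way to produce them, assuming the figures are MiB taken from torch.cuda (the helper name and formatting are illustrative):

import torch

def log_gpu_memory(tag: str) -> None:
    allocated = torch.cuda.memory_allocated() / 1024**2  # MiB currently held by tensors
    reserved = torch.cuda.memory_reserved() / 1024**2    # MiB held by the caching allocator
    print(f"{tag}:Memory allocated: {allocated}")
    print(f"Memory reserved: {reserved}")

Reserved stays at or above allocated because PyTorch's caching allocator keeps freed blocks around for reuse, which matches every pair of readings in this log.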
+2025-05-01 03:41:36 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 03:41:36 - INFO - __main__ - *** Starting training *** +2025-05-01 03:41:36 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 04:16:53 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 04:16:53 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 04:16:53 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_04-16-53_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6800, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 04:16:57 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 04:16:57 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 04:16:57 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 04:17:19 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 04:17:19 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 04:17:35 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 04:17:35 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 04:17:35 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
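The EfficientDistillationConfig dumps in this stretch of the log set loss_type=forward_kl, temperature=0.9, and reduction=sum. A generic sketch of that loss, under the assumption that it is the usual KL(teacher || student) on temperature-scaled logits; the trainer's actual masking and scaling are not visible here:

import torch.nn.functional as F

def forward_kl_loss(student_logits, teacher_logits, temperature=0.9, reduction="sum"):
    # KL(teacher || student) computed on temperature-scaled distributions.
    teacher_logp = F.log_softmax(teacher_logits / temperature, dim=-1)
    student_logp = F.log_softmax(student_logits / temperature, dim=-1)
    return F.kl_div(student_logp, teacher_logp, log_target=True, reduction=reduction)

Note that teacher_model_name_or_path is None in these configs, so how the teacher is obtained is not visible from the log.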
+2025-05-01 04:19:08 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 04:19:08 - INFO - __main__ - *** Starting training *** +2025-05-01 04:19:08 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 05:02:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 05:02:36 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 05:02:36 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_05-02-36_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=7000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 05:02:39 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 05:02:39 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 05:02:39 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 05:03:02 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 05:03:02 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 05:03:17 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 05:03:17 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 05:03:17 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
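Every run in this part of the log sets resume_from_checkpoint to the step-20 checkpoint under the output directory, with save_steps=20 and save_total_limit=1. Assuming EfficientDistillationTrainer follows the transformers Trainer interface (an assumption; only the class name appears in the log), resuming would look like the sketch below:

from transformers import Trainer  # stand-in type; the script uses EfficientDistillationTrainer

def resume_training(trainer: Trainer) -> None:
    # Restores model weights, optimizer/scheduler state, and (since ignore_data_skip=False)
    # the dataloader position from the checkpoint named in resume_from_checkpoint.
    checkpoint = ("/home/deepseek/hector/test/data/"
                  "DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20")
    trainer.train(resume_from_checkpoint=checkpoint)

Each restart resumes from checkpoint-20, suggesting no later checkpoint was written before the preceding run stopped; with save_total_limit=1, only the most recent step checkpoint is kept on disk.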
+2025-05-01 05:03:41 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 05:03:41 - INFO - __main__ - *** Starting training *** +2025-05-01 05:03:41 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 05:36:21 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 05:36:21 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 05:36:21 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_05-36-21_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 05:36:23 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:36:24 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:36:24 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 05:36:24 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:36:24 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:36:24 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:36:24 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:36:24 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. 
+2025-05-01 05:37:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 05:37:36 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 05:37:36 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_05-37-36_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, 
+max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 05:37:39 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:37:40 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. +2025-05-01 05:37:40 - ERROR - __main__ - Top k experts file not found at data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts_open-r1_OpenR1-Math-220k.json. Run part 1 first. 
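The repeated "Top k experts file not found ... Run part 1 first." errors indicate that each process checks for a dataset-specific top-k experts JSON before building the pruned student model. A rough sketch of such a guard, assuming the path is derived from the output directory and the dataset name with "/" replaced by "_" (the function name and path construction are illustrative):

import json
import logging
import os

logger = logging.getLogger(__name__)

def load_top_k_experts(output_dir: str, k: int, dataset_name: str):
    # Part 1 of the pipeline is expected to have written this file.
    file_name = f"top_{k}_experts_{dataset_name.replace('/', '_')}.json"
    path = os.path.join(output_dir, file_name)
    if not os.path.exists(path):
        logger.error("Top k experts file not found at %s. Run part 1 first.", path)
        return None
    with open(path) as f:
        return json.load(f)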
+2025-05-01 05:39:58 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 05:39:58 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 05:39:58 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_05-39-58_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, 
+max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 05:40:01 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 05:40:01 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 05:40:01 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 05:40:25 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 05:40:25 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... 
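Judging by the architecture dumps in this log, "replacing MoE layers with dense layers" leaves layers 1-26 as DeepseekV2MoE modules whose routed experts list is emptied while the six selected experts and the shared experts are kept, which also matches the drop in allocated memory from ~4836 MB to ~1404 MB. A minimal sketch of that pruning step, assuming the loaded top_6_experts.json mapping is applied directly to the expert ModuleLists (the helper name is illustrative):

import torch.nn as nn

def prune_moe_to_selected(model: nn.Module, top_k_experts: dict) -> None:
    # top_k_experts maps e.g. "model.layers.1.mlp" -> [45, 51, 44, 61, 22, 14]
    for module_name, indices in top_k_experts.items():
        moe = model.get_submodule(module_name)
        # Keep only the selected routed experts; shared_experts stays untouched.
        moe.selected_experts = nn.ModuleList(moe.experts[i] for i in indices)
        moe.experts = nn.ModuleList()

How the gate's routing is remapped onto the surviving experts is not visible in the log, so that detail is left out of the sketch.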
+2025-05-01 05:40:37 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 05:40:37 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 05:40:37 - INFO - __main__ - Initializing EfficientDistillationTrainer... +2025-05-01 05:42:09 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 05:42:09 - INFO - __main__ - *** Starting training *** +2025-05-01 05:42:09 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 06:39:32 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 06:39:32 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', 
gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 06:39:32 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_06-39-32_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, 
+ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 06:39:35 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 06:39:35 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 06:39:35 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 06:39:58 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 06:39:58 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 06:40:11 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 06:40:11 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 06:40:11 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
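With loss_type=forward_kl, temperature=0.9 and reduction=sum in the configuration above, the distillation objective is presumably the forward KL divergence KL(teacher || student) over temperature-scaled logits. A sketch of that loss, assuming unmasked logits of shape (batch, seq, vocab); the function name and the absence of label masking are assumptions:

import torch
import torch.nn.functional as F

def forward_kl_loss(student_logits: torch.Tensor,
                    teacher_logits: torch.Tensor,
                    temperature: float = 0.9) -> torch.Tensor:
    # Forward KL: KL(teacher || student), summed over tokens and vocabulary.
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(student_log_probs, teacher_probs, reduction="sum")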
+2025-05-01 06:40:38 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 06:40:38 - INFO - __main__ - *** Starting training *** +2025-05-01 06:40:38 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 07:13:36 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 07:13:36 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 07:13:36 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_07-13-35_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 07:13:40 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 07:13:40 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 07:13:40 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 07:14:02 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 07:14:02 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 07:14:18 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 07:14:18 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 07:14:18 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 07:14:42 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 07:14:42 - INFO - __main__ - *** Starting training *** +2025-05-01 07:14:42 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 07:17:31 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 07:17:31 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 07:17:31 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_07-17-31_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 07:17:37 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 07:17:37 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 07:17:37 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 07:18:00 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 07:18:00 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 07:18:12 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 07:18:12 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 07:18:12 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
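For reference, the effective batch size implied by the configuration above is per_device_train_batch_size × gradient_accumulation_steps × number of GPUs = 4 × 4 × 1 = 16 sequences per optimizer step.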
+2025-05-01 07:18:38 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 07:18:38 - INFO - __main__ - *** Starting training *** +2025-05-01 07:18:38 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 07:49:40 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 07:49:40 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 07:49:40 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_07-49-40_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6500, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=None, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, +save_safetensors=True, +save_steps=70, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, 
+seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 07:49:43 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 07:49:43 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 07:49:43 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 07:50:07 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 07:50:07 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 07:50:23 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 07:50:23 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 07:50:23 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
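Note: the "Memory allocated / Memory reserved" figures that the log prints before loading the model, after loading, after the MoE-to-dense replacement, and after trainer initialization are presumably produced by a small helper around `torch.cuda`. The helper itself is not part of this log, so the sketch below is only an assumption about how such numbers could be produced; the function name and the MiB scaling are illustrative.

```python
import logging
import torch

logger = logging.getLogger(__name__)

def log_gpu_memory(tag: str) -> None:
    """Hypothetical helper reproducing the memory lines seen in this log."""
    # torch.cuda reports bytes; dividing by 2**20 gives MiB. Whether the
    # figures in this log are actually MiB is an assumption, not stated here.
    allocated = torch.cuda.memory_allocated() / 2**20
    reserved = torch.cuda.memory_reserved() / 2**20
    logger.info(f"{tag}:Memory allocated: {allocated}\nMemory reserved: {reserved}")

# Example call matching the log's wording:
# log_gpu_memory("Model memory after loading model")
```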
+2025-05-01 07:51:54 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 07:51:54 - INFO - __main__ - *** Starting training *** +2025-05-01 07:51:54 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 15:26:41 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 15:26:41 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 15:26:41 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=2, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_15-26-40_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=70, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 15:26:43 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 15:26:43 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 15:26:43 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 15:27:07 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 15:27:07 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 15:27:19 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 15:27:19 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 15:27:19 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 15:27:46 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 15:27:46 - INFO - __main__ - *** Starting training *** +2025-05-01 15:27:46 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 15:32:14 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 15:32:14 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 15:32:14 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=2, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_15-32-14_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=70, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 15:32:17 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 15:32:17 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 15:32:17 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 15:34:19 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 15:34:19 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 15:34:32 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 15:34:32 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 15:34:32 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 15:35:19 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 15:35:19 - INFO - __main__ - *** Starting training *** +2025-05-01 15:35:19 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 15:51:14 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 15:51:14 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 15:51:14 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_15-51-13_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=42, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 15:51:17 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 15:51:17 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 15:51:17 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 15:51:40 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 15:51:40 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 15:51:56 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 15:51:56 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 15:51:56 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
+2025-05-01 15:52:35 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 15:52:35 - INFO - __main__ - *** Starting training *** +2025-05-01 15:52:35 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +) +2025-05-01 16:39:13 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False) +2025-05-01 16:39:13 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='open-r1/OpenR1-Math-220k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False) +2025-05-01 16:39:13 - INFO - __main__ - Training parameters EfficientDistillationConfig( +_n_gpu=1, +accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 
'use_configured_state': False}, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +average_tokens_across_devices=False, +batch_eval_metrics=False, +benchmarks=[], +bf16=True, +bf16_full_eval=False, +callbacks=[], +chars_per_token=, +chat_template=None, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_persistent_workers=False, +dataloader_pin_memory=True, +dataloader_prefetch_factor=None, +dataset_batch_size=None, +dataset_kwargs=None, +dataset_num_proc=None, +dataset_text_field=text, +ddp_backend=None, +ddp_broadcast_buffers=None, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800000000, +debug=[], +deepspeed=None, +disable_dropout=True, +disable_tqdm=False, +dispatch_batches=None, +do_eval=True, +do_predict=False, +do_train=False, +eval_accumulation_steps=None, +eval_delay=0, +eval_do_concat_batches=True, +eval_on_start=False, +eval_packing=None, +eval_steps=None, +eval_strategy=IntervalStrategy.NO, +eval_use_gather_object=False, +evaluation_strategy=None, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=4, +gradient_checkpointing=False, +gradient_checkpointing_kwargs={'use_reentrant': False}, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_always_push=False, +hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-Open-R1-Distill, +hub_model_revision=main, +hub_private_repo=None, +hub_strategy=HubStrategy.EVERY_SAVE, +hub_token=, +ignore_data_skip=False, +include_for_metrics=[], +include_inputs_for_metrics=False, +include_num_input_tokens_seen=False, +include_tokens_per_second=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=5e-05, +length_column_name=length, +lmbda=0.0, +load_best_model_at_end=False, +local_rank=0, +log_level=info, +log_level_replica=warning, +log_on_each_node=True, +logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill/runs/May01_16-39-13_q-h100, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1, +logging_strategy=IntervalStrategy.STEPS, +loss_type=forward_kl, +lr_scheduler_kwargs={'min_lr_rate': 0.1}, +lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR, +max_grad_norm=1.0, +max_length=6000, +max_new_tokens=1024, +max_seq_length=None, +max_steps=-1, +metric_for_best_model=None, +model_init_kwargs=None, +mp_parameters=, +neftune_noise_alpha=None, +no_cuda=False, +num_of_sequences=None, +num_train_epochs=1, +optim=OptimizerNames.ADAMW_TORCH, +optim_args=None, +optim_target_modules=None, +output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +overwrite_hub_revision=False, +overwrite_output_dir=True, +packing=False, +past_index=-1, +per_device_eval_batch_size=16, +per_device_train_batch_size=4, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_revision=False, +push_to_hub_token=, +ray_scope=last, +reduction=sum, +remove_unused_columns=True, +report_to=['wandb'], +restore_callback_states_from_checkpoint=False, +resume_from_checkpoint=/home/deepseek/hector/test/data/DeepSeek-Coder-V2-Lite-Instruct/distill/checkpoint-20, +run_name=data/DeepSeek-Coder-V2-Lite-Instruct/distill, +save_on_each_node=False, +save_only_model=False, 
+save_safetensors=True, +save_steps=20, +save_strategy=SaveStrategy.STEPS, +save_total_limit=1, +seed=1234, +skip_memory_metrics=True, +split_batches=None, +system_prompt=None, +teacher_model_init_kwargs=None, +teacher_model_name_or_path=None, +temperature=0.9, +tf32=None, +torch_compile=False, +torch_compile_backend=None, +torch_compile_mode=None, +torch_empty_cache_steps=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_cpu=False, +use_ipex=False, +use_legacy_prediction_loop=False, +use_liger=False, +use_liger_kernel=False, +use_mps_device=False, +wandb_entity=None, +wandb_project=None, +warmup_ratio=0.1, +warmup_steps=0, +weight_decay=0.0, +) +2025-05-01 16:39:16 - INFO - __main__ - *** Initializing model kwargs *** +2025-05-01 16:39:16 - INFO - __main__ - Loaded top k experts from data/DeepSeek-Coder-V2-Lite-Instruct/distill/top_6_experts.json: {'model.layers.1.mlp': [45, 51, 44, 61, 22, 14], 'model.layers.2.mlp': [25, 18, 27, 13, 23, 3], 'model.layers.3.mlp': [54, 28, 25, 41, 23, 57], 'model.layers.4.mlp': [11, 21, 49, 33, 14, 37], 'model.layers.5.mlp': [35, 54, 20, 9, 47, 52], 'model.layers.6.mlp': [45, 22, 1, 42, 47, 13], 'model.layers.7.mlp': [58, 24, 43, 62, 18, 44], 'model.layers.8.mlp': [47, 39, 54, 58, 30, 56], 'model.layers.9.mlp': [31, 22, 32, 13, 12, 24], 'model.layers.10.mlp': [22, 47, 42, 19, 2, 13], 'model.layers.11.mlp': [11, 17, 29, 10, 22, 59], 'model.layers.12.mlp': [4, 3, 59, 56, 5, 26], 'model.layers.13.mlp': [17, 10, 47, 14, 42, 58], 'model.layers.14.mlp': [51, 7, 27, 31, 61, 18], 'model.layers.15.mlp': [24, 14, 17, 55, 41, 5], 'model.layers.16.mlp': [61, 33, 19, 49, 9, 63], 'model.layers.17.mlp': [32, 29, 26, 43, 0, 27], 'model.layers.18.mlp': [56, 5, 2, 36, 1, 42], 'model.layers.19.mlp': [24, 36, 40, 0, 23, 2], 'model.layers.20.mlp': [1, 56, 38, 48, 58, 20], 'model.layers.21.mlp': [19, 5, 28, 15, 13, 10], 'model.layers.22.mlp': [32, 14, 58, 31, 3, 45], 'model.layers.23.mlp': [20, 58, 0, 42, 33, 45], 'model.layers.24.mlp': [7, 63, 47, 42, 10, 62], 'model.layers.25.mlp': [45, 39, 46, 11, 38, 48], 'model.layers.26.mlp': [6, 46, 49, 13, 57, 11]} +2025-05-01 16:39:16 - INFO - __main__ - Model memory before loading model:Memory allocated: 0.0 +Memory reserved: 0.0 +2025-05-01 16:39:39 - INFO - __main__ - Model memory after loading model:Memory allocated: 4836.39697265625 +Memory reserved: 7322.0 +2025-05-01 16:39:39 - INFO - __main__ - Replacing MoE layers with dense layers using selected experts... +2025-05-01 16:39:52 - INFO - __main__ - MoE layers replaced with Dense MLP layers +2025-05-01 16:39:52 - INFO - __main__ - Model memory after replacing MoE with dense:Memory allocated: 1404.39697265625 +Memory reserved: 1526.0 +2025-05-01 16:39:52 - INFO - __main__ - Initializing EfficientDistillationTrainer... 
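Each restart above logs `loss_type=forward_kl`, `temperature=0.9`, and `reduction=sum` before the EfficientDistillationTrainer is built. The trainer's internals are not part of this log, so the following is only a minimal sketch, assuming a standard temperature-scaled forward-KL objective between teacher and student logits; the function and argument names are illustrative, not taken from the training script.

```python
import torch.nn.functional as F

def forward_kl_loss(student_logits, teacher_logits, temperature=0.9, reduction="sum"):
    # Forward KL, D_KL(teacher || student), taken over the vocabulary dimension.
    # Both sets of logits are scaled by the temperature before the softmax.
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_log_probs = F.log_softmax(teacher_logits / temperature, dim=-1)
    # kl_div takes the student's log-probs as `input` and, with log_target=True,
    # the teacher's log-probs as `target`; reduction="sum" matches the logged config.
    return F.kl_div(student_log_probs, teacher_log_probs,
                    reduction=reduction, log_target=True)
```

How padding tokens are masked, and how (or whether) this term is combined with other losses (the config also logs `lmbda=0.0`), is not visible in the log.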
+2025-05-01 16:40:20 - INFO - __main__ - Model memory after trainer initialization:Memory allocated: 6238.91943359375 +Memory reserved: 7812.0 +2025-05-01 16:40:20 - INFO - __main__ - *** Starting training *** +2025-05-01 16:40:20 - INFO - __main__ - Model architecture: DeepseekV2ForCausalLM( + (model): DeepseekV2Model( + (embed_tokens): Embedding(102400, 2048) + (layers): ModuleList( + (0): DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=10944, bias=False) + (up_proj): Linear(in_features=2048, out_features=10944, bias=False) + (down_proj): Linear(in_features=10944, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + (1-26): 26 x DeepseekV2DecoderLayer( + (self_attn): DeepseekV2FlashAttention2( + (q_proj): Linear(in_features=2048, out_features=3072, bias=False) + (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False) + (kv_a_layernorm): DeepseekV2RMSNorm() + (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False) + (o_proj): Linear(in_features=2048, out_features=2048, bias=False) + (rotary_emb): DeepseekV2YarnRotaryEmbedding() + ) + (mlp): DeepseekV2MoE( + (gate): MoEGate() + (shared_experts): DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=2816, bias=False) + (up_proj): Linear(in_features=2048, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=2048, bias=False) + (act_fn): SiLU() + ) + (selected_experts): ModuleList( + (0-5): 6 x DeepseekV2MLP( + (gate_proj): Linear(in_features=2048, out_features=1408, bias=False) + (up_proj): Linear(in_features=2048, out_features=1408, bias=False) + (down_proj): Linear(in_features=1408, out_features=2048, bias=False) + (act_fn): SiLU() + ) + ) + (experts): ModuleList() + ) + (input_layernorm): DeepseekV2RMSNorm() + (post_attention_layernorm): DeepseekV2RMSNorm() + ) + ) + (norm): DeepseekV2RMSNorm() + ) + (lm_head): Linear(in_features=2048, out_features=102400, bias=False) +)
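Every run in this section loads `top_6_experts.json` and then reports "Replacing MoE layers with dense layers using selected experts...", and the architecture printout afterwards shows each `DeepseekV2MoE` block keeping its `shared_experts`, gaining a six-entry `selected_experts` ModuleList, and ending up with an empty `experts` list. The script's actual helper is not shown in the log, so the following is only a rough sketch under those assumptions; `replace_moe_with_selected_experts` and its exact behaviour are illustrative.

```python
import json
import torch.nn as nn

def replace_moe_with_selected_experts(model, top_experts_path):
    """Keep only the pre-selected routed experts in each MoE layer (sketch).

    Mirrors the module layout printed in the log: `shared_experts` is left in
    place, the chosen routed experts are moved into `selected_experts`, and the
    full `experts` list is emptied, consistent with the drop in allocated
    memory reported after the replacement.
    """
    with open(top_experts_path) as f:
        # e.g. {"model.layers.1.mlp": [45, 51, 44, 61, 22, 14], ...}
        top_experts = json.load(f)

    # Snapshot the module list first so mutations don't affect the traversal.
    for name, module in list(model.named_modules()):
        indices = top_experts.get(name)
        if indices is None:
            continue
        # Copy the selected routed experts into a dedicated ModuleList ...
        module.selected_experts = nn.ModuleList(module.experts[i] for i in indices)
        # ... and drop the remaining experts so their weights can be freed.
        module.experts = nn.ModuleList()
    return model
```

The forward pass would also have to be adapted so that routing only uses (or simply aggregates) the retained experts; that part of the change is not visible in this log.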