Add files using upload-large-folder tool
- .gitattributes +1 -0
- v0-20251203-162131/args.json +386 -0
- v0-20251203-162131/checkpoint-534/added_tokens.json +28 -0
- v0-20251203-162131/checkpoint-534/args.json +386 -0
- v0-20251203-162131/checkpoint-534/chat_template.jinja +61 -0
- v0-20251203-162131/checkpoint-534/config.json +68 -0
- v0-20251203-162131/checkpoint-534/generation_config.json +13 -0
- v0-20251203-162131/checkpoint-534/latest +1 -0
- v0-20251203-162131/checkpoint-534/merges.txt +0 -0
- v0-20251203-162131/checkpoint-534/model-00001-of-00002.safetensors +3 -0
- v0-20251203-162131/checkpoint-534/model-00002-of-00002.safetensors +3 -0
- v0-20251203-162131/checkpoint-534/model.safetensors.index.json +406 -0
- v0-20251203-162131/checkpoint-534/rng_state_0.pth +3 -0
- v0-20251203-162131/checkpoint-534/rng_state_1.pth +3 -0
- v0-20251203-162131/checkpoint-534/scheduler.pt +3 -0
- v0-20251203-162131/checkpoint-534/special_tokens_map.json +31 -0
- v0-20251203-162131/checkpoint-534/tokenizer.json +3 -0
- v0-20251203-162131/checkpoint-534/tokenizer_config.json +239 -0
- v0-20251203-162131/checkpoint-534/trainer_state.json +917 -0
- v0-20251203-162131/checkpoint-534/training_args.bin +3 -0
- v0-20251203-162131/checkpoint-534/vocab.json +0 -0
- v0-20251203-162131/checkpoint-534/zero_to_fp32.py +760 -0
- v0-20251203-162131/logging.jsonl +187 -0
- v0-20251203-162131/val_dataset.jsonl +0 -0
.gitattributes
CHANGED
@@ -62,3 +62,4 @@ format_w_comp/qwen2.5_3b_dapo_format0/validation_samples/5.jsonl filter=lfs diff=lfs merge=lfs -text
 format_w_comp/qwen2.5_3b_dapo/global_step_34/merge_model/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 format_w_comp/qwen2.5_3b_dapo/validation_samples/5.jsonl filter=lfs diff=lfs merge=lfs -text
 format_w_comp/qwen2.5_3b_dapo_highent/global_step_34/merge_model/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+v0-20251203-162131/checkpoint-534/tokenizer.json filter=lfs diff=lfs merge=lfs -text
v0-20251203-162131/args.json
ADDED
@@ -0,0 +1,386 @@
{
  "output_dir": "/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "eval_strategy": "epoch",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 2,
  "per_device_eval_batch_size": 2,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 2,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "torch_empty_cache_steps": null,
  "learning_rate": 2e-06,
  "weight_decay": 0.0001,
  "adam_beta1": 0.9,
  "adam_beta2": 0.95,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 5.0,
  "max_steps": -1,
  "lr_scheduler_type": "cosine",
  "lr_scheduler_kwargs": null,
  "warmup_ratio": 0.1,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131/runs",
  "logging_strategy": "steps",
  "logging_first_step": true,
  "logging_steps": 5,
  "logging_nan_inf_filter": true,
  "save_strategy": "epoch",
  "save_steps": 500,
  "save_total_limit": 5,
  "save_safetensors": true,
  "save_on_each_node": false,
  "save_only_model": false,
  "restore_callback_states_from_checkpoint": false,
  "no_cuda": false,
  "use_cpu": false,
  "use_mps_device": false,
  "seed": 42,
  "data_seed": 42,
  "jit_mode_eval": false,
  "bf16": true,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": null,
  "dataloader_drop_last": false,
  "eval_steps": null,
  "dataloader_num_workers": 4,
  "dataloader_prefetch_factor": null,
  "past_index": -1,
  "run_name": "/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131",
  "disable_tqdm": null,
  "remove_unused_columns": true,
  "label_names": null,
  "load_best_model_at_end": true,
  "metric_for_best_model": "eval_loss",
  "greater_is_better": false,
  "ignore_data_skip": false,
  "fsdp": null,
  "fsdp_min_num_params": 0,
  "fsdp_config": null,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "accelerator_config": {
    "dispatch_batches": false
  },
  "parallelism_config": null,
  "deepspeed": {
    "fp16": {
      "enabled": "auto",
      "loss_scale": 0,
      "loss_scale_window": 1000,
      "initial_scale_power": 16,
      "hysteresis": 2,
      "min_loss_scale": 1
    },
    "bf16": {
      "enabled": "auto"
    },
    "zero_optimization": {
      "stage": 3,
      "offload_optimizer": {
        "device": "none",
        "pin_memory": true
      },
      "offload_param": {
        "device": "none",
        "pin_memory": true
      },
      "overlap_comm": false,
      "contiguous_gradients": true,
      "sub_group_size": 1000000000.0,
      "reduce_bucket_size": "auto",
      "zero_quantized_weights": false,
      "zero_quantized_gradients": false,
      "stage3_prefetch_bucket_size": "auto",
      "stage3_param_persistence_threshold": "auto",
      "stage3_max_live_parameters": 1000000000.0,
      "stage3_max_reuse_distance": 1000000000.0,
      "stage3_gather_16bit_weights_on_model_save": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
  },
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch_fused",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [
    "wandb"
  ],
  "project": "huggingface",
  "trackio_space_id": "trackio",
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "ddp_broadcast_buffers": null,
  "dataloader_pin_memory": true,
  "dataloader_persistent_workers": false,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_token": null,
  "hub_private_repo": null,
  "hub_always_push": false,
  "hub_revision": null,
  "gradient_checkpointing": true,
  "gradient_checkpointing_kwargs": null,
  "include_inputs_for_metrics": false,
  "include_for_metrics": [],
  "eval_do_concat_batches": true,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": null,
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 18000000,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "include_tokens_per_second": false,
  "include_num_input_tokens_seen": false,
  "neftune_noise_alpha": null,
  "optim_target_modules": null,
  "batch_eval_metrics": false,
  "eval_on_start": false,
  "use_liger_kernel": false,
  "liger_kernel_config": null,
  "eval_use_gather_object": false,
  "average_tokens_across_devices": true,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": null,
  "generation_num_beams": null,
  "generation_config": null,
  "tuner_backend": "peft",
  "vit_gradient_checkpointing": null,
  "router_aux_loss_coef": 0.0,
  "enable_dft_loss": false,
  "enable_channel_loss": false,
  "check_model": true,
  "acc_strategy": "token",
  "train_dataloader_shuffle": true,
  "max_epochs": null,
  "aligner_lr": null,
  "vit_lr": null,
  "use_logits_to_keep": null,
  "ds3_gather_for_generation": true,
  "resume_only_model": false,
  "optimizer": null,
  "loss_type": null,
  "metric": null,
  "eval_use_evalscope": false,
  "eval_dataset": [],
  "eval_dataset_args": null,
  "eval_limit": null,
  "eval_generation_config": null,
  "extra_eval_args": null,
  "use_flash_ckpt": false,
  "use_ray": false,
  "ray_exp_name": null,
  "device_groups": null,
  "model": "/ltstorage/home/pan/model/Qwen3-4B-Instruct-2507",
  "model_type": "qwen3_nothinking",
  "model_revision": null,
  "task_type": "causal_lm",
  "torch_dtype": "bfloat16",
  "attn_impl": null,
  "new_special_tokens": [],
  "num_labels": null,
  "problem_type": null,
  "rope_scaling": null,
  "device_map": null,
  "max_memory": {},
  "max_model_len": null,
  "local_repo_path": null,
  "init_strategy": null,
  "template": "qwen3_nothinking",
  "system": null,
  "max_length": 32768,
  "truncation_strategy": "delete",
  "max_pixels": null,
  "agent_template": null,
  "norm_bbox": null,
  "use_chat_template": true,
  "padding_free": false,
  "padding_side": "right",
  "loss_scale": "default",
  "sequence_parallel_size": 1,
  "response_prefix": null,
  "template_backend": "swift",
  "dataset": [
    "/ltstorage/home/pan/MT_Grpo/data/cold_start_interleaved/cold_start_train_1580.json"
  ],
  "val_dataset": [],
  "split_dataset_ratio": 0.1,
  "dataset_num_proc": 1,
  "load_from_cache_file": false,
  "dataset_shuffle": true,
  "val_dataset_shuffle": false,
  "streaming": false,
  "interleave_prob": null,
  "stopping_strategy": "first_exhausted",
  "shuffle_buffer_size": 1000,
  "download_mode": "reuse_dataset_if_exists",
  "columns": {},
  "strict": false,
  "model_name": null,
  "model_author": null,
  "custom_dataset_info": [],
  "quant_method": null,
  "quant_bits": null,
  "hqq_axis": null,
  "bnb_4bit_compute_dtype": "bfloat16",
  "bnb_4bit_quant_type": "nf4",
  "bnb_4bit_use_double_quant": true,
  "bnb_4bit_quant_storage": null,
  "max_new_tokens": 64,
  "temperature": 0.0,
  "top_k": null,
  "top_p": null,
  "repetition_penalty": null,
  "num_beams": 1,
  "stream": false,
  "stop_words": [],
  "logprobs": false,
  "top_logprobs": null,
  "ckpt_dir": null,
  "lora_modules": [],
  "train_type": "full",
  "adapters": [],
  "external_plugins": [],
  "model_kwargs": {},
  "load_args": false,
  "load_data_args": false,
  "packing": false,
  "packing_length": null,
  "lazy_tokenize": false,
  "cached_dataset": [],
  "custom_register_path": [],
  "use_hf": false,
  "ignore_args_error": false,
  "use_swift_lora": false,
  "freeze_parameters": [],
  "freeze_parameters_regex": null,
  "freeze_parameters_ratio": 0.0,
  "trainable_parameters": [],
  "trainable_parameters_regex": null,
  "freeze_llm": false,
  "freeze_vit": true,
  "freeze_aligner": true,
  "target_modules": [
    "all-linear"
  ],
  "target_regex": null,
  "target_parameters": null,
  "modules_to_save": [],
  "lora_rank": 8,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "lora_bias": "none",
  "lora_dtype": null,
  "lorap_lr_ratio": null,
  "use_rslora": false,
  "use_dora": false,
  "lora_ga_batch_size": 2,
  "lora_ga_iters": 2,
  "lora_ga_max_length": 1024,
  "lora_ga_direction": "ArB2r",
  "lora_ga_scale": "stable",
  "lora_ga_stable_gamma": 16,
  "init_weights": true,
  "fourier_n_frequency": 2000,
  "fourier_scaling": 300.0,
  "boft_block_size": 4,
  "boft_block_num": 0,
  "boft_n_butterfly_factor": 1,
  "boft_dropout": 0.0,
  "vera_rank": 256,
  "vera_projection_prng_key": 0,
  "vera_dropout": 0.0,
  "vera_d_initial": 0.1,
  "adapter_act": "gelu",
  "adapter_length": 128,
  "use_galore": false,
  "galore_target_modules": null,
  "galore_rank": 128,
  "galore_update_proj_gap": 50,
  "galore_scale": 1.0,
  "galore_proj_type": "std",
  "galore_optim_per_parameter": false,
  "galore_with_embedding": false,
  "galore_quantization": false,
  "galore_proj_quant": false,
  "galore_proj_bits": 4,
  "galore_proj_group_size": 256,
  "galore_cos_threshold": 0.4,
  "galore_gamma_proj": 2,
  "galore_queue_size": 5,
  "adalora_target_r": 8,
  "adalora_init_r": 12,
  "adalora_tinit": 0,
  "adalora_tfinal": 0,
  "adalora_deltaT": 1,
  "adalora_beta1": 0.85,
  "adalora_beta2": 0.85,
  "adalora_orth_reg_weight": 0.5,
  "llamapro_num_new_blocks": 4,
  "llamapro_num_groups": null,
  "lisa_activated_layers": 0,
  "lisa_step_interval": 20,
  "reft_layer_key": null,
  "reft_layers": null,
  "reft_rank": 4,
  "reft_intervention_type": "LoreftIntervention",
  "reft_args": null,
  "swanlab_token": null,
  "swanlab_project": null,
  "swanlab_workspace": null,
  "swanlab_exp_name": null,
  "swanlab_lark_webhook_url": null,
  "swanlab_lark_secret": null,
  "swanlab_mode": "cloud",
  "add_version": true,
  "create_checkpoint_symlink": false,
  "zero_hpz_partition_size": null,
  "deepspeed_autotp_size": null,
  "early_stop_interval": null,
  "rank": 0,
  "global_world_size": 2,
  "local_world_size": 2,
  "model_suffix": "Qwen3-4B-Instruct-2507",
  "model_info": "ModelInfo(model_type='qwen3_nothinking', model_dir='/ltstorage/home/pan/model/Qwen3-4B-Instruct-2507', torch_dtype=torch.bfloat16, max_model_len=262144, quant_method=None, quant_bits=None, rope_scaling=None, is_moe_model=False, config=None, task_type='causal_lm', num_labels=None)",
  "model_meta": "ModelMeta(model_type='qwen3_nothinking', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-30B-A3B-Instruct-2507', hf_model_id='Qwen/Qwen3-30B-A3B-Instruct-2507', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-30B-A3B-Instruct-2507-FP8', hf_model_id='Qwen/Qwen3-30B-A3B-Instruct-2507-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-235B-A22B-Instruct-2507', hf_model_id='Qwen/Qwen3-235B-A22B-Instruct-2507', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-235B-A22B-Instruct-2507-FP8', hf_model_id='Qwen/Qwen3-235B-A22B-Instruct-2507-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='swift/Qwen3-235B-A22B-Instruct-2507-AWQ', hf_model_id=None, model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-4B-Instruct-2507', hf_model_id='Qwen/Qwen3-4B-Instruct-2507', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-Instruct-2507-FP8', hf_model_id='Qwen/Qwen3-4B-Instruct-2507-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen3_nothinking', get_function=<function get_model_tokenizer_with_flash_attn at 0x7aaa82df11b0>, model_arch=None, architectures=['Qwen3MoeForCausalLM', 'Qwen3ForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.51'], tags=[])",
  "model_dir": "/ltstorage/home/pan/model/Qwen3-4B-Instruct-2507",
  "hub": "<class 'swift.hub.hub.MSHub'>",
  "evaluation_strategy": "epoch",
  "training_args": "Seq2SeqTrainingArguments(output_dir='/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.EPOCH: 'epoch'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-06, weight_decay=0.0001, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.1, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.EPOCH: 'epoch'>, save_steps=500, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=None, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=True, metric_for_best_model='eval_loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), parallelism_config=None, deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH_FUSED: 'adamw_torch_fused'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['wandb'], project='huggingface', trackio_space_id='trackio', ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, hub_revision=None, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, liger_kernel_config=None, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, tuner_backend='peft', vit_gradient_checkpointing=True, router_aux_loss_coef=0.0, enable_dft_loss=False, enable_channel_loss=False, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, use_logits_to_keep=None, ds3_gather_for_generation=True, resume_only_model=False, optimizer=None, loss_type=None, metric=None, eval_use_evalscope=False, eval_dataset=[], eval_dataset_args=None, eval_limit=None, eval_generation_config=None, extra_eval_args=None, use_flash_ckpt=False, sft_alpha=0, chord_sft_dataset=[], chord_sft_per_device_train_batch_size=None, chord_enable_phi_function=False, chord_mu_warmup_steps=None, chord_mu_decay_steps=None, chord_mu_peak=None, chord_mu_valley=None, train_type='full', local_repo_path=None, galore_config=None, padding_side='right', padding_free=False, task_type='causal_lm', problem_type=None)"
}
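As a quick sanity check on these arguments, the effective global batch size and the steps per epoch can be derived directly from the file. A minimal sketch in Python (standard library only; the path is relative to this repo):

    import json

    # Load the training arguments dumped by the trainer.
    with open("v0-20251203-162131/args.json") as f:
        args = json.load(f)

    # Effective batch size = per-device batch * grad accumulation * world size.
    effective_bs = (args["per_device_train_batch_size"]
                    * args["gradient_accumulation_steps"]
                    * args["global_world_size"])
    print(effective_bs)  # 2 * 2 * 2 = 8

    # 1580 samples with split_dataset_ratio 0.1 leaves ~1422 train samples;
    # ceil(1422 / 8) = 178 optimizer steps per epoch, so 534 steps is 3 epochs,
    # which is consistent with the checkpoint-534 directory name.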
v0-20251203-162131/checkpoint-534/added_tokens.json
ADDED
@@ -0,0 +1,28 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
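These token-to-id mappings can be verified against the checkpoint's tokenizer. A minimal sketch, assuming transformers is installed and the checkpoint directory is available locally:

    import json
    from transformers import AutoTokenizer

    ckpt = "v0-20251203-162131/checkpoint-534"
    tokenizer = AutoTokenizer.from_pretrained(ckpt)

    # Every entry in added_tokens.json should round-trip through the tokenizer.
    with open(f"{ckpt}/added_tokens.json") as f:
        added = json.load(f)

    for token, token_id in added.items():
        assert tokenizer.convert_tokens_to_ids(token) == token_id
    print(f"verified {len(added)} added tokens")  # verified 26 added tokens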
v0-20251203-162131/checkpoint-534/args.json
ADDED
@@ -0,0 +1,386 @@
(identical to v0-20251203-162131/args.json above)
v0-20251203-162131/checkpoint-534/chat_template.jinja
ADDED
@@ -0,0 +1,61 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0].role == 'system' %}
        {{- messages[0].content + '\n\n' }}
    {%- endif %}
    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0].role == 'system' %}
        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if message.content is string %}
        {%- set content = message.content %}
    {%- else %}
        {%- set content = '' %}
    {%- endif %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role + '\n' + content }}
        {%- if message.tool_calls %}
            {%- for tool_call in message.tool_calls %}
                {%- if (loop.first and content) or (not loop.first) %}
                    {{- '\n' }}
                {%- endif %}
                {%- if tool_call.function %}
                    {%- set tool_call = tool_call.function %}
                {%- endif %}
                {{- '<tool_call>\n{"name": "' }}
                {{- tool_call.name }}
                {{- '", "arguments": ' }}
                {%- if tool_call.arguments is string %}
                    {{- tool_call.arguments }}
                {%- else %}
                    {{- tool_call.arguments | tojson }}
                {%- endif %}
                {{- '}\n</tool_call>' }}
            {%- endfor %}
        {%- endif %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
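This is a ChatML-style template with tool calls and tool responses folded into the conversation. A minimal sketch of how transformers renders it (the message contents here are made up for illustration):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("v0-20251203-162131/checkpoint-534")

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Translate 'hello' to German."},
    ]

    # tokenize=False returns the rendered prompt string instead of token ids.
    prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
    print(prompt)
    # <|im_start|>system
    # You are a helpful assistant.<|im_end|>
    # <|im_start|>user
    # Translate 'hello' to German.<|im_end|>
    # <|im_start|>assistant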
v0-20251203-162131/checkpoint-534/config.json
ADDED
@@ -0,0 +1,68 @@
{
  "architectures": [
    "Qwen3ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "dtype": "bfloat16",
  "eos_token_id": 151645,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 2560,
  "initializer_range": 0.02,
  "intermediate_size": 9728,
  "layer_types": [
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention"
  ],
  "max_position_embeddings": 262144,
  "max_window_layers": 36,
  "model_type": "qwen3",
  "num_attention_heads": 32,
  "num_hidden_layers": 36,
  "num_key_value_heads": 8,
  "pad_token_id": 151643,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 5000000,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "transformers_version": "4.57.3",
  "use_cache": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}
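For reference, a minimal sketch of loading this config with transformers' AutoConfig; the assertions simply restate values from the file above:

# Sketch only: inspect the architecture config shipped with the checkpoint.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("v0-20251203-162131/checkpoint-534")
assert config.model_type == "qwen3"
assert config.num_hidden_layers == 36 and config.hidden_size == 2560
# 32 query heads share 8 KV heads (grouped-query attention), head_dim 128.
print(config.num_attention_heads, config.num_key_value_heads, config.head_dim)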
v0-20251203-162131/checkpoint-534/generation_config.json
ADDED
@@ -0,0 +1,13 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.57.3"
}
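When generating with this checkpoint, transformers picks up the sampling defaults above automatically. An illustrative sketch (the prompt and token budget are arbitrary; loading assumes the LFS weight shards have been pulled and that there is memory for the bf16 weights):

# Sketch only: generate with the checkpoint's default sampling settings.
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "v0-20251203-162131/checkpoint-534"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)

inputs = tokenizer("Hello, ", return_tensors="pt")
# generation_config.json supplies do_sample=True, temperature=0.7,
# top_k=20, top_p=0.8 unless overridden in this call.
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))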
v0-20251203-162131/checkpoint-534/latest
ADDED
@@ -0,0 +1 @@
global_step534
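The one-line latest file is the tag DeepSpeed writes to point at its most recent ZeRO state directory; to my understanding, the bundled zero_to_fp32.py reads this tag to locate the partitioned states it consolidates. A minimal sketch of resolving that directory (paths assumed relative to the repo root):

# Sketch only: resolve the DeepSpeed step directory named by 'latest'.
from pathlib import Path

ckpt = Path("v0-20251203-162131/checkpoint-534")
tag = (ckpt / "latest").read_text().strip()  # -> "global_step534"
step_dir = ckpt / tag  # where the ZeRO shards would live
print(step_dir)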
v0-20251203-162131/checkpoint-534/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
v0-20251203-162131/checkpoint-534/model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a20235d8904c1149fd5493d3e92a60c064b8deef4062282ed978dfb5a84aff5b
size 4967215360
v0-20251203-162131/checkpoint-534/model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58bed42f89695540193deddbf5a77f839d788c402df5a12ff3e72c8905de2b3d
size 3077766632
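The two .safetensors entries above are Git LFS pointer files, not the weights themselves; the oid/size fields let you verify a downloaded shard. A hedged sketch (the local path is an assumption; the real file comes from git lfs pull or a Hub download):

# Sketch only: verify a downloaded shard against its LFS pointer above.
import hashlib
from pathlib import Path

shard = Path("model-00001-of-00002.safetensors")
expected = "a20235d8904c1149fd5493d3e92a60c064b8deef4062282ed978dfb5a84aff5b"

h = hashlib.sha256()
with shard.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)

assert shard.stat().st_size == 4967215360
assert h.hexdigest() == expected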
v0-20251203-162131/checkpoint-534/model.safetensors.index.json
ADDED
@@ -0,0 +1,406 @@
{
  "metadata": {
    "total_parameters": 196096,
    "total_size": 8044936192
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "model.norm.weight": "model-00002-of-00002.safetensors"
  }
}
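The weight_map above tells loaders which shard holds each tensor; note that layer 20 straddles the two shards. A sketch of a single-tensor lookup with the safetensors library (the tensor name is chosen for illustration):

# Sketch only: locate one tensor's shard via the index, then read it lazily.
import json
from safetensors import safe_open

base = "v0-20251203-162131/checkpoint-534"
with open(f"{base}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.20.mlp.down_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
with safe_open(f"{base}/{shard}", framework="pt") as sf:
    tensor = sf.get_tensor(name)   # reads only this tensor, not the shard
print(tensor.shape, tensor.dtype)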
v0-20251203-162131/checkpoint-534/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63a85cfbd7247db6f26c9a0d7f41c9d9d831ca4fa145295df7f861445ecd6e4e
size 14917
v0-20251203-162131/checkpoint-534/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dbf501485e53be1ee5b08e74cadccac58a8101146bcb7cedeb246637c894eb92
size 14917
v0-20251203-162131/checkpoint-534/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c2d9f3641127e6b7ee9c02d50e055f7b9f2238a6eea70733d4ebccc25631ba11
size 1465
v0-20251203-162131/checkpoint-534/special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
v0-20251203-162131/checkpoint-534/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
size 11422654
v0-20251203-162131/checkpoint-534/tokenizer_config.json
ADDED
@@ -0,0 +1,239 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151666": {
      "content": "</tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151667": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151668": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1010000,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
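A short sketch checking that the tokenizer loads with the padding and chat markers configured above; the asserted values mirror this file and special_tokens_map.json:

# Sketch only: verify the tokenizer's special-token setup.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("v0-20251203-162131/checkpoint-534")
assert tok.eos_token == "<|im_end|>"
assert tok.pad_token == "<|endoftext|>"
# <think>/</think> are added tokens with special=false, so they survive
# decoding even when skip_special_tokens=True is used.
print(tok.convert_tokens_to_ids(["<think>", "</think>"]))  # [151667, 151668]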
v0-20251203-162131/checkpoint-534/trainer_state.json
ADDED
|
@@ -0,0 +1,917 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+{
+  "best_global_step": 534,
+  "best_metric": 0.88253623,
+  "best_model_checkpoint": "/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131/checkpoint-534",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 534,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0056179775280898875,
+      "grad_norm": 20.424147491457006,
+      "learning_rate": 2.2471910112359548e-08,
+      "loss": 1.9294220209121704,
+      "step": 1,
+      "token_acc": 0.6705118961788031
+    },
+    {
+      "epoch": 0.028089887640449437,
+      "grad_norm": 23.439119154528118,
+      "learning_rate": 1.1235955056179774e-07,
+      "loss": 1.904306411743164,
+      "step": 5,
+      "token_acc": 0.6692064001283233
+    },
+    {
+      "epoch": 0.056179775280898875,
+      "grad_norm": 19.32456440006131,
+      "learning_rate": 2.2471910112359549e-07,
+      "loss": 1.7001083374023438,
+      "step": 10,
+      "token_acc": 0.6961481903135525
+    },
+    {
+      "epoch": 0.08426966292134831,
+      "grad_norm": 20.599442226361514,
+      "learning_rate": 3.3707865168539325e-07,
+      "loss": 1.8132465362548829,
+      "step": 15,
+      "token_acc": 0.6871491146893701
+    },
+    {
+      "epoch": 0.11235955056179775,
+      "grad_norm": 15.93092434881123,
+      "learning_rate": 4.4943820224719097e-07,
+      "loss": 1.878952407836914,
+      "step": 20,
+      "token_acc": 0.6831177671885637
+    },
+    {
+      "epoch": 0.1404494382022472,
+      "grad_norm": 19.692709514736602,
+      "learning_rate": 5.617977528089887e-07,
+      "loss": 1.7200767517089843,
+      "step": 25,
+      "token_acc": 0.6907898676567459
+    },
+    {
+      "epoch": 0.16853932584269662,
+      "grad_norm": 13.974844139329205,
+      "learning_rate": 6.741573033707865e-07,
+      "loss": 1.65771541595459,
+      "step": 30,
+      "token_acc": 0.6928436466698158
+    },
+    {
+      "epoch": 0.19662921348314608,
+      "grad_norm": 9.69346120447067,
+      "learning_rate": 7.865168539325843e-07,
+      "loss": 1.4616617202758788,
+      "step": 35,
+      "token_acc": 0.7045438057698353
+    },
+    {
+      "epoch": 0.2247191011235955,
+      "grad_norm": 6.598820229824133,
+      "learning_rate": 8.988764044943819e-07,
+      "loss": 1.4967164993286133,
+      "step": 40,
+      "token_acc": 0.6971656050955414
+    },
+    {
+      "epoch": 0.25280898876404495,
+      "grad_norm": 5.67433818854337,
+      "learning_rate": 1.0112359550561797e-06,
+      "loss": 1.23870849609375,
+      "step": 45,
+      "token_acc": 0.719047126911947
+    },
+    {
+      "epoch": 0.2808988764044944,
+      "grad_norm": 5.529419440195868,
+      "learning_rate": 1.1235955056179775e-06,
+      "loss": 1.295097541809082,
+      "step": 50,
+      "token_acc": 0.7002483051083893
+    },
+    {
+      "epoch": 0.3089887640449438,
+      "grad_norm": 3.8790926273657087,
+      "learning_rate": 1.235955056179775e-06,
+      "loss": 1.2352296829223632,
+      "step": 55,
+      "token_acc": 0.7105542710741736
+    },
+    {
+      "epoch": 0.33707865168539325,
+      "grad_norm": 3.953557488831989,
+      "learning_rate": 1.348314606741573e-06,
+      "loss": 1.2055891036987305,
+      "step": 60,
+      "token_acc": 0.7074283178422622
+    },
+    {
+      "epoch": 0.3651685393258427,
+      "grad_norm": 3.643343084125879,
+      "learning_rate": 1.4606741573033708e-06,
+      "loss": 1.1811911582946777,
+      "step": 65,
+      "token_acc": 0.7093026264717298
+    },
+    {
+      "epoch": 0.39325842696629215,
+      "grad_norm": 3.470861144393423,
+      "learning_rate": 1.5730337078651686e-06,
+      "loss": 1.039687156677246,
+      "step": 70,
+      "token_acc": 0.7354034344860033
+    },
+    {
+      "epoch": 0.42134831460674155,
+      "grad_norm": 2.587222430685054,
+      "learning_rate": 1.6853932584269661e-06,
+      "loss": 1.0405202865600587,
+      "step": 75,
+      "token_acc": 0.7375265518055227
+    },
+    {
+      "epoch": 0.449438202247191,
+      "grad_norm": 3.8709777420787734,
+      "learning_rate": 1.7977528089887639e-06,
+      "loss": 1.1237641334533692,
+      "step": 80,
+      "token_acc": 0.7187291772168768
+    },
+    {
+      "epoch": 0.47752808988764045,
+      "grad_norm": 2.8690009715335694,
+      "learning_rate": 1.910112359550562e-06,
+      "loss": 1.1050168991088867,
+      "step": 85,
+      "token_acc": 0.723785465347461
+    },
+    {
+      "epoch": 0.5056179775280899,
+      "grad_norm": 2.7797847810326908,
+      "learning_rate": 1.999992308621909e-06,
+      "loss": 1.077320384979248,
+      "step": 90,
+      "token_acc": 0.7229019786217876
+    },
+    {
+      "epoch": 0.5337078651685393,
+      "grad_norm": 2.8893483997821936,
+      "learning_rate": 1.999723122811548e-06,
+      "loss": 1.0275424003601075,
+      "step": 95,
+      "token_acc": 0.7338508671720844
+    },
+    {
+      "epoch": 0.5617977528089888,
+      "grad_norm": 3.296545947442661,
+      "learning_rate": 1.999069486403046e-06,
+      "loss": 1.0429039001464844,
+      "step": 100,
+      "token_acc": 0.7256585417718827
+    },
+    {
+      "epoch": 0.5898876404494382,
+      "grad_norm": 4.223909130674462,
+      "learning_rate": 1.998031650756905e-06,
+      "loss": 1.1426443099975585,
+      "step": 105,
+      "token_acc": 0.713406109640382
+    },
+    {
+      "epoch": 0.6179775280898876,
+      "grad_norm": 3.1119951608284744,
+      "learning_rate": 1.9966100149801647e-06,
+      "loss": 1.1262334823608398,
+      "step": 110,
+      "token_acc": 0.7054803149606299
+    },
+    {
+      "epoch": 0.6460674157303371,
+      "grad_norm": 3.125320283936831,
+      "learning_rate": 1.994805125772918e-06,
+      "loss": 0.9934347152709961,
+      "step": 115,
+      "token_acc": 0.742525724186017
+    },
+    {
+      "epoch": 0.6741573033707865,
+      "grad_norm": 3.4364058770050354,
+      "learning_rate": 1.9926176772180765e-06,
+      "loss": 1.021185302734375,
+      "step": 120,
+      "token_acc": 0.7300833835643962
+    },
+    {
+      "epoch": 0.702247191011236,
+      "grad_norm": 3.4104033179708932,
+      "learning_rate": 1.9900485105144544e-06,
+      "loss": 1.048966407775879,
+      "step": 125,
+      "token_acc": 0.7253274730358846
+    },
+    {
+      "epoch": 0.7303370786516854,
+      "grad_norm": 2.5337665605612893,
+      "learning_rate": 1.987098613653279e-06,
+      "loss": 0.9771171569824219,
+      "step": 130,
+      "token_acc": 0.7396136051891289
+    },
+    {
+      "epoch": 0.7584269662921348,
+      "grad_norm": 3.5003717612077714,
+      "learning_rate": 1.983769121038254e-06,
+      "loss": 0.9926401138305664,
+      "step": 135,
+      "token_acc": 0.7304092259527841
+    },
+    {
+      "epoch": 0.7865168539325843,
+      "grad_norm": 2.8085699558587556,
+      "learning_rate": 1.980061313049315e-06,
+      "loss": 1.0762109756469727,
+      "step": 140,
+      "token_acc": 0.7092926128948055
+    },
+    {
+      "epoch": 0.8146067415730337,
+      "grad_norm": 2.727404329934186,
+      "learning_rate": 1.9759766155502506e-06,
+      "loss": 0.9405284881591797,
+      "step": 145,
+      "token_acc": 0.7430621892375144
+    },
+    {
+      "epoch": 0.8426966292134831,
+      "grad_norm": 2.9003743944416174,
+      "learning_rate": 1.9715165993403754e-06,
+      "loss": 1.0379549026489259,
+      "step": 150,
+      "token_acc": 0.7221681798530047
+    },
+    {
+      "epoch": 0.8707865168539326,
+      "grad_norm": 3.394138821971619,
+      "learning_rate": 1.9666829795504693e-06,
+      "loss": 0.9658462524414062,
+      "step": 155,
+      "token_acc": 0.7353258912495498
+    },
+    {
+      "epoch": 0.898876404494382,
+      "grad_norm": 3.471117752337183,
+      "learning_rate": 1.9614776149832122e-06,
+      "loss": 0.9399328231811523,
+      "step": 160,
+      "token_acc": 0.7380347576771208
+    },
+    {
+      "epoch": 0.9269662921348315,
+      "grad_norm": 3.5307014243286683,
+      "learning_rate": 1.9559025073983677e-06,
+      "loss": 0.9858393669128418,
+      "step": 165,
+      "token_acc": 0.738056057622272
+    },
+    {
+      "epoch": 0.9550561797752809,
+      "grad_norm": 3.4326164862952107,
+      "learning_rate": 1.949959800742991e-06,
+      "loss": 1.0343259811401366,
+      "step": 170,
+      "token_acc": 0.7181796472554839
+    },
+    {
+      "epoch": 0.9831460674157303,
+      "grad_norm": 2.7472568192078333,
+      "learning_rate": 1.94365178032696e-06,
+      "loss": 1.009816551208496,
+      "step": 175,
+      "token_acc": 0.7324969121757785
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 0.9415958523750305,
+      "eval_runtime": 50.628,
+      "eval_samples_per_second": 3.121,
+      "eval_steps_per_second": 0.79,
+      "eval_token_acc": 0.7514679577531556,
+      "step": 178
+    },
+    {
+      "epoch": 1.0112359550561798,
+      "grad_norm": 2.659664073867142,
+      "learning_rate": 1.9369808719441444e-06,
+      "loss": 0.8963218688964844,
+      "step": 180,
+      "token_acc": 0.764312885592814
+    },
+    {
+      "epoch": 1.0393258426966292,
+      "grad_norm": 3.0170005843317735,
+      "learning_rate": 1.929949640939548e-06,
+      "loss": 0.8709476470947266,
+      "step": 185,
+      "token_acc": 0.7628275648077628
+    },
+    {
+      "epoch": 1.0674157303370786,
+      "grad_norm": 3.554072348995685,
+      "learning_rate": 1.922560791222786e-06,
+      "loss": 0.9287420272827148,
+      "step": 190,
+      "token_acc": 0.7509577488079817
+    },
+    {
+      "epoch": 1.095505617977528,
+      "grad_norm": 2.223289753807769,
+      "learning_rate": 1.9148171642282808e-06,
+      "loss": 0.9102365493774414,
+      "step": 195,
+      "token_acc": 0.7591235059760956
+    },
+    {
+      "epoch": 1.1235955056179776,
+      "grad_norm": 3.23140581389699,
+      "learning_rate": 1.9067217378225652e-06,
+      "loss": 0.8868337631225586,
+      "step": 200,
+      "token_acc": 0.7549810811525944
+    },
+    {
+      "epoch": 1.151685393258427,
+      "grad_norm": 2.526779070640055,
+      "learning_rate": 1.8982776251591246e-06,
+      "loss": 0.8987601280212403,
+      "step": 205,
+      "token_acc": 0.7629664894601814
+    },
+    {
+      "epoch": 1.1797752808988764,
+      "grad_norm": 3.1348346979092736,
+      "learning_rate": 1.8894880734812103e-06,
+      "loss": 0.9315019607543945,
+      "step": 210,
+      "token_acc": 0.7562072271630245
+    },
+    {
+      "epoch": 1.2078651685393258,
+      "grad_norm": 3.8436975180594004,
+      "learning_rate": 1.8803564628730913e-06,
+      "loss": 0.8741535186767578,
+      "step": 215,
+      "token_acc": 0.7632421004514027
+    },
+    {
+      "epoch": 1.2359550561797752,
+      "grad_norm": 3.035898758347739,
+      "learning_rate": 1.8708863049602159e-06,
+      "loss": 0.9063589096069335,
+      "step": 220,
+      "token_acc": 0.7581121729177204
+    },
+    {
+      "epoch": 1.2640449438202248,
+      "grad_norm": 3.2189094575626696,
+      "learning_rate": 1.8610812415587947e-06,
+      "loss": 0.868967342376709,
+      "step": 225,
+      "token_acc": 0.7626130920702502
+    },
+    {
+      "epoch": 1.2921348314606742,
+      "grad_norm": 2.392753224965853,
+      "learning_rate": 1.850945043275312e-06,
+      "loss": 0.8877336502075195,
+      "step": 230,
+      "token_acc": 0.7599571479585764
+    },
+    {
+      "epoch": 1.3202247191011236,
+      "grad_norm": 2.2379100772993086,
+      "learning_rate": 1.8404816080565132e-06,
+      "loss": 0.8606734275817871,
+      "step": 235,
+      "token_acc": 0.7693435161613824
+    },
+    {
+      "epoch": 1.348314606741573,
+      "grad_norm": 2.7988132570934976,
+      "learning_rate": 1.829694959690422e-06,
+      "loss": 0.8484037399291993,
+      "step": 240,
+      "token_acc": 0.7654914529914529
+    },
+    {
+      "epoch": 1.3764044943820224,
+      "grad_norm": 2.8271302257023003,
+      "learning_rate": 1.8185892462589636e-06,
+      "loss": 0.8865579605102539,
+      "step": 245,
+      "token_acc": 0.7594392808166996
+    },
+    {
+      "epoch": 1.404494382022472,
+      "grad_norm": 3.222266204963613,
+      "learning_rate": 1.807168738542792e-06,
+      "loss": 0.8766312599182129,
+      "step": 250,
+      "token_acc": 0.7603124677735382
+    },
+    {
+      "epoch": 1.4325842696629214,
+      "grad_norm": 2.5744913216922187,
+      "learning_rate": 1.7954378283789287e-06,
+      "loss": 0.83121337890625,
+      "step": 255,
+      "token_acc": 0.7687455892731122
+    },
+    {
+      "epoch": 1.4606741573033708,
+      "grad_norm": 2.914984856086316,
+      "learning_rate": 1.7834010269718524e-06,
+      "loss": 0.8371623039245606,
+      "step": 260,
+      "token_acc": 0.7685514040820193
+    },
+    {
+      "epoch": 1.4887640449438202,
+      "grad_norm": 3.276894598583512,
+      "learning_rate": 1.7710629631586837e-06,
+      "loss": 0.8378698348999023,
+      "step": 265,
+      "token_acc": 0.7716363427323158
+    },
+    {
+      "epoch": 1.5168539325842696,
+      "grad_norm": 4.192503584217087,
+      "learning_rate": 1.7584283816291317e-06,
+      "loss": 0.8841152191162109,
+      "step": 270,
+      "token_acc": 0.7596019493575981
+    },
+    {
+      "epoch": 1.5449438202247192,
+      "grad_norm": 2.7715215774573174,
+      "learning_rate": 1.7455021411008906e-06,
+      "loss": 0.836890983581543,
+      "step": 275,
+      "token_acc": 0.7696108086307724
+    },
+    {
+      "epoch": 1.5730337078651684,
+      "grad_norm": 3.1968896533225646,
+      "learning_rate": 1.7322892124511858e-06,
+      "loss": 0.8807048797607422,
+      "step": 280,
+      "token_acc": 0.7628409791071529
+    },
+    {
+      "epoch": 1.601123595505618,
+      "grad_norm": 2.85142297149823,
+      "learning_rate": 1.7187946768051877e-06,
+      "loss": 0.824298095703125,
+      "step": 285,
+      "token_acc": 0.7725494097360392
+    },
+    {
+      "epoch": 1.6292134831460674,
+      "grad_norm": 3.361622727935816,
+      "learning_rate": 1.7050237235820287e-06,
+      "loss": 0.836163330078125,
+      "step": 290,
+      "token_acc": 0.7743141527186292
+    },
+    {
+      "epoch": 1.6573033707865168,
+      "grad_norm": 3.1313176928578876,
+      "learning_rate": 1.6909816484991757e-06,
+      "loss": 0.9208276748657227,
+      "step": 295,
+      "token_acc": 0.7488928672382926
+    },
+    {
+      "epoch": 1.6853932584269664,
+      "grad_norm": 3.562588866831991,
+      "learning_rate": 1.6766738515359248e-06,
+      "loss": 0.9145614624023437,
+      "step": 300,
+      "token_acc": 0.7516417572463768
+    },
+    {
+      "epoch": 1.7134831460674156,
+      "grad_norm": 2.3855171093070835,
+      "learning_rate": 1.6621058348568004e-06,
+      "loss": 0.83453369140625,
+      "step": 305,
+      "token_acc": 0.7668994730339629
+    },
+    {
+      "epoch": 1.7415730337078652,
+      "grad_norm": 2.673202958523956,
+      "learning_rate": 1.647283200695659e-06,
+      "loss": 0.8809846878051758,
+      "step": 310,
+      "token_acc": 0.7534229976978069
+    },
+    {
+      "epoch": 1.7696629213483146,
+      "grad_norm": 3.0977353145628252,
+      "learning_rate": 1.6322116492013114e-06,
+      "loss": 0.8135520935058593,
+      "step": 315,
+      "token_acc": 0.7728059524117085
+    },
+    {
+      "epoch": 1.797752808988764,
+      "grad_norm": 3.161032491715687,
+      "learning_rate": 1.6168969762454894e-06,
+      "loss": 0.8380592346191407,
+      "step": 320,
+      "token_acc": 0.7688794415879125
+    },
+    {
+      "epoch": 1.8258426966292136,
+      "grad_norm": 2.1351035087572914,
+      "learning_rate": 1.6013450711940016e-06,
+      "loss": 0.84451904296875,
+      "step": 325,
+      "token_acc": 0.7644641980412931
+    },
+    {
+      "epoch": 1.8539325842696628,
+      "grad_norm": 2.5786089611584617,
+      "learning_rate": 1.585561914641938e-06,
+      "loss": 0.8825285911560059,
+      "step": 330,
+      "token_acc": 0.757299130479129
+    },
+    {
+      "epoch": 1.8820224719101124,
+      "grad_norm": 2.9606661774725493,
+      "learning_rate": 1.5695535761137888e-06,
+      "loss": 0.846768856048584,
+      "step": 335,
+      "token_acc": 0.7666048961279626
+    },
+    {
+      "epoch": 1.9101123595505618,
+      "grad_norm": 2.381192973088887,
+      "learning_rate": 1.5533262117293647e-06,
+      "loss": 0.8275468826293946,
+      "step": 340,
+      "token_acc": 0.7726063522172616
+    },
+    {
+      "epoch": 1.9382022471910112,
+      "grad_norm": 3.482913361948986,
+      "learning_rate": 1.5368860618364207e-06,
+      "loss": 0.8249627113342285,
+      "step": 345,
+      "token_acc": 0.7723052959501557
+    },
+    {
+      "epoch": 1.9662921348314608,
+      "grad_norm": 2.755958406611769,
+      "learning_rate": 1.520239448610882e-06,
+      "loss": 0.8758167266845703,
+      "step": 350,
+      "token_acc": 0.7568186777220162
+    },
+    {
+      "epoch": 1.99438202247191,
+      "grad_norm": 3.142910514263914,
+      "learning_rate": 1.5033927736256105e-06,
+      "loss": 0.8654172897338868,
+      "step": 355,
+      "token_acc": 0.76903667897267
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.8831959962844849,
+      "eval_runtime": 52.0933,
+      "eval_samples_per_second": 3.033,
+      "eval_steps_per_second": 0.768,
+      "eval_token_acc": 0.7628892695290526,
+      "step": 356
+    },
+    {
+      "epoch": 2.0224719101123596,
+      "grad_norm": 2.6514649627160227,
+      "learning_rate": 1.486352515388631e-06,
+      "loss": 0.7637296676635742,
+      "step": 360,
+      "token_acc": 0.787094642955459
+    },
+    {
+      "epoch": 2.050561797752809,
+      "grad_norm": 2.5130483494890052,
+      "learning_rate": 1.4691252268517794e-06,
+      "loss": 0.7487339019775391,
+      "step": 365,
+      "token_acc": 0.7863372093023255
+    },
+    {
+      "epoch": 2.0786516853932584,
+      "grad_norm": 2.2367738572518903,
+      "learning_rate": 1.4517175328907139e-06,
+      "loss": 0.7428698539733887,
+      "step": 370,
+      "token_acc": 0.7893287614297589
+    },
+    {
+      "epoch": 2.106741573033708,
+      "grad_norm": 2.846055924613116,
+      "learning_rate": 1.4341361277572763e-06,
+      "loss": 0.7987964153289795,
+      "step": 375,
+      "token_acc": 0.7760270946841408
+    },
+    {
+      "epoch": 2.134831460674157,
+      "grad_norm": 2.2372491717862206,
+      "learning_rate": 1.4163877725051677e-06,
+      "loss": 0.7137997627258301,
+      "step": 380,
+      "token_acc": 0.7988278491738455
+    },
+    {
+      "epoch": 2.162921348314607,
+      "grad_norm": 2.3745693821214977,
+      "learning_rate": 1.3984792923899385e-06,
+      "loss": 0.7307297706604003,
+      "step": 385,
+      "token_acc": 0.7922184580638538
+    },
+    {
+      "epoch": 2.191011235955056,
+      "grad_norm": 3.3164812708414213,
+      "learning_rate": 1.3804175742442876e-06,
+      "loss": 0.7489160537719727,
+      "step": 390,
+      "token_acc": 0.7865196484828386
+    },
+    {
+      "epoch": 2.2191011235955056,
+      "grad_norm": 2.7509168990464805,
+      "learning_rate": 1.3622095638296825e-06,
+      "loss": 0.6962666511535645,
+      "step": 395,
+      "token_acc": 0.7949337099640075
+    },
+    {
+      "epoch": 2.247191011235955,
+      "grad_norm": 2.6822161528950352,
+      "learning_rate": 1.3438622631653175e-06,
+      "loss": 0.7728666305541992,
+      "step": 400,
+      "token_acc": 0.7806799555861048
+    },
+    {
+      "epoch": 2.2752808988764044,
+      "grad_norm": 2.2647028923132932,
+      "learning_rate": 1.3253827278354377e-06,
+      "loss": 0.7661968231201172,
+      "step": 405,
+      "token_acc": 0.781412619962867
+    },
+    {
+      "epoch": 2.303370786516854,
+      "grad_norm": 3.3285509551324908,
+      "learning_rate": 1.3067780642760637e-06,
+      "loss": 0.7373863220214844,
+      "step": 410,
+      "token_acc": 0.7888082123184778
+    },
+    {
+      "epoch": 2.331460674157303,
+      "grad_norm": 2.5609966409837317,
+      "learning_rate": 1.288055427042163e-06,
+      "loss": 0.724615478515625,
+      "step": 415,
+      "token_acc": 0.7954756740006198
+    },
+    {
+      "epoch": 2.359550561797753,
+      "grad_norm": 3.3676744604512447,
+      "learning_rate": 1.2692220160563123e-06,
+      "loss": 0.7289250373840332,
+      "step": 420,
+      "token_acc": 0.7965610055661536
+    },
+    {
+      "epoch": 2.3876404494382024,
+      "grad_norm": 2.2418087002062337,
+      "learning_rate": 1.2502850738399199e-06,
+      "loss": 0.7592850685119629,
+      "step": 425,
+      "token_acc": 0.7827390963112456
+    },
+    {
+      "epoch": 2.4157303370786516,
+      "grad_norm": 2.5013085027451387,
+      "learning_rate": 1.2312518827280603e-06,
+      "loss": 0.7156426429748535,
+      "step": 430,
+      "token_acc": 0.7934545116452048
+    },
+    {
+      "epoch": 2.443820224719101,
+      "grad_norm": 8.920939750256267,
+      "learning_rate": 1.212129762069001e-06,
+      "loss": 0.780320692062378,
+      "step": 435,
+      "token_acc": 0.7789655269930149
+    },
+    {
+      "epoch": 2.4719101123595504,
+      "grad_norm": 2.7433020699351105,
+      "learning_rate": 1.1929260654094969e-06,
+      "loss": 0.7487343788146973,
+      "step": 440,
+      "token_acc": 0.7883545497406883
+    },
+    {
+      "epoch": 2.5,
+      "grad_norm": 2.640181631499408,
+      "learning_rate": 1.1736481776669305e-06,
+      "loss": 0.738771390914917,
+      "step": 445,
+      "token_acc": 0.7911210551106924
+    },
+    {
+      "epoch": 2.5280898876404496,
+      "grad_norm": 3.5065458578004125,
+      "learning_rate": 1.1543035122893896e-06,
+      "loss": 0.7107549667358398,
+      "step": 450,
+      "token_acc": 0.798298464985316
+    },
+    {
+      "epoch": 2.556179775280899,
+      "grad_norm": 2.465346116852975,
+      "learning_rate": 1.1348995084047749e-06,
+      "loss": 0.7216014385223388,
+      "step": 455,
+      "token_acc": 0.7923961415953193
+    },
+    {
+      "epoch": 2.5842696629213484,
+      "grad_norm": 2.682211286191937,
+      "learning_rate": 1.1154436279600285e-06,
+      "loss": 0.7756028175354004,
+      "step": 460,
+      "token_acc": 0.7788508988367995
+    },
+    {
+      "epoch": 2.6123595505617976,
+      "grad_norm": 2.6236931540512587,
+      "learning_rate": 1.095943352851592e-06,
+      "loss": 0.7410534858703614,
+      "step": 465,
+      "token_acc": 0.7881666112102604
+    },
+    {
+      "epoch": 2.640449438202247,
+      "grad_norm": 2.2427981019248606,
+      "learning_rate": 1.076406182048187e-06,
+      "loss": 0.7423382759094238,
+      "step": 470,
+      "token_acc": 0.7889353925960858
+    },
+    {
+      "epoch": 2.668539325842697,
+      "grad_norm": 2.7179974305774537,
+      "learning_rate": 1.0568396287070376e-06,
+      "loss": 0.7271196365356445,
+      "step": 475,
+      "token_acc": 0.7933038063497632
+    },
+    {
+      "epoch": 2.696629213483146,
+      "grad_norm": 2.6392886600933325,
+      "learning_rate": 1.0372512172846295e-06,
+      "loss": 0.7158174991607666,
+      "step": 480,
+      "token_acc": 0.7964601769911505
+    },
+    {
+      "epoch": 2.7247191011235956,
+      "grad_norm": 3.6626688290107627,
+      "learning_rate": 1.0176484806431287e-06,
+      "loss": 0.6989315032958985,
+      "step": 485,
+      "token_acc": 0.7987363319004068
+    },
+    {
+      "epoch": 2.752808988764045,
+      "grad_norm": 3.2545806610618713,
+      "learning_rate": 9.980389571535647e-07,
+      "loss": 0.779600715637207,
+      "step": 490,
+      "token_acc": 0.7797347327558319
+    },
+    {
+      "epoch": 2.7808988764044944,
+      "grad_norm": 3.108102123916376,
+      "learning_rate": 9.78430187796898e-07,
+      "loss": 0.6946596145629883,
+      "step": 495,
+      "token_acc": 0.8014796149124508
+    },
+    {
+      "epoch": 2.808988764044944,
+      "grad_norm": 3.2002284536041015,
+      "learning_rate": 9.588297132640824e-07,
+      "loss": 0.7704888343811035,
+      "step": 500,
+      "token_acc": 0.7819947043248014
+    },
+    {
+      "epoch": 2.837078651685393,
+      "grad_norm": 2.5594223016016078,
+      "learning_rate": 9.392450710562375e-07,
+      "loss": 0.7854640483856201,
+      "step": 505,
+      "token_acc": 0.7854151852729715
+    },
+    {
+      "epoch": 2.865168539325843,
+      "grad_norm": 2.7620022462801272,
+      "learning_rate": 9.196837925860515e-07,
+      "loss": 0.7502545356750489,
+      "step": 510,
+      "token_acc": 0.7842350581155231
+    },
+    {
+      "epoch": 2.893258426966292,
+      "grad_norm": 2.605083380262497,
+      "learning_rate": 9.001534002815207e-07,
+      "loss": 0.8321625709533691,
+      "step": 515,
+      "token_acc": 0.7827749732293101
+    },
+    {
+      "epoch": 2.9213483146067416,
+      "grad_norm": 2.220912204917523,
+      "learning_rate": 8.80661404693149e-07,
+      "loss": 0.7523886680603027,
+      "step": 520,
+      "token_acc": 0.7955712645761586
+    },
+    {
+      "epoch": 2.949438202247191,
+      "grad_norm": 3.178835042001676,
+      "learning_rate": 8.612153016057112e-07,
+      "loss": 0.7746825218200684,
+      "step": 525,
+      "token_acc": 0.7810026385224275
+    },
+    {
+      "epoch": 2.9775280898876404,
+      "grad_norm": 2.863725028456792,
+      "learning_rate": 8.41822569155696e-07,
+      "loss": 0.7034193992614746,
+      "step": 530,
+      "token_acc": 0.7964837506659563
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.8825362324714661,
+      "eval_runtime": 52.2495,
+      "eval_samples_per_second": 3.024,
+      "eval_steps_per_second": 0.766,
+      "eval_token_acc": 0.7631736962196533,
+      "step": 534
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 890,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 11003023413248.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
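The state above is plain JSON, so it can be inspected without the training framework. Below is a minimal sketch (not part of the commit; the relative path is an assumption about where the checkpoint was downloaded) that loads it and separates the step-wise training logs from the end-of-epoch evaluations. Note that `best_metric` here evidently tracks `eval_loss`: 0.88253623 matches the epoch-3.0 eval entry at step 534.

```python
import json

# Hypothetical local path; adjust to wherever the checkpoint lives.
state_path = "v0-20251203-162131/checkpoint-534/trainer_state.json"
with open(state_path) as f:
    state = json.load(f)

# Entries carrying "loss" are the every-5-step training logs (see "logging_steps");
# entries carrying "eval_loss" are the end-of-epoch evaluations.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best step {state['best_global_step']}, best metric {state['best_metric']}")
for e in eval_log:
    print(f"epoch {e['epoch']:.1f}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_token_acc={e['eval_token_acc']:.4f}")
```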
v0-20251203-162131/checkpoint-534/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44081cc16ce3fcb84e4e98af483258582b748144383caae6be06fcf036d6207c
+size 9553
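`training_args.bin` is stored through Git LFS, so only the pointer file appears in the diff. By Hugging Face `Trainer` convention this file is the pickled training-arguments object saved next to each checkpoint; a hedged loading sketch follows (full unpickling is required, hence `weights_only=False`, and the environment must provide the same transformers/ms-swift classes that produced it):

```python
import torch

# Hypothetical local path to the resolved LFS object (9553 bytes per the pointer above).
args_path = "v0-20251203-162131/checkpoint-534/training_args.bin"

# The file is a pickled arguments dataclass, not a tensor payload, so weights_only=False.
training_args = torch.load(args_path, map_location="cpu", weights_only=False)
print(type(training_args).__name__)
print(training_args)
```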
v0-20251203-162131/checkpoint-534/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
v0-20251203-162131/checkpoint-534/zero_to_fp32.py
ADDED
@@ -0,0 +1,760 @@
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Copyright (c) Microsoft Corporation.
|
| 4 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 5 |
+
|
| 6 |
+
# DeepSpeed Team
|
| 7 |
+
|
| 8 |
+
# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
|
| 9 |
+
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
| 10 |
+
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
| 11 |
+
# application.
|
| 12 |
+
#
|
| 13 |
+
# example:
|
| 14 |
+
# python zero_to_fp32.py . output_dir/
|
| 15 |
+
# or
|
| 16 |
+
# python zero_to_fp32.py . output_dir/ --safe_serialization
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
import torch
|
| 20 |
+
import glob
|
| 21 |
+
import math
|
| 22 |
+
import os
|
| 23 |
+
import re
|
| 24 |
+
import gc
|
| 25 |
+
import json
|
| 26 |
+
import numpy as np
|
| 27 |
+
from tqdm import tqdm
|
| 28 |
+
from collections import OrderedDict
|
| 29 |
+
from dataclasses import dataclass
|
| 30 |
+
|
| 31 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
| 32 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
| 33 |
+
from deepspeed.utils import logger
|
| 34 |
+
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
| 35 |
+
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
| 36 |
+
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@dataclass
|
| 40 |
+
class zero_model_state:
|
| 41 |
+
buffers: dict()
|
| 42 |
+
param_shapes: dict()
|
| 43 |
+
shared_params: list
|
| 44 |
+
ds_version: int
|
| 45 |
+
frozen_param_shapes: dict()
|
| 46 |
+
frozen_param_fragments: dict()
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
debug = 0
|
| 50 |
+
|
| 51 |
+
# load to cpu
|
| 52 |
+
device = torch.device('cpu')
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def atoi(text):
|
| 56 |
+
return int(text) if text.isdigit() else text
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def natural_keys(text):
|
| 60 |
+
'''
|
| 61 |
+
alist.sort(key=natural_keys) sorts in human order
|
| 62 |
+
http://nedbatchelder.com/blog/200712/human_sorting.html
|
| 63 |
+
(See Toothy's implementation in the comments)
|
| 64 |
+
'''
|
| 65 |
+
return [atoi(c) for c in re.split(r'(\d+)', text)]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def get_model_state_file(checkpoint_dir, zero_stage):
|
| 69 |
+
if not os.path.isdir(checkpoint_dir):
|
| 70 |
+
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
|
| 71 |
+
|
| 72 |
+
# there should be only one file
|
| 73 |
+
if zero_stage <= 2:
|
| 74 |
+
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
|
| 75 |
+
elif zero_stage == 3:
|
| 76 |
+
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
|
| 77 |
+
|
| 78 |
+
if not os.path.exists(file):
|
| 79 |
+
raise FileNotFoundError(f"can't find model states file at '{file}'")
|
| 80 |
+
|
| 81 |
+
return file
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
|
| 85 |
+
# XXX: need to test that this simple glob rule works for multi-node setup too
|
| 86 |
+
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
|
| 87 |
+
|
| 88 |
+
if len(ckpt_files) == 0:
|
| 89 |
+
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
|
| 90 |
+
|
| 91 |
+
return ckpt_files
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def get_optim_files(checkpoint_dir):
|
| 95 |
+
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def get_model_state_files(checkpoint_dir):
|
| 99 |
+
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def parse_model_states(files):
|
| 103 |
+
zero_model_states = []
|
| 104 |
+
for file in files:
|
| 105 |
+
state_dict = torch.load(file, map_location=device, weights_only=False)
|
| 106 |
+
|
| 107 |
+
if BUFFER_NAMES not in state_dict:
|
| 108 |
+
raise ValueError(f"{file} is not a model state checkpoint")
|
| 109 |
+
buffer_names = state_dict[BUFFER_NAMES]
|
| 110 |
+
if debug:
|
| 111 |
+
print("Found buffers:", buffer_names)
|
| 112 |
+
|
| 113 |
+
# recover just the buffers while restoring them to fp32 if they were saved in fp16
|
| 114 |
+
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
|
| 115 |
+
param_shapes = state_dict[PARAM_SHAPES]
|
| 116 |
+
|
| 117 |
+
# collect parameters that are included in param_shapes
|
| 118 |
+
param_names = []
|
| 119 |
+
for s in param_shapes:
|
| 120 |
+
for name in s.keys():
|
| 121 |
+
param_names.append(name)
|
| 122 |
+
|
| 123 |
+
# update with frozen parameters
|
| 124 |
+
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
|
| 125 |
+
if frozen_param_shapes is not None:
|
| 126 |
+
if debug:
|
| 127 |
+
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
|
| 128 |
+
param_names += list(frozen_param_shapes.keys())
|
| 129 |
+
|
| 130 |
+
# handle shared params
|
| 131 |
+
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
|
| 132 |
+
|
| 133 |
+
ds_version = state_dict.get(DS_VERSION, None)
|
| 134 |
+
|
| 135 |
+
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
|
| 136 |
+
|
| 137 |
+
z_model_state = zero_model_state(buffers=buffers,
|
| 138 |
+
param_shapes=param_shapes,
|
| 139 |
+
shared_params=shared_params,
|
| 140 |
+
ds_version=ds_version,
|
| 141 |
+
frozen_param_shapes=frozen_param_shapes,
|
| 142 |
+
frozen_param_fragments=frozen_param_fragments)
|
| 143 |
+
zero_model_states.append(z_model_state)
|
| 144 |
+
|
| 145 |
+
return zero_model_states
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def parse_optim_states(files, ds_checkpoint_dir):
|
| 149 |
+
total_files = len(files)
|
| 150 |
+
state_dicts = []
|
| 151 |
+
for f in tqdm(files, desc='Loading checkpoint shards'):
|
| 152 |
+
state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
|
| 153 |
+
# immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
|
| 154 |
+
# and also handle the case where it was already removed by another helper script
|
| 155 |
+
state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
|
| 156 |
+
state_dicts.append(state_dict)
|
| 157 |
+
|
| 158 |
+
if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
|
| 159 |
+
raise ValueError(f"{files[0]} is not a zero checkpoint")
|
| 160 |
+
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
|
| 161 |
+
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
|
| 162 |
+
|
| 163 |
+
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
|
| 164 |
+
# parameters can be different from data parallelism for non-expert parameters. So we can just
|
| 165 |
+
# use the max of the partition_count to get the dp world_size.
|
| 166 |
+
|
| 167 |
+
if type(world_size) is list:
|
| 168 |
+
world_size = max(world_size)
|
| 169 |
+
|
| 170 |
+
if world_size != total_files:
|
| 171 |
+
raise ValueError(
|
| 172 |
+
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
|
| 173 |
+
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
# the groups are named differently in each stage
|
| 177 |
+
if zero_stage <= 2:
|
| 178 |
+
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
|
| 179 |
+
elif zero_stage == 3:
|
| 180 |
+
fp32_groups_key = FP32_FLAT_GROUPS
|
| 181 |
+
else:
|
| 182 |
+
raise ValueError(f"unknown zero stage {zero_stage}")
|
| 183 |
+
|
| 184 |
+
fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
|
| 185 |
+
return zero_stage, world_size, fp32_flat_groups
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
|
| 189 |
+
"""
|
| 190 |
+
Returns fp32 state_dict reconstructed from ds checkpoint
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
|
| 194 |
+
|
| 195 |
+
"""
|
| 196 |
+
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
|
| 197 |
+
|
| 198 |
+
optim_files = get_optim_files(ds_checkpoint_dir)
|
| 199 |
+
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
|
| 200 |
+
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
|
| 201 |
+
|
| 202 |
+
model_files = get_model_state_files(ds_checkpoint_dir)
|
| 203 |
+
|
| 204 |
+
zero_model_states = parse_model_states(model_files)
|
| 205 |
+
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
|
| 206 |
+
|
| 207 |
+
if zero_stage <= 2:
|
| 208 |
+
return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
| 209 |
+
exclude_frozen_parameters)
|
| 210 |
+
elif zero_stage == 3:
|
| 211 |
+
return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
| 212 |
+
exclude_frozen_parameters)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
|
| 216 |
+
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
|
| 217 |
+
return
|
| 218 |
+
|
| 219 |
+
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
|
| 220 |
+
frozen_param_fragments = zero_model_states[0].frozen_param_fragments
|
| 221 |
+
|
| 222 |
+
if debug:
|
| 223 |
+
num_elem = sum(s.numel() for s in frozen_param_shapes.values())
|
| 224 |
+
print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
|
| 225 |
+
|
| 226 |
+
wanted_params = len(frozen_param_shapes)
|
| 227 |
+
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
|
| 228 |
+
avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
|
| 229 |
+
print(f'Frozen params: Have {avail_numel} numels to process.')
|
| 230 |
+
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
|
| 231 |
+
|
| 232 |
+
total_params = 0
|
| 233 |
+
total_numel = 0
|
| 234 |
+
for name, shape in frozen_param_shapes.items():
|
| 235 |
+
total_params += 1
|
| 236 |
+
unpartitioned_numel = shape.numel()
|
| 237 |
+
total_numel += unpartitioned_numel
|
| 238 |
+
|
| 239 |
+
state_dict[name] = frozen_param_fragments[name]
|
| 240 |
+
|
| 241 |
+
if debug:
|
| 242 |
+
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
|
| 243 |
+
|
| 244 |
+
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def _has_callable(obj, fn):
|
| 248 |
+
attr = getattr(obj, fn, None)
|
| 249 |
+
return callable(attr)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
|
| 253 |
+
param_shapes = zero_model_states[0].param_shapes
|
| 254 |
+
|
| 255 |
+
# Reconstruction protocol:
|
| 256 |
+
#
|
| 257 |
+
# XXX: document this
|
| 258 |
+
|
| 259 |
+
if debug:
|
| 260 |
+
for i in range(world_size):
|
| 261 |
+
for j in range(len(fp32_flat_groups[0])):
|
| 262 |
+
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
|
| 263 |
+
|
| 264 |
+
# XXX: memory usage doubles here (zero2)
|
| 265 |
+
num_param_groups = len(fp32_flat_groups[0])
|
| 266 |
+
merged_single_partition_of_fp32_groups = []
|
| 267 |
+
for i in range(num_param_groups):
|
| 268 |
+
merged_partitions = [sd[i] for sd in fp32_flat_groups]
|
| 269 |
+
full_single_fp32_vector = torch.cat(merged_partitions, 0)
|
| 270 |
+
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
|
| 271 |
+
avail_numel = sum(
|
| 272 |
+
[full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
|
| 273 |
+
|
| 274 |
+
if debug:
|
| 275 |
+
wanted_params = sum([len(shapes) for shapes in param_shapes])
|
| 276 |
+
wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
|
| 277 |
+
# not asserting if there is a mismatch due to possible padding
|
| 278 |
+
print(f"Have {avail_numel} numels to process.")
|
| 279 |
+
print(f"Need {wanted_numel} numels in {wanted_params} params.")
|
| 280 |
+
|
| 281 |
+
# params
|
| 282 |
+
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
|
| 283 |
+
# out-of-core computing solution
|
| 284 |
+
total_numel = 0
|
| 285 |
+
total_params = 0
|
| 286 |
+
for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
|
| 287 |
+
offset = 0
|
| 288 |
+
avail_numel = full_single_fp32_vector.numel()
|
| 289 |
+
for name, shape in shapes.items():
|
| 290 |
+
|
| 291 |
+
unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
|
| 292 |
+
total_numel += unpartitioned_numel
|
| 293 |
+
total_params += 1
|
| 294 |
+
|
| 295 |
+
if debug:
|
| 296 |
+
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
|
| 297 |
+
state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
|
| 298 |
+
offset += unpartitioned_numel
|
| 299 |
+
|
| 300 |
+
# Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
|
| 301 |
+
# avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
|
| 302 |
+
# paddings performed in the code it's almost impossible to predict the exact numbers w/o the
|
| 303 |
+
# live optimizer object, so we are checking that the numbers are within the right range
|
| 304 |
+
align_to = 2 * world_size
|
| 305 |
+
|
| 306 |
+
def zero2_align(x):
|
| 307 |
+
return align_to * math.ceil(x / align_to)
|
| 308 |
+
|
| 309 |
+
if debug:
|
| 310 |
+
print(f"original offset={offset}, avail_numel={avail_numel}")
|
| 311 |
+
|
| 312 |
+
offset = zero2_align(offset)
|
| 313 |
+
avail_numel = zero2_align(avail_numel)
|
| 314 |
+
|
| 315 |
+
if debug:
|
| 316 |
+
print(f"aligned offset={offset}, avail_numel={avail_numel}")
|
| 317 |
+
|
| 318 |
+
# Sanity check
|
| 319 |
+
if offset != avail_numel:
|
| 320 |
+
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
|
| 321 |
+
|
| 322 |
+
print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
|
| 326 |
+
exclude_frozen_parameters):
|
| 327 |
+
state_dict = OrderedDict()
|
| 328 |
+
|
| 329 |
+
# buffers
|
| 330 |
+
buffers = zero_model_states[0].buffers
|
| 331 |
+
state_dict.update(buffers)
|
| 332 |
+
if debug:
|
| 333 |
+
print(f"added {len(buffers)} buffers")
|
| 334 |
+
|
| 335 |
+
if not exclude_frozen_parameters:
|
| 336 |
+
_zero2_merge_frozen_params(state_dict, zero_model_states)
|
| 337 |
+
|
| 338 |
+
_zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
|
| 339 |
+
|
| 340 |
+
# recover shared parameters
|
| 341 |
+
for pair in zero_model_states[0].shared_params:
|
| 342 |
+
if pair[1] in state_dict:
|
| 343 |
+
state_dict[pair[0]] = state_dict[pair[1]]
|
| 344 |
+
|
| 345 |
+
return state_dict
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
|
| 349 |
+
remainder = unpartitioned_numel % world_size
|
| 350 |
+
padding_numel = (world_size - remainder) if remainder else 0
|
| 351 |
+
partitioned_numel = math.ceil(unpartitioned_numel / world_size)
|
| 352 |
+
return partitioned_numel, padding_numel
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
|
| 356 |
+
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
|
| 357 |
+
return
|
| 358 |
+
|
| 359 |
+
if debug:
|
| 360 |
+
for i in range(world_size):
|
| 361 |
+
num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
|
| 362 |
+
print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
|
| 363 |
+
|
| 364 |
+
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
|
| 365 |
+
wanted_params = len(frozen_param_shapes)
|
| 366 |
+
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
|
| 367 |
+
avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
|
| 368 |
+
print(f'Frozen params: Have {avail_numel} numels to process.')
|
| 369 |
+
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
|
| 370 |
+
|
| 371 |
+
total_params = 0
|
| 372 |
+
total_numel = 0
|
| 373 |
+
for name, shape in zero_model_states[0].frozen_param_shapes.items():
|
| 374 |
+
total_params += 1
|
| 375 |
+
unpartitioned_numel = shape.numel()
|
| 376 |
+
total_numel += unpartitioned_numel
|
| 377 |
+
|
| 378 |
+
param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
|
| 379 |
+
state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
|
| 380 |
+
|
| 381 |
+
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
|
| 382 |
+
|
| 383 |
+
if debug:
|
| 384 |
+
print(
|
| 385 |
+
f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
|
| 386 |
+
)
|
| 387 |
+
|
| 388 |
+
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


class GatheredTensor:
    """
    A pseudo tensor that collects partitioned weights.
    It is more memory efficient when there are multiple groups.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        self.flat_groups = flat_groups
        self.flat_groups_offset = flat_groups_offset
        self.offset = offset
        self.partitioned_numel = partitioned_numel
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Merge partitioned weights from flat_groups into a single tensor.
        """
        end_idx = self.offset + self.partitioned_numel
        world_size = len(self.flat_groups)
        pad_flat_param_chunks = []

        for rank_i in range(world_size):
            # for each rank, we need to collect weights from related group/groups
            flat_groups_at_rank_i = self.flat_groups[rank_i]
            start_group_id = None
            end_group_id = None
            for group_id in range(len(self.flat_groups_offset)):
                if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
                    start_group_id = group_id
                if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
                    end_group_id = group_id
                    break
            # collect weights from related group/groups
            for group_id in range(start_group_id, end_group_id + 1):
                flat_tensor = flat_groups_at_rank_i[group_id]
                start_offset = self.offset - self.flat_groups_offset[group_id]
                end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
                pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])

        # collect weights from all ranks
        pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
        param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
        return param


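# A GatheredTensor is only a placeholder: the cross-rank, cross-group slicing above runs
# when .contiguous() is called, so callers can materialize one parameter at a time. A
# minimal sketch (mirroring the lazy_mode docstring further below):
#
#     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True)
#     for name, lazy_tensor in state_dict.items():
#         tensor = lazy_tensor.contiguous()  # gathers just this parameter onto cpu
#         ...                                # use it, then del it to bound peak memory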
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size

    # Reconstruction protocol: for zero3 we need to zip the partitions together at the boundary of
    # each param, re-consolidating each param while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            # each rank holds a list of flat tensors, one per param group
            print(f"{FP32_FLAT_GROUPS}[{i}] shapes={[flat_tensor.shape for flat_tensor in fp32_flat_groups[i]]}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = sum(flat_group.numel() for flat_group in fp32_flat_groups[0]) * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # an out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


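# Worked example of the sanity check above (assuming world_size=2): a 7-element parameter
# yields partitioned_numel=4 (ceil(7/2)) with partitioned_padding_numel=1, so the two
# ranks together hold 8 numels for it; summing partitioned_numel over all params and
# multiplying by world_size must therefore equal avail_numel, the padded size of the
# flat groups across ranks.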
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


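# The shared-params pass above restores weight tying: for a recorded (alias, source) pair,
# e.g. a hypothetical ('lm_head.weight', 'model.embed_tokens.weight'), the gathered source
# tensor is simply registered under the alias name as well, so both keys share storage.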
def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Convert state_dict of GatheredTensor to torch tensor
    """
    torch_state_dict = {}
    converted_tensors = {}
    for name, tensor in state_dict.items():
        tensor_id = id(tensor)
        if tensor_id in converted_tensors:  # shared tensors
            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
            torch_state_dict[name] = shared_tensor
        else:
            converted_tensors[tensor_id] = name
            if return_empty_tensor:
                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
            else:
                torch_state_dict[name] = tensor.contiguous()
    return torch_state_dict


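# Note on return_empty_tensor=True above: it produces shape/dtype-only placeholders via
# torch.empty, which is sufficient for split_torch_state_dict_into_shards (used further
# below) to plan shard boundaries without gathering any actual weights.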
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
          Convert a pseudo tensor to a torch tensor with ``.contiguous()``

    Returns:
        - pytorch ``state_dict``

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint. Or you can load the state_dict in lazy mode ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
        for name, lazy_tensor in state_dict.items():
            tensor = lazy_tensor.contiguous() # to cpu
            print(name, tensor)
            # del tensor to release memory if it is no longer in use
    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    if lazy_mode:
        return state_dict
    else:
        return to_torch_tensor(state_dict)


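# Tag resolution example: a 'latest' file whose single line reads ``global_step14`` makes
# the function above load the ZeRO shards from <checkpoint_dir>/global_step14/, matching
# the ``tag`` argument documented in its docstring.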
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    # Dependency pre-check
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # a memory-efficient approach for sharding
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of current shard
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size "
        "lower than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
v0-20251203-162131/logging.jsonl
ADDED
@@ -0,0 +1,187 @@
{"loss": 1.92942202, "grad_norm": 20.42414749, "learning_rate": 2e-08, "token_acc": 0.6705119, "epoch": 0.00561798, "global_step/max_steps": "1/890", "percentage": "0.11%", "elapsed_time": "15s", "remaining_time": "3h 44m 13s", "memory(GiB)": 59.43, "train_speed(iter/s)": 0.066079}
{"loss": 1.90430641, "grad_norm": 23.43911915, "learning_rate": 1.1e-07, "token_acc": 0.6692064, "epoch": 0.02808989, "global_step/max_steps": "5/890", "percentage": "0.56%", "elapsed_time": "49s", "remaining_time": "2h 25m 29s", "memory(GiB)": 59.43, "train_speed(iter/s)": 0.101378}
{"loss": 1.70010834, "grad_norm": 19.3245644, "learning_rate": 2.2e-07, "token_acc": 0.69614819, "epoch": 0.05617978, "global_step/max_steps": "10/890", "percentage": "1.12%", "elapsed_time": "1m 34s", "remaining_time": "2h 18m 31s", "memory(GiB)": 63.15, "train_speed(iter/s)": 0.105878}
{"loss": 1.81324654, "grad_norm": 20.59944223, "learning_rate": 3.4e-07, "token_acc": 0.68714911, "epoch": 0.08426966, "global_step/max_steps": "15/890", "percentage": "1.69%", "elapsed_time": "2m 17s", "remaining_time": "2h 13m 14s", "memory(GiB)": 74.53, "train_speed(iter/s)": 0.109456}
{"loss": 1.87895241, "grad_norm": 15.93092435, "learning_rate": 4.5e-07, "token_acc": 0.68311777, "epoch": 0.11235955, "global_step/max_steps": "20/890", "percentage": "2.25%", "elapsed_time": "2m 58s", "remaining_time": "2h 9m 10s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.112247}
{"loss": 1.72007675, "grad_norm": 19.69270951, "learning_rate": 5.6e-07, "token_acc": 0.69078987, "epoch": 0.14044944, "global_step/max_steps": "25/890", "percentage": "2.81%", "elapsed_time": "3m 43s", "remaining_time": "2h 9m 1s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.111735}
{"loss": 1.65771542, "grad_norm": 13.97484414, "learning_rate": 6.7e-07, "token_acc": 0.69284365, "epoch": 0.16853933, "global_step/max_steps": "30/890", "percentage": "3.37%", "elapsed_time": "4m 26s", "remaining_time": "2h 7m 22s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.112528}
{"loss": 1.46166172, "grad_norm": 9.6934612, "learning_rate": 7.9e-07, "token_acc": 0.70454381, "epoch": 0.19662921, "global_step/max_steps": "35/890", "percentage": "3.93%", "elapsed_time": "5m 15s", "remaining_time": "2h 8m 31s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110879}
{"loss": 1.4967165, "grad_norm": 6.59882023, "learning_rate": 9e-07, "token_acc": 0.69716561, "epoch": 0.2247191, "global_step/max_steps": "40/890", "percentage": "4.49%", "elapsed_time": "5m 54s", "remaining_time": "2h 5m 40s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.112719}
{"loss": 1.2387085, "grad_norm": 5.67433819, "learning_rate": 1.01e-06, "token_acc": 0.71904713, "epoch": 0.25280899, "global_step/max_steps": "45/890", "percentage": "5.06%", "elapsed_time": "6m 37s", "remaining_time": "2h 4m 27s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113159}
{"loss": 1.29509754, "grad_norm": 5.52941944, "learning_rate": 1.12e-06, "token_acc": 0.70024831, "epoch": 0.28089888, "global_step/max_steps": "50/890", "percentage": "5.62%", "elapsed_time": "7m 21s", "remaining_time": "2h 3m 36s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113266}
{"loss": 1.23522968, "grad_norm": 3.87909263, "learning_rate": 1.24e-06, "token_acc": 0.71055427, "epoch": 0.30898876, "global_step/max_steps": "55/890", "percentage": "6.18%", "elapsed_time": "8m 0s", "remaining_time": "2h 1m 40s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114376}
{"loss": 1.2055891, "grad_norm": 3.95355749, "learning_rate": 1.35e-06, "token_acc": 0.70742832, "epoch": 0.33707865, "global_step/max_steps": "60/890", "percentage": "6.74%", "elapsed_time": "8m 44s", "remaining_time": "2h 0m 50s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114476}
{"loss": 1.18119116, "grad_norm": 3.64334308, "learning_rate": 1.46e-06, "token_acc": 0.70930263, "epoch": 0.36516854, "global_step/max_steps": "65/890", "percentage": "7.30%", "elapsed_time": "9m 29s", "remaining_time": "2h 0m 23s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114216}
{"loss": 1.03968716, "grad_norm": 3.47086114, "learning_rate": 1.57e-06, "token_acc": 0.73540343, "epoch": 0.39325843, "global_step/max_steps": "70/890", "percentage": "7.87%", "elapsed_time": "10m 14s", "remaining_time": "2h 0m 1s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113865}
{"loss": 1.04052029, "grad_norm": 2.58722243, "learning_rate": 1.69e-06, "token_acc": 0.73752655, "epoch": 0.42134831, "global_step/max_steps": "75/890", "percentage": "8.43%", "elapsed_time": "11m 1s", "remaining_time": "1h 59m 47s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113394}
{"loss": 1.12376413, "grad_norm": 3.87097774, "learning_rate": 1.8e-06, "token_acc": 0.71872918, "epoch": 0.4494382, "global_step/max_steps": "80/890", "percentage": "8.99%", "elapsed_time": "11m 44s", "remaining_time": "1h 58m 56s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113504}
{"loss": 1.1050169, "grad_norm": 2.86900097, "learning_rate": 1.91e-06, "token_acc": 0.72378547, "epoch": 0.47752809, "global_step/max_steps": "85/890", "percentage": "9.55%", "elapsed_time": "12m 30s", "remaining_time": "1h 58m 30s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.11322}
{"loss": 1.07732038, "grad_norm": 2.77978478, "learning_rate": 2e-06, "token_acc": 0.72290198, "epoch": 0.50561798, "global_step/max_steps": "90/890", "percentage": "10.11%", "elapsed_time": "13m 15s", "remaining_time": "1h 57m 55s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113067}
{"loss": 1.0275424, "grad_norm": 2.8893484, "learning_rate": 2e-06, "token_acc": 0.73385087, "epoch": 0.53370787, "global_step/max_steps": "95/890", "percentage": "10.67%", "elapsed_time": "14m 1s", "remaining_time": "1h 57m 19s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.112933}
{"loss": 1.0429039, "grad_norm": 3.29654595, "learning_rate": 2e-06, "token_acc": 0.72565854, "epoch": 0.56179775, "global_step/max_steps": "100/890", "percentage": "11.24%", "elapsed_time": "14m 44s", "remaining_time": "1h 56m 23s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.11312}
{"loss": 1.14264431, "grad_norm": 4.22390913, "learning_rate": 2e-06, "token_acc": 0.71340611, "epoch": 0.58988764, "global_step/max_steps": "105/890", "percentage": "11.80%", "elapsed_time": "15m 29s", "remaining_time": "1h 55m 45s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113022}
{"loss": 1.12623348, "grad_norm": 3.11199516, "learning_rate": 2e-06, "token_acc": 0.70548031, "epoch": 0.61797753, "global_step/max_steps": "110/890", "percentage": "12.36%", "elapsed_time": "16m 9s", "remaining_time": "1h 54m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113502}
{"loss": 0.99343472, "grad_norm": 3.12532028, "learning_rate": 1.99e-06, "token_acc": 0.74252572, "epoch": 0.64606742, "global_step/max_steps": "115/890", "percentage": "12.92%", "elapsed_time": "16m 55s", "remaining_time": "1h 54m 3s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.11324}
{"loss": 1.0211853, "grad_norm": 3.43640588, "learning_rate": 1.99e-06, "token_acc": 0.73008338, "epoch": 0.6741573, "global_step/max_steps": "120/890", "percentage": "13.48%", "elapsed_time": "17m 39s", "remaining_time": "1h 53m 15s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113313}
{"loss": 1.04896641, "grad_norm": 3.41040332, "learning_rate": 1.99e-06, "token_acc": 0.72532747, "epoch": 0.70224719, "global_step/max_steps": "125/890", "percentage": "14.04%", "elapsed_time": "18m 20s", "remaining_time": "1h 52m 12s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.11363}
{"loss": 0.97711716, "grad_norm": 2.53376656, "learning_rate": 1.99e-06, "token_acc": 0.73961361, "epoch": 0.73033708, "global_step/max_steps": "130/890", "percentage": "14.61%", "elapsed_time": "19m 6s", "remaining_time": "1h 51m 39s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113438}
{"loss": 0.99264011, "grad_norm": 3.50037176, "learning_rate": 1.98e-06, "token_acc": 0.73040923, "epoch": 0.75842697, "global_step/max_steps": "135/890", "percentage": "15.17%", "elapsed_time": "19m 47s", "remaining_time": "1h 50m 42s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113654}
{"loss": 1.07621098, "grad_norm": 2.80856996, "learning_rate": 1.98e-06, "token_acc": 0.70929261, "epoch": 0.78651685, "global_step/max_steps": "140/890", "percentage": "15.73%", "elapsed_time": "20m 31s", "remaining_time": "1h 49m 55s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113705}
{"loss": 0.94052849, "grad_norm": 2.72740433, "learning_rate": 1.98e-06, "token_acc": 0.74306219, "epoch": 0.81460674, "global_step/max_steps": "145/890", "percentage": "16.29%", "elapsed_time": "21m 13s", "remaining_time": "1h 49m 4s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113831}
{"loss": 1.0379549, "grad_norm": 2.90037439, "learning_rate": 1.97e-06, "token_acc": 0.72216818, "epoch": 0.84269663, "global_step/max_steps": "150/890", "percentage": "16.85%", "elapsed_time": "21m 57s", "remaining_time": "1h 48m 18s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.113873}
{"loss": 0.96584625, "grad_norm": 3.39413882, "learning_rate": 1.97e-06, "token_acc": 0.73532589, "epoch": 0.87078652, "global_step/max_steps": "155/890", "percentage": "17.42%", "elapsed_time": "22m 36s", "remaining_time": "1h 47m 12s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114263}
{"loss": 0.93993282, "grad_norm": 3.47111775, "learning_rate": 1.96e-06, "token_acc": 0.73803476, "epoch": 0.8988764, "global_step/max_steps": "160/890", "percentage": "17.98%", "elapsed_time": "23m 18s", "remaining_time": "1h 46m 21s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114394}
{"loss": 0.98583937, "grad_norm": 3.53070142, "learning_rate": 1.96e-06, "token_acc": 0.73805606, "epoch": 0.92696629, "global_step/max_steps": "165/890", "percentage": "18.54%", "elapsed_time": "24m 3s", "remaining_time": "1h 45m 40s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114343}
{"loss": 1.03432598, "grad_norm": 3.43261649, "learning_rate": 1.95e-06, "token_acc": 0.71817965, "epoch": 0.95505618, "global_step/max_steps": "170/890", "percentage": "19.10%", "elapsed_time": "24m 43s", "remaining_time": "1h 44m 43s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114589}
{"loss": 1.00981655, "grad_norm": 2.74725682, "learning_rate": 1.94e-06, "token_acc": 0.73249691, "epoch": 0.98314607, "global_step/max_steps": "175/890", "percentage": "19.66%", "elapsed_time": "25m 22s", "remaining_time": "1h 43m 42s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.114914}
{"eval_loss": 0.94159585, "eval_runtime": 50.628, "eval_samples_per_second": 3.121, "eval_steps_per_second": 0.79, "eval_token_acc": 0.75146796, "epoch": 1.0, "global_step/max_steps": "178/890", "percentage": "20.00%", "elapsed_time": "26m 38s", "remaining_time": "1h 46m 33s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.111359}
{"loss": 0.89632187, "grad_norm": 2.65966407, "learning_rate": 1.94e-06, "token_acc": 0.76431289, "epoch": 1.01123596, "global_step/max_steps": "180/890", "percentage": "20.22%", "elapsed_time": "28m 10s", "remaining_time": "1h 51m 7s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106483}
{"loss": 0.87094765, "grad_norm": 3.01700058, "learning_rate": 1.93e-06, "token_acc": 0.76282756, "epoch": 1.03932584, "global_step/max_steps": "185/890", "percentage": "20.79%", "elapsed_time": "28m 53s", "remaining_time": "1h 50m 6s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106721}
{"loss": 0.92874203, "grad_norm": 3.55407235, "learning_rate": 1.92e-06, "token_acc": 0.75095775, "epoch": 1.06741573, "global_step/max_steps": "190/890", "percentage": "21.35%", "elapsed_time": "29m 35s", "remaining_time": "1h 49m 2s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106985}
{"loss": 0.91023655, "grad_norm": 2.22328975, "learning_rate": 1.91e-06, "token_acc": 0.75912351, "epoch": 1.09550562, "global_step/max_steps": "195/890", "percentage": "21.91%", "elapsed_time": "30m 18s", "remaining_time": "1h 48m 1s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107226}
{"loss": 0.88683376, "grad_norm": 3.23140581, "learning_rate": 1.91e-06, "token_acc": 0.75498108, "epoch": 1.12359551, "global_step/max_steps": "200/890", "percentage": "22.47%", "elapsed_time": "31m 2s", "remaining_time": "1h 47m 5s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107381}
{"loss": 0.89876013, "grad_norm": 2.52677907, "learning_rate": 1.9e-06, "token_acc": 0.76296649, "epoch": 1.15168539, "global_step/max_steps": "205/890", "percentage": "23.03%", "elapsed_time": "31m 47s", "remaining_time": "1h 46m 13s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107473}
{"loss": 0.93150196, "grad_norm": 3.1348347, "learning_rate": 1.89e-06, "token_acc": 0.75620723, "epoch": 1.17977528, "global_step/max_steps": "210/890", "percentage": "23.60%", "elapsed_time": "32m 25s", "remaining_time": "1h 45m 0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10793}
{"loss": 0.87415352, "grad_norm": 3.84369752, "learning_rate": 1.88e-06, "token_acc": 0.7632421, "epoch": 1.20786517, "global_step/max_steps": "215/890", "percentage": "24.16%", "elapsed_time": "33m 9s", "remaining_time": "1h 44m 7s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108051}
{"loss": 0.90635891, "grad_norm": 3.03589876, "learning_rate": 1.87e-06, "token_acc": 0.75811217, "epoch": 1.23595506, "global_step/max_steps": "220/890", "percentage": "24.72%", "elapsed_time": "33m 53s", "remaining_time": "1h 43m 13s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108171}
{"loss": 0.86896734, "grad_norm": 3.21890946, "learning_rate": 1.86e-06, "token_acc": 0.76261309, "epoch": 1.26404494, "global_step/max_steps": "225/890", "percentage": "25.28%", "elapsed_time": "34m 41s", "remaining_time": "1h 42m 31s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108095}
{"loss": 0.88773365, "grad_norm": 2.39275322, "learning_rate": 1.85e-06, "token_acc": 0.75995715, "epoch": 1.29213483, "global_step/max_steps": "230/890", "percentage": "25.84%", "elapsed_time": "35m 29s", "remaining_time": "1h 41m 51s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107986}
{"loss": 0.86067343, "grad_norm": 2.23791008, "learning_rate": 1.84e-06, "token_acc": 0.76934352, "epoch": 1.32022472, "global_step/max_steps": "235/890", "percentage": "26.40%", "elapsed_time": "36m 12s", "remaining_time": "1h 40m 56s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10815}
{"loss": 0.84840374, "grad_norm": 2.79881326, "learning_rate": 1.83e-06, "token_acc": 0.76549145, "epoch": 1.34831461, "global_step/max_steps": "240/890", "percentage": "26.97%", "elapsed_time": "36m 57s", "remaining_time": "1h 40m 6s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10822}
{"loss": 0.88655796, "grad_norm": 2.82713023, "learning_rate": 1.82e-06, "token_acc": 0.75943928, "epoch": 1.37640449, "global_step/max_steps": "245/890", "percentage": "27.53%", "elapsed_time": "37m 40s", "remaining_time": "1h 39m 10s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108388}
{"loss": 0.87663126, "grad_norm": 3.2222662, "learning_rate": 1.81e-06, "token_acc": 0.76031247, "epoch": 1.40449438, "global_step/max_steps": "250/890", "percentage": "28.09%", "elapsed_time": "38m 24s", "remaining_time": "1h 38m 18s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108502}
{"loss": 0.83121338, "grad_norm": 2.57449132, "learning_rate": 1.8e-06, "token_acc": 0.76874559, "epoch": 1.43258427, "global_step/max_steps": "255/890", "percentage": "28.65%", "elapsed_time": "39m 10s", "remaining_time": "1h 37m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108497}
{"loss": 0.8371623, "grad_norm": 2.91498486, "learning_rate": 1.78e-06, "token_acc": 0.7685514, "epoch": 1.46067416, "global_step/max_steps": "260/890", "percentage": "29.21%", "elapsed_time": "39m 58s", "remaining_time": "1h 36m 50s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108421}
{"loss": 0.83786983, "grad_norm": 3.2768946, "learning_rate": 1.77e-06, "token_acc": 0.77163634, "epoch": 1.48876404, "global_step/max_steps": "265/890", "percentage": "29.78%", "elapsed_time": "40m 40s", "remaining_time": "1h 35m 56s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108574}
{"loss": 0.88411522, "grad_norm": 4.19250358, "learning_rate": 1.76e-06, "token_acc": 0.75960195, "epoch": 1.51685393, "global_step/max_steps": "270/890", "percentage": "30.34%", "elapsed_time": "41m 21s", "remaining_time": "1h 34m 58s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108808}
{"loss": 0.83689098, "grad_norm": 2.77152158, "learning_rate": 1.75e-06, "token_acc": 0.76961081, "epoch": 1.54494382, "global_step/max_steps": "275/890", "percentage": "30.90%", "elapsed_time": "42m 5s", "remaining_time": "1h 34m 7s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108906}
{"loss": 0.88070488, "grad_norm": 3.19688965, "learning_rate": 1.73e-06, "token_acc": 0.76284098, "epoch": 1.57303371, "global_step/max_steps": "280/890", "percentage": "31.46%", "elapsed_time": "42m 50s", "remaining_time": "1h 33m 19s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108944}
{"loss": 0.8242981, "grad_norm": 2.85142297, "learning_rate": 1.72e-06, "token_acc": 0.77254941, "epoch": 1.6011236, "global_step/max_steps": "285/890", "percentage": "32.02%", "elapsed_time": "43m 30s", "remaining_time": "1h 32m 21s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109183}
{"loss": 0.83616333, "grad_norm": 3.36162273, "learning_rate": 1.71e-06, "token_acc": 0.77431415, "epoch": 1.62921348, "global_step/max_steps": "290/890", "percentage": "32.58%", "elapsed_time": "44m 15s", "remaining_time": "1h 31m 33s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109224}
{"loss": 0.92082767, "grad_norm": 3.13131769, "learning_rate": 1.69e-06, "token_acc": 0.74889287, "epoch": 1.65730337, "global_step/max_steps": "295/890", "percentage": "33.15%", "elapsed_time": "44m 54s", "remaining_time": "1h 30m 33s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109497}
{"loss": 0.91456146, "grad_norm": 3.56258887, "learning_rate": 1.68e-06, "token_acc": 0.75164176, "epoch": 1.68539326, "global_step/max_steps": "300/890", "percentage": "33.71%", "elapsed_time": "45m 36s", "remaining_time": "1h 29m 41s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109626}
{"loss": 0.83453369, "grad_norm": 2.38551711, "learning_rate": 1.66e-06, "token_acc": 0.76689947, "epoch": 1.71348315, "global_step/max_steps": "305/890", "percentage": "34.27%", "elapsed_time": "46m 19s", "remaining_time": "1h 28m 51s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109721}
{"loss": 0.88098469, "grad_norm": 2.67320296, "learning_rate": 1.65e-06, "token_acc": 0.753423, "epoch": 1.74157303, "global_step/max_steps": "310/890", "percentage": "34.83%", "elapsed_time": "46m 59s", "remaining_time": "1h 27m 55s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109951}
{"loss": 0.81355209, "grad_norm": 3.09773531, "learning_rate": 1.63e-06, "token_acc": 0.77280595, "epoch": 1.76966292, "global_step/max_steps": "315/890", "percentage": "35.39%", "elapsed_time": "47m 41s", "remaining_time": "1h 27m 3s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110073}
{"loss": 0.83805923, "grad_norm": 3.16103249, "learning_rate": 1.62e-06, "token_acc": 0.76887944, "epoch": 1.79775281, "global_step/max_steps": "320/890", "percentage": "35.96%", "elapsed_time": "48m 26s", "remaining_time": "1h 26m 17s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110101}
{"loss": 0.84451904, "grad_norm": 2.13510351, "learning_rate": 1.6e-06, "token_acc": 0.7644642, "epoch": 1.8258427, "global_step/max_steps": "325/890", "percentage": "36.52%", "elapsed_time": "49m 14s", "remaining_time": "1h 25m 36s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.109995}
{"loss": 0.88252859, "grad_norm": 2.57860896, "learning_rate": 1.59e-06, "token_acc": 0.75729913, "epoch": 1.85393258, "global_step/max_steps": "330/890", "percentage": "37.08%", "elapsed_time": "49m 59s", "remaining_time": "1h 24m 50s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110018}
{"loss": 0.84676886, "grad_norm": 2.96066618, "learning_rate": 1.57e-06, "token_acc": 0.7666049, "epoch": 1.88202247, "global_step/max_steps": "335/890", "percentage": "37.64%", "elapsed_time": "50m 43s", "remaining_time": "1h 24m 3s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110053}
{"loss": 0.82754688, "grad_norm": 2.38119297, "learning_rate": 1.55e-06, "token_acc": 0.77260635, "epoch": 1.91011236, "global_step/max_steps": "340/890", "percentage": "38.20%", "elapsed_time": "51m 28s", "remaining_time": "1h 23m 16s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110074}
{"loss": 0.82496271, "grad_norm": 3.48291336, "learning_rate": 1.54e-06, "token_acc": 0.7723053, "epoch": 1.93820225, "global_step/max_steps": "345/890", "percentage": "38.76%", "elapsed_time": "52m 14s", "remaining_time": "1h 22m 31s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110062}
{"loss": 0.87581673, "grad_norm": 2.75595841, "learning_rate": 1.52e-06, "token_acc": 0.75681868, "epoch": 1.96629213, "global_step/max_steps": "350/890", "percentage": "39.33%", "elapsed_time": "53m 0s", "remaining_time": "1h 21m 47s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110042}
{"loss": 0.86541729, "grad_norm": 3.14291051, "learning_rate": 1.5e-06, "token_acc": 0.76903668, "epoch": 1.99438202, "global_step/max_steps": "355/890", "percentage": "39.89%", "elapsed_time": "53m 44s", "remaining_time": "1h 21m 0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.110078}
{"eval_loss": 0.883196, "eval_runtime": 52.0933, "eval_samples_per_second": 3.033, "eval_steps_per_second": 0.768, "eval_token_acc": 0.76288927, "epoch": 2.0, "global_step/max_steps": "356/890", "percentage": "40.00%", "elapsed_time": "54m 45s", "remaining_time": "1h 22m 8s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10834}
{"loss": 0.76372967, "grad_norm": 2.65146496, "learning_rate": 1.49e-06, "token_acc": 0.78709464, "epoch": 2.02247191, "global_step/max_steps": "360/890", "percentage": "40.45%", "elapsed_time": "56m 35s", "remaining_time": "1h 23m 18s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106022}
{"loss": 0.7487339, "grad_norm": 2.51304835, "learning_rate": 1.47e-06, "token_acc": 0.78633721, "epoch": 2.0505618, "global_step/max_steps": "365/890", "percentage": "41.01%", "elapsed_time": "57m 17s", "remaining_time": "1h 22m 24s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106169}
{"loss": 0.74286985, "grad_norm": 2.23677386, "learning_rate": 1.45e-06, "token_acc": 0.78932876, "epoch": 2.07865169, "global_step/max_steps": "370/890", "percentage": "41.57%", "elapsed_time": "58m 0s", "remaining_time": "1h 21m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106294}
{"loss": 0.79879642, "grad_norm": 2.84605592, "learning_rate": 1.43e-06, "token_acc": 0.77602709, "epoch": 2.10674157, "global_step/max_steps": "375/890", "percentage": "42.13%", "elapsed_time": "58m 42s", "remaining_time": "1h 20m 37s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106463}
{"loss": 0.71379976, "grad_norm": 2.23724917, "learning_rate": 1.42e-06, "token_acc": 0.79882785, "epoch": 2.13483146, "global_step/max_steps": "380/890", "percentage": "42.70%", "elapsed_time": "59m 30s", "remaining_time": "1h 19m 52s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106426}
{"loss": 0.73072977, "grad_norm": 2.37456938, "learning_rate": 1.4e-06, "token_acc": 0.79221846, "epoch": 2.16292135, "global_step/max_steps": "385/890", "percentage": "43.26%", "elapsed_time": "1h 0m 17s", "remaining_time": "1h 19m 5s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106413}
{"loss": 0.74891605, "grad_norm": 3.31648127, "learning_rate": 1.38e-06, "token_acc": 0.78651965, "epoch": 2.19101124, "global_step/max_steps": "390/890", "percentage": "43.82%", "elapsed_time": "1h 1m 1s", "remaining_time": "1h 18m 14s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106507}
{"loss": 0.69626665, "grad_norm": 2.7509169, "learning_rate": 1.36e-06, "token_acc": 0.79493371, "epoch": 2.21910112, "global_step/max_steps": "395/890", "percentage": "44.38%", "elapsed_time": "1h 1m 49s", "remaining_time": "1h 17m 28s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106489}
{"loss": 0.77286663, "grad_norm": 2.68221615, "learning_rate": 1.34e-06, "token_acc": 0.78067996, "epoch": 2.24719101, "global_step/max_steps": "400/890", "percentage": "44.94%", "elapsed_time": "1h 2m 33s", "remaining_time": "1h 16m 37s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106578}
{"loss": 0.76619682, "grad_norm": 2.26470289, "learning_rate": 1.33e-06, "token_acc": 0.78141262, "epoch": 2.2752809, "global_step/max_steps": "405/890", "percentage": "45.51%", "elapsed_time": "1h 3m 14s", "remaining_time": "1h 15m 44s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106727}
{"loss": 0.73738632, "grad_norm": 3.32855096, "learning_rate": 1.31e-06, "token_acc": 0.78880821, "epoch": 2.30337079, "global_step/max_steps": "410/890", "percentage": "46.07%", "elapsed_time": "1h 3m 54s", "remaining_time": "1h 14m 49s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10692}
{"loss": 0.72461548, "grad_norm": 2.56099664, "learning_rate": 1.29e-06, "token_acc": 0.79547567, "epoch": 2.33146067, "global_step/max_steps": "415/890", "percentage": "46.63%", "elapsed_time": "1h 4m 41s", "remaining_time": "1h 14m 3s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106904}
{"loss": 0.72892504, "grad_norm": 3.36767446, "learning_rate": 1.27e-06, "token_acc": 0.79656101, "epoch": 2.35955056, "global_step/max_steps": "420/890", "percentage": "47.19%", "elapsed_time": "1h 5m 29s", "remaining_time": "1h 13m 17s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106883}
{"loss": 0.75928507, "grad_norm": 2.2418087, "learning_rate": 1.25e-06, "token_acc": 0.7827391, "epoch": 2.38764045, "global_step/max_steps": "425/890", "percentage": "47.75%", "elapsed_time": "1h 6m 15s", "remaining_time": "1h 12m 29s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106904}
{"loss": 0.71564264, "grad_norm": 2.5013085, "learning_rate": 1.23e-06, "token_acc": 0.79345451, "epoch": 2.41573034, "global_step/max_steps": "430/890", "percentage": "48.31%", "elapsed_time": "1h 7m 0s", "remaining_time": "1h 11m 40s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106962}
{"loss": 0.78032069, "grad_norm": 8.92093975, "learning_rate": 1.21e-06, "token_acc": 0.77896553, "epoch": 2.44382022, "global_step/max_steps": "435/890", "percentage": "48.88%", "elapsed_time": "1h 7m 41s", "remaining_time": "1h 10m 47s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107112}
{"loss": 0.74873438, "grad_norm": 2.74330207, "learning_rate": 1.19e-06, "token_acc": 0.78835455, "epoch": 2.47191011, "global_step/max_steps": "440/890", "percentage": "49.44%", "elapsed_time": "1h 8m 27s", "remaining_time": "1h 10m 0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107118}
{"loss": 0.73877139, "grad_norm": 2.64018163, "learning_rate": 1.17e-06, "token_acc": 0.79112106, "epoch": 2.5, "global_step/max_steps": "445/890", "percentage": "50.00%", "elapsed_time": "1h 9m 10s", "remaining_time": "1h 9m 10s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107213}
{"loss": 0.71075497, "grad_norm": 3.50654586, "learning_rate": 1.15e-06, "token_acc": 0.79829846, "epoch": 2.52808989, "global_step/max_steps": "450/890", "percentage": "50.56%", "elapsed_time": "1h 9m 53s", "remaining_time": "1h 8m 20s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107317}
{"loss": 0.72160144, "grad_norm": 2.46534612, "learning_rate": 1.13e-06, "token_acc": 0.79239614, "epoch": 2.55617978, "global_step/max_steps": "455/890", "percentage": "51.12%", "elapsed_time": "1h 10m 38s", "remaining_time": "1h 7m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107339}
{"loss": 0.77560282, "grad_norm": 2.68221129, "learning_rate": 1.12e-06, "token_acc": 0.7788509, "epoch": 2.58426966, "global_step/max_steps": "460/890", "percentage": "51.69%", "elapsed_time": "1h 11m 17s", "remaining_time": "1h 6m 38s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107534}
{"loss": 0.74105349, "grad_norm": 2.62369315, "learning_rate": 1.1e-06, "token_acc": 0.78816661, "epoch": 2.61235955, "global_step/max_steps": "465/890", "percentage": "52.25%", "elapsed_time": "1h 12m 1s", "remaining_time": "1h 5m 49s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107611}
{"loss": 0.74233828, "grad_norm": 2.2427981, "learning_rate": 1.08e-06, "token_acc": 0.78893539, "epoch": 2.64044944, "global_step/max_steps": "470/890", "percentage": "52.81%", "elapsed_time": "1h 12m 43s", "remaining_time": "1h 4m 59s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107719}
{"loss": 0.72711964, "grad_norm": 2.71799743, "learning_rate": 1.06e-06, "token_acc": 0.79330381, "epoch": 2.66853933, "global_step/max_steps": "475/890", "percentage": "53.37%", "elapsed_time": "1h 13m 27s", "remaining_time": "1h 4m 10s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107779}
{"loss": 0.7158175, "grad_norm": 2.63928866, "learning_rate": 1.04e-06, "token_acc": 0.79646018, "epoch": 2.69662921, "global_step/max_steps": "480/890", "percentage": "53.93%", "elapsed_time": "1h 14m 11s", "remaining_time": "1h 3m 22s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107832}
{"loss": 0.6989315, "grad_norm": 3.66266883, "learning_rate": 1.02e-06, "token_acc": 0.79873633, "epoch": 2.7247191, "global_step/max_steps": "485/890", "percentage": "54.49%", "elapsed_time": "1h 14m 53s", "remaining_time": "1h 2m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107936}
{"loss": 0.77960072, "grad_norm": 3.25458066, "learning_rate": 1e-06, "token_acc": 0.77973473, "epoch": 2.75280899, "global_step/max_steps": "490/890", "percentage": "55.06%", "elapsed_time": "1h 15m 32s", "remaining_time": "1h 1m 40s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108096}
{"loss": 0.69465961, "grad_norm": 3.10810212, "learning_rate": 9.8e-07, "token_acc": 0.80147961, "epoch": 2.78089888, "global_step/max_steps": "495/890", "percentage": "55.62%", "elapsed_time": "1h 16m 14s", "remaining_time": "1h 0m 50s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108198}
{"loss": 0.77048883, "grad_norm": 3.20022845, "learning_rate": 9.6e-07, "token_acc": 0.7819947, "epoch": 2.80898876, "global_step/max_steps": "500/890", "percentage": "56.18%", "elapsed_time": "1h 16m 57s", "remaining_time": "1h 0m 1s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108275}
{"loss": 0.78546405, "grad_norm": 2.5594223, "learning_rate": 9.4e-07, "token_acc": 0.78541519, "epoch": 2.83707865, "global_step/max_steps": "505/890", "percentage": "56.74%", "elapsed_time": "1h 17m 40s", "remaining_time": "59m 12s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108368}
{"loss": 0.75025454, "grad_norm": 2.76200225, "learning_rate": 9.2e-07, "token_acc": 0.78423506, "epoch": 2.86516854, "global_step/max_steps": "510/890", "percentage": "57.30%", "elapsed_time": "1h 18m 23s", "remaining_time": "58m 24s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108421}
{"loss": 0.83216257, "grad_norm": 2.60508338, "learning_rate": 9e-07, "token_acc": 0.78277497, "epoch": 2.89325843, "global_step/max_steps": "515/890", "percentage": "57.87%", "elapsed_time": "1h 19m 5s", "remaining_time": "57m 35s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108521}
{"loss": 0.75238867, "grad_norm": 2.2209122, "learning_rate": 8.8e-07, "token_acc": 0.79557126, "epoch": 2.92134831, "global_step/max_steps": "520/890", "percentage": "58.43%", "elapsed_time": "1h 19m 52s", "remaining_time": "56m 50s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108501}
{"loss": 0.77468252, "grad_norm": 3.17883504, "learning_rate": 8.6e-07, "token_acc": 0.78100264, "epoch": 2.9494382, "global_step/max_steps": "525/890", "percentage": "58.99%", "elapsed_time": "1h 20m 35s", "remaining_time": "56m 2s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108566}
{"loss": 0.7034194, "grad_norm": 2.86372503, "learning_rate": 8.4e-07, "token_acc": 0.79648375, "epoch": 2.97752809, "global_step/max_steps": "530/890", "percentage": "59.55%", "elapsed_time": "1h 21m 17s", "remaining_time": "55m 13s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.108657}
{"eval_loss": 0.88253623, "eval_runtime": 52.2495, "eval_samples_per_second": 3.024, "eval_steps_per_second": 0.766, "eval_token_acc": 0.7631737, "epoch": 3.0, "global_step/max_steps": "534/890", "percentage": "60.00%", "elapsed_time": "1h 22m 42s", "remaining_time": "55m 8s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107607}
{"loss": 0.74983625, "grad_norm": 2.39575934, "learning_rate": 8.2e-07, "token_acc": 0.7876027, "epoch": 3.00561798, "global_step/max_steps": "535/890", "percentage": "60.11%", "elapsed_time": "1h 24m 5s", "remaining_time": "55m 48s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106033}
{"loss": 0.7054306, "grad_norm": 3.22017355, "learning_rate": 8e-07, "token_acc": 0.79645623, "epoch": 3.03370787, "global_step/max_steps": "540/890", "percentage": "60.67%", "elapsed_time": "1h 24m 47s", "remaining_time": "54m 57s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106135}
{"loss": 0.68219485, "grad_norm": 2.67577362, "learning_rate": 7.8e-07, "token_acc": 0.80475537, "epoch": 3.06179775, "global_step/max_steps": "545/890", "percentage": "61.24%", "elapsed_time": "1h 25m 30s", "remaining_time": "54m 7s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106226}
{"loss": 0.67324762, "grad_norm": 2.19214227, "learning_rate": 7.6e-07, "token_acc": 0.80340705, "epoch": 3.08988764, "global_step/max_steps": "550/890", "percentage": "61.80%", "elapsed_time": "1h 26m 15s", "remaining_time": "53m 19s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106267}
{"loss": 0.71505938, "grad_norm": 2.66482452, "learning_rate": 7.5e-07, "token_acc": 0.79718903, "epoch": 3.11797753, "global_step/max_steps": "555/890", "percentage": "62.36%", "elapsed_time": "1h 26m 58s", "remaining_time": "52m 29s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106356}
{"loss": 0.69992352, "grad_norm": 2.79701072, "learning_rate": 7.3e-07, "token_acc": 0.79660772, "epoch": 3.14606742, "global_step/max_steps": "560/890", "percentage": "62.92%", "elapsed_time": "1h 27m 37s", "remaining_time": "51m 38s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106513}
{"loss": 0.67914023, "grad_norm": 2.71490815, "learning_rate": 7.1e-07, "token_acc": 0.805145, "epoch": 3.1741573, "global_step/max_steps": "565/890", "percentage": "63.48%", "elapsed_time": "1h 28m 18s", "remaining_time": "50m 48s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106626}
{"loss": 0.67046242, "grad_norm": 2.4021522, "learning_rate": 6.9e-07, "token_acc": 0.80561292, "epoch": 3.20224719, "global_step/max_steps": "570/890", "percentage": "64.04%", "elapsed_time": "1h 29m 0s", "remaining_time": "49m 58s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106728}
{"loss": 0.67434311, "grad_norm": 2.6575065, "learning_rate": 6.7e-07, "token_acc": 0.80764098, "epoch": 3.23033708, "global_step/max_steps": "575/890", "percentage": "64.61%", "elapsed_time": "1h 29m 48s", "remaining_time": "49m 11s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106708}
{"loss": 0.66529789, "grad_norm": 2.58273698, "learning_rate": 6.5e-07, "token_acc": 0.81021639, "epoch": 3.25842697, "global_step/max_steps": "580/890", "percentage": "65.17%", "elapsed_time": "1h 30m 29s", "remaining_time": "48m 22s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10682}
{"loss": 0.6723856, "grad_norm": 2.48444411, "learning_rate": 6.3e-07, "token_acc": 0.80973774, "epoch": 3.28651685, "global_step/max_steps": "585/890", "percentage": "65.73%", "elapsed_time": "1h 31m 17s", "remaining_time": "47m 35s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106803}
{"loss": 0.70687037, "grad_norm": 3.01402488, "learning_rate": 6.2e-07, "token_acc": 0.7949242, "epoch": 3.31460674, "global_step/max_steps": "590/890", "percentage": "66.29%", "elapsed_time": "1h 32m 2s", "remaining_time": "46m 48s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106835}
{"loss": 0.63781862, "grad_norm": 2.91476846, "learning_rate": 6e-07, "token_acc": 0.81250733, "epoch": 3.34269663, "global_step/max_steps": "595/890", "percentage": "66.85%", "elapsed_time": "1h 32m 46s", "remaining_time": "46m 0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106881}
{"loss": 0.66496277, "grad_norm": 2.66926078, "learning_rate": 5.8e-07, "token_acc": 0.80427915, "epoch": 3.37078652, "global_step/max_steps": "600/890", "percentage": "67.42%", "elapsed_time": "1h 33m 32s", "remaining_time": "45m 12s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106913}
{"loss": 0.66866417, "grad_norm": 2.84250333, "learning_rate": 5.6e-07, "token_acc": 0.80316703, "epoch": 3.3988764, "global_step/max_steps": "605/890", "percentage": "67.98%", "elapsed_time": "1h 34m 14s", "remaining_time": "44m 23s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106994}
{"loss": 0.63074079, "grad_norm": 1.97778277, "learning_rate": 5.4e-07, "token_acc": 0.82520772, "epoch": 3.42696629, "global_step/max_steps": "610/890", "percentage": "68.54%", "elapsed_time": "1h 35m 6s", "remaining_time": "43m 39s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106902}
{"loss": 0.70237098, "grad_norm": 2.59753263, "learning_rate": 5.3e-07, "token_acc": 0.803327, "epoch": 3.45505618, "global_step/max_steps": "615/890", "percentage": "69.10%", "elapsed_time": "1h 35m 48s", "remaining_time": "42m 50s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106993}
{"loss": 0.6916687, "grad_norm": 2.76292305, "learning_rate": 5.1e-07, "token_acc": 0.80136174, "epoch": 3.48314607, "global_step/max_steps": "620/890", "percentage": "69.66%", "elapsed_time": "1h 36m 35s", "remaining_time": "42m 4s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106972}
{"loss": 0.66749506, "grad_norm": 2.19839459, "learning_rate": 4.9e-07, "token_acc": 0.81129248, "epoch": 3.51123596, "global_step/max_steps": "625/890", "percentage": "70.22%", "elapsed_time": "1h 37m 18s", "remaining_time": "41m 15s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107056}
|
| 130 |
+
{"loss": 0.6818738, "grad_norm": 3.06806552, "learning_rate": 4.8e-07, "token_acc": 0.8015991, "epoch": 3.53932584, "global_step/max_steps": "630/890", "percentage": "70.79%", "elapsed_time": "1h 38m 4s", "remaining_time": "40m 28s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107063}
|
| 131 |
+
{"loss": 0.66456943, "grad_norm": 2.68769, "learning_rate": 4.6e-07, "token_acc": 0.81087249, "epoch": 3.56741573, "global_step/max_steps": "635/890", "percentage": "71.35%", "elapsed_time": "1h 38m 49s", "remaining_time": "39m 41s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107097}
|
| 132 |
+
{"loss": 0.72464094, "grad_norm": 2.74738545, "learning_rate": 4.4e-07, "token_acc": 0.79762302, "epoch": 3.59550562, "global_step/max_steps": "640/890", "percentage": "71.91%", "elapsed_time": "1h 39m 29s", "remaining_time": "38m 52s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107203}
|
| 133 |
+
{"loss": 0.69279404, "grad_norm": 2.51768681, "learning_rate": 4.3e-07, "token_acc": 0.79968143, "epoch": 3.62359551, "global_step/max_steps": "645/890", "percentage": "72.47%", "elapsed_time": "1h 40m 13s", "remaining_time": "38m 4s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107267}
|
| 134 |
+
{"loss": 0.62826686, "grad_norm": 2.89620834, "learning_rate": 4.1e-07, "token_acc": 0.8196018, "epoch": 3.65168539, "global_step/max_steps": "650/890", "percentage": "73.03%", "elapsed_time": "1h 40m 56s", "remaining_time": "37m 16s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107321}
|
| 135 |
+
{"loss": 0.65086451, "grad_norm": 2.55087984, "learning_rate": 4e-07, "token_acc": 0.81114342, "epoch": 3.67977528, "global_step/max_steps": "655/890", "percentage": "73.60%", "elapsed_time": "1h 41m 43s", "remaining_time": "36m 29s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107314}
|
| 136 |
+
{"loss": 0.68152933, "grad_norm": 2.23748264, "learning_rate": 3.8e-07, "token_acc": 0.81022859, "epoch": 3.70786517, "global_step/max_steps": "660/890", "percentage": "74.16%", "elapsed_time": "1h 42m 26s", "remaining_time": "35m 41s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107383}
|
| 137 |
+
{"loss": 0.638732, "grad_norm": 2.95248319, "learning_rate": 3.6e-07, "token_acc": 0.81360921, "epoch": 3.73595506, "global_step/max_steps": "665/890", "percentage": "74.72%", "elapsed_time": "1h 43m 12s", "remaining_time": "34m 55s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107386}
|
| 138 |
+
{"loss": 0.63976817, "grad_norm": 1.9208081, "learning_rate": 3.5e-07, "token_acc": 0.82471653, "epoch": 3.76404494, "global_step/max_steps": "670/890", "percentage": "75.28%", "elapsed_time": "1h 43m 54s", "remaining_time": "34m 7s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107461}
|
| 139 |
+
{"loss": 0.61602488, "grad_norm": 2.83851958, "learning_rate": 3.3e-07, "token_acc": 0.81885554, "epoch": 3.79213483, "global_step/max_steps": "675/890", "percentage": "75.84%", "elapsed_time": "1h 44m 40s", "remaining_time": "33m 20s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107482}
|
| 140 |
+
{"loss": 0.65128002, "grad_norm": 3.25649989, "learning_rate": 3.2e-07, "token_acc": 0.81121946, "epoch": 3.82022472, "global_step/max_steps": "680/890", "percentage": "76.40%", "elapsed_time": "1h 45m 23s", "remaining_time": "32m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107538}
|
| 141 |
+
{"loss": 0.65334473, "grad_norm": 2.69950213, "learning_rate": 3.1e-07, "token_acc": 0.80848352, "epoch": 3.84831461, "global_step/max_steps": "685/890", "percentage": "76.97%", "elapsed_time": "1h 46m 4s", "remaining_time": "31m 44s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10763}
|
| 142 |
+
{"loss": 0.69999504, "grad_norm": 2.80537542, "learning_rate": 2.9e-07, "token_acc": 0.79630187, "epoch": 3.87640449, "global_step/max_steps": "690/890", "percentage": "77.53%", "elapsed_time": "1h 46m 47s", "remaining_time": "30m 57s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107688}
|
| 143 |
+
{"loss": 0.66152668, "grad_norm": 2.57855035, "learning_rate": 2.8e-07, "token_acc": 0.80817303, "epoch": 3.90449438, "global_step/max_steps": "695/890", "percentage": "78.09%", "elapsed_time": "1h 47m 28s", "remaining_time": "30m 9s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107781}
|
| 144 |
+
{"loss": 0.69109988, "grad_norm": 2.86464343, "learning_rate": 2.7e-07, "token_acc": 0.80025366, "epoch": 3.93258427, "global_step/max_steps": "700/890", "percentage": "78.65%", "elapsed_time": "1h 48m 11s", "remaining_time": "29m 22s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107828}
|
| 145 |
+
{"loss": 0.67540474, "grad_norm": 2.9179898, "learning_rate": 2.5e-07, "token_acc": 0.81227611, "epoch": 3.96067416, "global_step/max_steps": "705/890", "percentage": "79.21%", "elapsed_time": "1h 48m 55s", "remaining_time": "28m 35s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107865}
|
| 146 |
+
{"loss": 0.62726965, "grad_norm": 2.75012608, "learning_rate": 2.4e-07, "token_acc": 0.81691564, "epoch": 3.98876404, "global_step/max_steps": "710/890", "percentage": "79.78%", "elapsed_time": "1h 49m 38s", "remaining_time": "27m 47s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107925}
|
| 147 |
+
{"eval_loss": 0.89803088, "eval_runtime": 51.6098, "eval_samples_per_second": 3.061, "eval_steps_per_second": 0.775, "eval_token_acc": 0.76206127, "epoch": 4.0, "global_step/max_steps": "712/890", "percentage": "80.00%", "elapsed_time": "1h 50m 47s", "remaining_time": "27m 41s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107115}
|
| 148 |
+
{"loss": 0.63509369, "grad_norm": 2.14097142, "learning_rate": 2.3e-07, "token_acc": 0.81665384, "epoch": 4.01685393, "global_step/max_steps": "715/890", "percentage": "80.34%", "elapsed_time": "1h 52m 31s", "remaining_time": "27m 32s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.105898}
|
| 149 |
+
{"loss": 0.63643165, "grad_norm": 2.66535277, "learning_rate": 2.1e-07, "token_acc": 0.81476936, "epoch": 4.04494382, "global_step/max_steps": "720/890", "percentage": "80.90%", "elapsed_time": "1h 53m 17s", "remaining_time": "26m 44s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.105925}
|
| 150 |
+
{"loss": 0.66582613, "grad_norm": 2.35593267, "learning_rate": 2e-07, "token_acc": 0.80987527, "epoch": 4.07303371, "global_step/max_steps": "725/890", "percentage": "81.46%", "elapsed_time": "1h 54m 0s", "remaining_time": "25m 56s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10599}
|
| 151 |
+
{"loss": 0.58594761, "grad_norm": 2.41867698, "learning_rate": 1.9e-07, "token_acc": 0.83039678, "epoch": 4.1011236, "global_step/max_steps": "730/890", "percentage": "82.02%", "elapsed_time": "1h 54m 49s", "remaining_time": "25m 9s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.105964}
|
| 152 |
+
{"loss": 0.69309998, "grad_norm": 2.76227102, "learning_rate": 1.8e-07, "token_acc": 0.80263716, "epoch": 4.12921348, "global_step/max_steps": "735/890", "percentage": "82.58%", "elapsed_time": "1h 55m 31s", "remaining_time": "24m 21s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10604}
|
| 153 |
+
{"loss": 0.644524, "grad_norm": 2.52618936, "learning_rate": 1.7e-07, "token_acc": 0.81604746, "epoch": 4.15730337, "global_step/max_steps": "740/890", "percentage": "83.15%", "elapsed_time": "1h 56m 11s", "remaining_time": "23m 33s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106141}
|
| 154 |
+
{"loss": 0.6446363, "grad_norm": 2.56275378, "learning_rate": 1.6e-07, "token_acc": 0.81520026, "epoch": 4.18539326, "global_step/max_steps": "745/890", "percentage": "83.71%", "elapsed_time": "1h 56m 56s", "remaining_time": "22m 45s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106178}
|
| 155 |
+
{"loss": 0.63664732, "grad_norm": 2.05207859, "learning_rate": 1.5e-07, "token_acc": 0.81871459, "epoch": 4.21348315, "global_step/max_steps": "750/890", "percentage": "84.27%", "elapsed_time": "1h 57m 40s", "remaining_time": "21m 58s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106218}
|
| 156 |
+
{"loss": 0.5823761, "grad_norm": 2.25161315, "learning_rate": 1.4e-07, "token_acc": 0.83088307, "epoch": 4.24157303, "global_step/max_steps": "755/890", "percentage": "84.83%", "elapsed_time": "1h 58m 31s", "remaining_time": "21m 11s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10617}
|
| 157 |
+
{"loss": 0.6482645, "grad_norm": 3.01319968, "learning_rate": 1.3e-07, "token_acc": 0.81539195, "epoch": 4.26966292, "global_step/max_steps": "760/890", "percentage": "85.39%", "elapsed_time": "1h 59m 10s", "remaining_time": "20m 23s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106282}
|
| 158 |
+
{"loss": 0.61976237, "grad_norm": 2.68560478, "learning_rate": 1.2e-07, "token_acc": 0.81683207, "epoch": 4.29775281, "global_step/max_steps": "765/890", "percentage": "85.96%", "elapsed_time": "1h 59m 56s", "remaining_time": "19m 35s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106296}
|
| 159 |
+
{"loss": 0.64509468, "grad_norm": 2.36101355, "learning_rate": 1.1e-07, "token_acc": 0.81699101, "epoch": 4.3258427, "global_step/max_steps": "770/890", "percentage": "86.52%", "elapsed_time": "2h 0m 41s", "remaining_time": "18m 48s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106329}
|
| 160 |
+
{"loss": 0.63039732, "grad_norm": 9.95604217, "learning_rate": 1e-07, "token_acc": 0.81813457, "epoch": 4.35393258, "global_step/max_steps": "775/890", "percentage": "87.08%", "elapsed_time": "2h 1m 27s", "remaining_time": "18m 1s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106344}
|
| 161 |
+
{"loss": 0.62071986, "grad_norm": 3.44054972, "learning_rate": 9e-08, "token_acc": 0.82631551, "epoch": 4.38202247, "global_step/max_steps": "780/890", "percentage": "87.64%", "elapsed_time": "2h 2m 11s", "remaining_time": "17m 13s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106389}
|
| 162 |
+
{"loss": 0.61474776, "grad_norm": 2.47376733, "learning_rate": 8e-08, "token_acc": 0.81814733, "epoch": 4.41011236, "global_step/max_steps": "785/890", "percentage": "88.20%", "elapsed_time": "2h 2m 55s", "remaining_time": "16m 26s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106441}
|
| 163 |
+
{"loss": 0.65505819, "grad_norm": 2.57256575, "learning_rate": 8e-08, "token_acc": 0.80797186, "epoch": 4.43820225, "global_step/max_steps": "790/890", "percentage": "88.76%", "elapsed_time": "2h 3m 35s", "remaining_time": "15m 38s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106534}
|
| 164 |
+
{"loss": 0.62731009, "grad_norm": 3.07301178, "learning_rate": 7e-08, "token_acc": 0.81529302, "epoch": 4.46629213, "global_step/max_steps": "795/890", "percentage": "89.33%", "elapsed_time": "2h 4m 21s", "remaining_time": "14m 51s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106551}
|
| 165 |
+
{"loss": 0.62096758, "grad_norm": 3.29653563, "learning_rate": 6e-08, "token_acc": 0.82509463, "epoch": 4.49438202, "global_step/max_steps": "800/890", "percentage": "89.89%", "elapsed_time": "2h 5m 3s", "remaining_time": "14m 4s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106618}
|
| 166 |
+
{"loss": 0.62403965, "grad_norm": 2.87938642, "learning_rate": 6e-08, "token_acc": 0.81497822, "epoch": 4.52247191, "global_step/max_steps": "805/890", "percentage": "90.45%", "elapsed_time": "2h 5m 47s", "remaining_time": "13m 16s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106657}
|
| 167 |
+
{"loss": 0.6181345, "grad_norm": 2.27651359, "learning_rate": 5e-08, "token_acc": 0.81844841, "epoch": 4.5505618, "global_step/max_steps": "810/890", "percentage": "91.01%", "elapsed_time": "2h 6m 32s", "remaining_time": "12m 29s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106686}
|
| 168 |
+
{"loss": 0.65880566, "grad_norm": 2.9483979, "learning_rate": 4e-08, "token_acc": 0.80996857, "epoch": 4.57865169, "global_step/max_steps": "815/890", "percentage": "91.57%", "elapsed_time": "2h 7m 15s", "remaining_time": "11m 42s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106744}
|
| 169 |
+
{"loss": 0.64787788, "grad_norm": 2.88745826, "learning_rate": 4e-08, "token_acc": 0.81552829, "epoch": 4.60674157, "global_step/max_steps": "820/890", "percentage": "92.13%", "elapsed_time": "2h 7m 54s", "remaining_time": "10m 55s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106841}
|
| 170 |
+
{"loss": 0.62310028, "grad_norm": 2.56522962, "learning_rate": 3e-08, "token_acc": 0.81744604, "epoch": 4.63483146, "global_step/max_steps": "825/890", "percentage": "92.70%", "elapsed_time": "2h 8m 38s", "remaining_time": "10m 8s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106879}
|
| 171 |
+
{"loss": 0.61821036, "grad_norm": 2.60425408, "learning_rate": 3e-08, "token_acc": 0.82025231, "epoch": 4.66292135, "global_step/max_steps": "830/890", "percentage": "93.26%", "elapsed_time": "2h 9m 21s", "remaining_time": "9m 21s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106936}
|
| 172 |
+
{"loss": 0.65851917, "grad_norm": 3.29264018, "learning_rate": 2e-08, "token_acc": 0.80970547, "epoch": 4.69101124, "global_step/max_steps": "835/890", "percentage": "93.82%", "elapsed_time": "2h 10m 8s", "remaining_time": "8m 34s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106938}
|
| 173 |
+
{"loss": 0.64176149, "grad_norm": 2.8497161, "learning_rate": 2e-08, "token_acc": 0.80927275, "epoch": 4.71910112, "global_step/max_steps": "840/890", "percentage": "94.38%", "elapsed_time": "2h 10m 50s", "remaining_time": "7m 47s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106999}
|
| 174 |
+
{"loss": 0.6665369, "grad_norm": 2.53953601, "learning_rate": 2e-08, "token_acc": 0.8113002, "epoch": 4.74719101, "global_step/max_steps": "845/890", "percentage": "94.94%", "elapsed_time": "2h 11m 35s", "remaining_time": "7m 0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107025}
|
| 175 |
+
{"loss": 0.60728722, "grad_norm": 2.37881922, "learning_rate": 1e-08, "token_acc": 0.82045766, "epoch": 4.7752809, "global_step/max_steps": "850/890", "percentage": "95.51%", "elapsed_time": "2h 12m 22s", "remaining_time": "6m 13s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107016}
|
| 176 |
+
{"loss": 0.64152608, "grad_norm": 2.7730652, "learning_rate": 1e-08, "token_acc": 0.80657476, "epoch": 4.80337079, "global_step/max_steps": "855/890", "percentage": "96.07%", "elapsed_time": "2h 13m 8s", "remaining_time": "5m 26s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107035}
|
| 177 |
+
{"loss": 0.63794065, "grad_norm": 2.49984673, "learning_rate": 1e-08, "token_acc": 0.8102028, "epoch": 4.83146067, "global_step/max_steps": "860/890", "percentage": "96.63%", "elapsed_time": "2h 13m 49s", "remaining_time": "4m 40s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107101}
|
| 178 |
+
{"loss": 0.64644642, "grad_norm": 2.22279365, "learning_rate": 0.0, "token_acc": 0.81083568, "epoch": 4.85955056, "global_step/max_steps": "865/890", "percentage": "97.19%", "elapsed_time": "2h 14m 34s", "remaining_time": "3m 53s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107125}
|
| 179 |
+
{"loss": 0.65510898, "grad_norm": 2.52516112, "learning_rate": 0.0, "token_acc": 0.80992913, "epoch": 4.88764045, "global_step/max_steps": "870/890", "percentage": "97.75%", "elapsed_time": "2h 15m 15s", "remaining_time": "3m 6s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107202}
|
| 180 |
+
{"loss": 0.66538706, "grad_norm": 3.10808529, "learning_rate": 0.0, "token_acc": 0.81240392, "epoch": 4.91573034, "global_step/max_steps": "875/890", "percentage": "98.31%", "elapsed_time": "2h 15m 58s", "remaining_time": "2m 19s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.10725}
|
| 181 |
+
{"loss": 0.63949537, "grad_norm": 2.75956734, "learning_rate": 0.0, "token_acc": 0.81129721, "epoch": 4.94382022, "global_step/max_steps": "880/890", "percentage": "98.88%", "elapsed_time": "2h 16m 41s", "remaining_time": "1m 33s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107296}
|
| 182 |
+
{"loss": 0.62417135, "grad_norm": 3.03134844, "learning_rate": 0.0, "token_acc": 0.8159511, "epoch": 4.97191011, "global_step/max_steps": "885/890", "percentage": "99.44%", "elapsed_time": "2h 17m 25s", "remaining_time": "46s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107331}
|
| 183 |
+
{"loss": 0.62945595, "grad_norm": 3.27047626, "learning_rate": 0.0, "token_acc": 0.81621944, "epoch": 5.0, "global_step/max_steps": "890/890", "percentage": "100.00%", "elapsed_time": "2h 18m 6s", "remaining_time": "0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.107406}
|
| 184 |
+
{"eval_loss": 0.90608197, "eval_runtime": 52.8028, "eval_samples_per_second": 2.992, "eval_steps_per_second": 0.758, "eval_token_acc": 0.76147346, "epoch": 5.0, "global_step/max_steps": "890/890", "percentage": "100.00%", "elapsed_time": "2h 18m 59s", "remaining_time": "0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.106726}
|
| 185 |
+
{"eval_loss": 0.90608197, "eval_runtime": 53.3953, "eval_samples_per_second": 2.959, "eval_steps_per_second": 0.749, "eval_token_acc": 0.76147346, "epoch": 5.0, "global_step/max_steps": "890/890", "percentage": "100.00%", "elapsed_time": "2h 21m 5s", "remaining_time": "0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.105139}
|
| 186 |
+
{"train_runtime": 8561.3347, "train_samples_per_second": 0.83, "train_steps_per_second": 0.104, "total_flos": 18284469882880.0, "train_loss": 0.82615118, "epoch": 5.0, "global_step/max_steps": "890/890", "percentage": "100.00%", "elapsed_time": "2h 22m 38s", "remaining_time": "0s", "memory(GiB)": 78.4, "train_speed(iter/s)": 0.103988}
|
| 187 |
+
{"train_dataset": "1679.460619±965.239485, min=523.000000, max=6528.000000, size=1422", "val_dataset": "1763.449367±1037.026168, min=562.000000, max=5428.000000, size=158", "model_parameter_info": "Qwen3ForCausalLM: 4022.4681M Params (4022.4681M Trainable [100.0000%]), 0.0001M Buffers.", "last_model_checkpoint": "/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131/checkpoint-890", "best_model_checkpoint": "/ltstorage/home/pan/MT_Grpo/qwen3-4b-instruct-2507-cold_start/v0-20251203-162131/checkpoint-534", "best_metric": 0.88253623, "global_step": 890, "log_history": [{"loss": 1.9294220209121704, "grad_norm": 20.424147491457006, "learning_rate": 2.2471910112359548e-08, "token_acc": 0.6705118961788031, "epoch": 0.0056179775280898875, "step": 1}, {"loss": 1.904306411743164, "grad_norm": 23.439119154528118, "learning_rate": 1.1235955056179774e-07, "token_acc": 0.6692064001283233, "epoch": 0.028089887640449437, "step": 5}, {"loss": 1.7001083374023438, "grad_norm": 19.32456440006131, "learning_rate": 2.2471910112359549e-07, "token_acc": 0.6961481903135525, "epoch": 0.056179775280898875, "step": 10}, {"loss": 1.8132465362548829, "grad_norm": 20.599442226361514, "learning_rate": 3.3707865168539325e-07, "token_acc": 0.6871491146893701, "epoch": 0.08426966292134831, "step": 15}, {"loss": 1.878952407836914, "grad_norm": 15.93092434881123, "learning_rate": 4.4943820224719097e-07, "token_acc": 0.6831177671885637, "epoch": 0.11235955056179775, "step": 20}, {"loss": 1.7200767517089843, "grad_norm": 19.692709514736602, "learning_rate": 5.617977528089887e-07, "token_acc": 0.6907898676567459, "epoch": 0.1404494382022472, "step": 25}, {"loss": 1.65771541595459, "grad_norm": 13.974844139329205, "learning_rate": 6.741573033707865e-07, "token_acc": 0.6928436466698158, "epoch": 0.16853932584269662, "step": 30}, {"loss": 1.4616617202758788, "grad_norm": 9.69346120447067, "learning_rate": 7.865168539325843e-07, "token_acc": 0.7045438057698353, "epoch": 0.19662921348314608, "step": 35}, {"loss": 1.4967164993286133, "grad_norm": 6.598820229824133, "learning_rate": 8.988764044943819e-07, "token_acc": 0.6971656050955414, "epoch": 0.2247191011235955, "step": 40}, {"loss": 1.23870849609375, "grad_norm": 5.67433818854337, "learning_rate": 1.0112359550561797e-06, "token_acc": 0.719047126911947, "epoch": 0.25280898876404495, "step": 45}, {"loss": 1.295097541809082, "grad_norm": 5.529419440195868, "learning_rate": 1.1235955056179775e-06, "token_acc": 0.7002483051083893, "epoch": 0.2808988764044944, "step": 50}, {"loss": 1.2352296829223632, "grad_norm": 3.8790926273657087, "learning_rate": 1.235955056179775e-06, "token_acc": 0.7105542710741736, "epoch": 0.3089887640449438, "step": 55}, {"loss": 1.2055891036987305, "grad_norm": 3.953557488831989, "learning_rate": 1.348314606741573e-06, "token_acc": 0.7074283178422622, "epoch": 0.33707865168539325, "step": 60}, {"loss": 1.1811911582946777, "grad_norm": 3.643343084125879, "learning_rate": 1.4606741573033708e-06, "token_acc": 0.7093026264717298, "epoch": 0.3651685393258427, "step": 65}, {"loss": 1.039687156677246, "grad_norm": 3.470861144393423, "learning_rate": 1.5730337078651686e-06, "token_acc": 0.7354034344860033, "epoch": 0.39325842696629215, "step": 70}, {"loss": 1.0405202865600587, "grad_norm": 2.587222430685054, "learning_rate": 1.6853932584269661e-06, "token_acc": 0.7375265518055227, "epoch": 0.42134831460674155, "step": 75}, {"loss": 1.1237641334533692, "grad_norm": 3.8709777420787734, "learning_rate": 1.7977528089887639e-06, "token_acc": 
0.7187291772168768, "epoch": 0.449438202247191, "step": 80}, {"loss": 1.1050168991088867, "grad_norm": 2.8690009715335694, "learning_rate": 1.910112359550562e-06, "token_acc": 0.723785465347461, "epoch": 0.47752808988764045, "step": 85}, {"loss": 1.077320384979248, "grad_norm": 2.7797847810326908, "learning_rate": 1.999992308621909e-06, "token_acc": 0.7229019786217876, "epoch": 0.5056179775280899, "step": 90}, {"loss": 1.0275424003601075, "grad_norm": 2.8893483997821936, "learning_rate": 1.999723122811548e-06, "token_acc": 0.7338508671720844, "epoch": 0.5337078651685393, "step": 95}, {"loss": 1.0429039001464844, "grad_norm": 3.296545947442661, "learning_rate": 1.999069486403046e-06, "token_acc": 0.7256585417718827, "epoch": 0.5617977528089888, "step": 100}, {"loss": 1.1426443099975585, "grad_norm": 4.223909130674462, "learning_rate": 1.998031650756905e-06, "token_acc": 0.713406109640382, "epoch": 0.5898876404494382, "step": 105}, {"loss": 1.1262334823608398, "grad_norm": 3.1119951608284744, "learning_rate": 1.9966100149801647e-06, "token_acc": 0.7054803149606299, "epoch": 0.6179775280898876, "step": 110}, {"loss": 0.9934347152709961, "grad_norm": 3.125320283936831, "learning_rate": 1.994805125772918e-06, "token_acc": 0.742525724186017, "epoch": 0.6460674157303371, "step": 115}, {"loss": 1.021185302734375, "grad_norm": 3.4364058770050354, "learning_rate": 1.9926176772180765e-06, "token_acc": 0.7300833835643962, "epoch": 0.6741573033707865, "step": 120}, {"loss": 1.048966407775879, "grad_norm": 3.4104033179708932, "learning_rate": 1.9900485105144544e-06, "token_acc": 0.7253274730358846, "epoch": 0.702247191011236, "step": 125}, {"loss": 0.9771171569824219, "grad_norm": 2.5337665605612893, "learning_rate": 1.987098613653279e-06, "token_acc": 0.7396136051891289, "epoch": 0.7303370786516854, "step": 130}, {"loss": 0.9926401138305664, "grad_norm": 3.5003717612077714, "learning_rate": 1.983769121038254e-06, "token_acc": 0.7304092259527841, "epoch": 0.7584269662921348, "step": 135}, {"loss": 1.0762109756469727, "grad_norm": 2.8085699558587556, "learning_rate": 1.980061313049315e-06, "token_acc": 0.7092926128948055, "epoch": 0.7865168539325843, "step": 140}, {"loss": 0.9405284881591797, "grad_norm": 2.727404329934186, "learning_rate": 1.9759766155502506e-06, "token_acc": 0.7430621892375144, "epoch": 0.8146067415730337, "step": 145}, {"loss": 1.0379549026489259, "grad_norm": 2.9003743944416174, "learning_rate": 1.9715165993403754e-06, "token_acc": 0.7221681798530047, "epoch": 0.8426966292134831, "step": 150}, {"loss": 0.9658462524414062, "grad_norm": 3.394138821971619, "learning_rate": 1.9666829795504693e-06, "token_acc": 0.7353258912495498, "epoch": 0.8707865168539326, "step": 155}, {"loss": 0.9399328231811523, "grad_norm": 3.471117752337183, "learning_rate": 1.9614776149832122e-06, "token_acc": 0.7380347576771208, "epoch": 0.898876404494382, "step": 160}, {"loss": 0.9858393669128418, "grad_norm": 3.5307014243286683, "learning_rate": 1.9559025073983677e-06, "token_acc": 0.738056057622272, "epoch": 0.9269662921348315, "step": 165}, {"loss": 1.0343259811401366, "grad_norm": 3.4326164862952107, "learning_rate": 1.949959800742991e-06, "token_acc": 0.7181796472554839, "epoch": 0.9550561797752809, "step": 170}, {"loss": 1.009816551208496, "grad_norm": 2.7472568192078333, "learning_rate": 1.94365178032696e-06, "token_acc": 0.7324969121757785, "epoch": 0.9831460674157303, "step": 175}, {"eval_loss": 0.9415958523750305, "eval_runtime": 50.628, "eval_samples_per_second": 3.121, "eval_steps_per_second": 0.79, 
"eval_token_acc": 0.7514679577531556, "epoch": 1.0, "step": 178}, {"loss": 0.8963218688964844, "grad_norm": 2.659664073867142, "learning_rate": 1.9369808719441444e-06, "token_acc": 0.764312885592814, "epoch": 1.0112359550561798, "step": 180}, {"loss": 0.8709476470947266, "grad_norm": 3.0170005843317735, "learning_rate": 1.929949640939548e-06, "token_acc": 0.7628275648077628, "epoch": 1.0393258426966292, "step": 185}, {"loss": 0.9287420272827148, "grad_norm": 3.554072348995685, "learning_rate": 1.922560791222786e-06, "token_acc": 0.7509577488079817, "epoch": 1.0674157303370786, "step": 190}, {"loss": 0.9102365493774414, "grad_norm": 2.223289753807769, "learning_rate": 1.9148171642282808e-06, "token_acc": 0.7591235059760956, "epoch": 1.095505617977528, "step": 195}, {"loss": 0.8868337631225586, "grad_norm": 3.23140581389699, "learning_rate": 1.9067217378225652e-06, "token_acc": 0.7549810811525944, "epoch": 1.1235955056179776, "step": 200}, {"loss": 0.8987601280212403, "grad_norm": 2.526779070640055, "learning_rate": 1.8982776251591246e-06, "token_acc": 0.7629664894601814, "epoch": 1.151685393258427, "step": 205}, {"loss": 0.9315019607543945, "grad_norm": 3.1348346979092736, "learning_rate": 1.8894880734812103e-06, "token_acc": 0.7562072271630245, "epoch": 1.1797752808988764, "step": 210}, {"loss": 0.8741535186767578, "grad_norm": 3.8436975180594004, "learning_rate": 1.8803564628730913e-06, "token_acc": 0.7632421004514027, "epoch": 1.2078651685393258, "step": 215}, {"loss": 0.9063589096069335, "grad_norm": 3.035898758347739, "learning_rate": 1.8708863049602159e-06, "token_acc": 0.7581121729177204, "epoch": 1.2359550561797752, "step": 220}, {"loss": 0.868967342376709, "grad_norm": 3.2189094575626696, "learning_rate": 1.8610812415587947e-06, "token_acc": 0.7626130920702502, "epoch": 1.2640449438202248, "step": 225}, {"loss": 0.8877336502075195, "grad_norm": 2.392753224965853, "learning_rate": 1.850945043275312e-06, "token_acc": 0.7599571479585764, "epoch": 1.2921348314606742, "step": 230}, {"loss": 0.8606734275817871, "grad_norm": 2.2379100772993086, "learning_rate": 1.8404816080565132e-06, "token_acc": 0.7693435161613824, "epoch": 1.3202247191011236, "step": 235}, {"loss": 0.8484037399291993, "grad_norm": 2.7988132570934976, "learning_rate": 1.829694959690422e-06, "token_acc": 0.7654914529914529, "epoch": 1.348314606741573, "step": 240}, {"loss": 0.8865579605102539, "grad_norm": 2.8271302257023003, "learning_rate": 1.8185892462589636e-06, "token_acc": 0.7594392808166996, "epoch": 1.3764044943820224, "step": 245}, {"loss": 0.8766312599182129, "grad_norm": 3.222266204963613, "learning_rate": 1.807168738542792e-06, "token_acc": 0.7603124677735382, "epoch": 1.404494382022472, "step": 250}, {"loss": 0.83121337890625, "grad_norm": 2.5744913216922187, "learning_rate": 1.7954378283789287e-06, "token_acc": 0.7687455892731122, "epoch": 1.4325842696629214, "step": 255}, {"loss": 0.8371623039245606, "grad_norm": 2.914984856086316, "learning_rate": 1.7834010269718524e-06, "token_acc": 0.7685514040820193, "epoch": 1.4606741573033708, "step": 260}, {"loss": 0.8378698348999023, "grad_norm": 3.276894598583512, "learning_rate": 1.7710629631586837e-06, "token_acc": 0.7716363427323158, "epoch": 1.4887640449438202, "step": 265}, {"loss": 0.8841152191162109, "grad_norm": 4.192503584217087, "learning_rate": 1.7584283816291317e-06, "token_acc": 0.7596019493575981, "epoch": 1.5168539325842696, "step": 270}, {"loss": 0.836890983581543, "grad_norm": 2.7715215774573174, "learning_rate": 1.7455021411008906e-06, 
"token_acc": 0.7696108086307724, "epoch": 1.5449438202247192, "step": 275}, {"loss": 0.8807048797607422, "grad_norm": 3.1968896533225646, "learning_rate": 1.7322892124511858e-06, "token_acc": 0.7628409791071529, "epoch": 1.5730337078651684, "step": 280}, {"loss": 0.824298095703125, "grad_norm": 2.85142297149823, "learning_rate": 1.7187946768051877e-06, "token_acc": 0.7725494097360392, "epoch": 1.601123595505618, "step": 285}, {"loss": 0.836163330078125, "grad_norm": 3.361622727935816, "learning_rate": 1.7050237235820287e-06, "token_acc": 0.7743141527186292, "epoch": 1.6292134831460674, "step": 290}, {"loss": 0.9208276748657227, "grad_norm": 3.1313176928578876, "learning_rate": 1.6909816484991757e-06, "token_acc": 0.7488928672382926, "epoch": 1.6573033707865168, "step": 295}, {"loss": 0.9145614624023437, "grad_norm": 3.562588866831991, "learning_rate": 1.6766738515359248e-06, "token_acc": 0.7516417572463768, "epoch": 1.6853932584269664, "step": 300}, {"loss": 0.83453369140625, "grad_norm": 2.3855171093070835, "learning_rate": 1.6621058348568004e-06, "token_acc": 0.7668994730339629, "epoch": 1.7134831460674156, "step": 305}, {"loss": 0.8809846878051758, "grad_norm": 2.673202958523956, "learning_rate": 1.647283200695659e-06, "token_acc": 0.7534229976978069, "epoch": 1.7415730337078652, "step": 310}, {"loss": 0.8135520935058593, "grad_norm": 3.0977353145628252, "learning_rate": 1.6322116492013114e-06, "token_acc": 0.7728059524117085, "epoch": 1.7696629213483146, "step": 315}, {"loss": 0.8380592346191407, "grad_norm": 3.161032491715687, "learning_rate": 1.6168969762454894e-06, "token_acc": 0.7688794415879125, "epoch": 1.797752808988764, "step": 320}, {"loss": 0.84451904296875, "grad_norm": 2.1351035087572914, "learning_rate": 1.6013450711940016e-06, "token_acc": 0.7644641980412931, "epoch": 1.8258426966292136, "step": 325}, {"loss": 0.8825285911560059, "grad_norm": 2.5786089611584617, "learning_rate": 1.585561914641938e-06, "token_acc": 0.757299130479129, "epoch": 1.8539325842696628, "step": 330}, {"loss": 0.846768856048584, "grad_norm": 2.9606661774725493, "learning_rate": 1.5695535761137888e-06, "token_acc": 0.7666048961279626, "epoch": 1.8820224719101124, "step": 335}, {"loss": 0.8275468826293946, "grad_norm": 2.381192973088887, "learning_rate": 1.5533262117293647e-06, "token_acc": 0.7726063522172616, "epoch": 1.9101123595505618, "step": 340}, {"loss": 0.8249627113342285, "grad_norm": 3.482913361948986, "learning_rate": 1.5368860618364207e-06, "token_acc": 0.7723052959501557, "epoch": 1.9382022471910112, "step": 345}, {"loss": 0.8758167266845703, "grad_norm": 2.755958406611769, "learning_rate": 1.520239448610882e-06, "token_acc": 0.7568186777220162, "epoch": 1.9662921348314608, "step": 350}, {"loss": 0.8654172897338868, "grad_norm": 3.142910514263914, "learning_rate": 1.5033927736256105e-06, "token_acc": 0.76903667897267, "epoch": 1.99438202247191, "step": 355}, {"eval_loss": 0.8831959962844849, "eval_runtime": 52.0933, "eval_samples_per_second": 3.033, "eval_steps_per_second": 0.768, "eval_token_acc": 0.7628892695290526, "epoch": 2.0, "step": 356}, {"loss": 0.7637296676635742, "grad_norm": 2.6514649627160227, "learning_rate": 1.486352515388631e-06, "token_acc": 0.787094642955459, "epoch": 2.0224719101123596, "step": 360}, {"loss": 0.7487339019775391, "grad_norm": 2.5130483494890052, "learning_rate": 1.4691252268517794e-06, "token_acc": 0.7863372093023255, "epoch": 2.050561797752809, "step": 365}, {"loss": 0.7428698539733887, "grad_norm": 2.2367738572518903, "learning_rate": 
1.4517175328907139e-06, "token_acc": 0.7893287614297589, "epoch": 2.0786516853932584, "step": 370}, {"loss": 0.7987964153289795, "grad_norm": 2.846055924613116, "learning_rate": 1.4341361277572763e-06, "token_acc": 0.7760270946841408, "epoch": 2.106741573033708, "step": 375}, {"loss": 0.7137997627258301, "grad_norm": 2.2372491717862206, "learning_rate": 1.4163877725051677e-06, "token_acc": 0.7988278491738455, "epoch": 2.134831460674157, "step": 380}, {"loss": 0.7307297706604003, "grad_norm": 2.3745693821214977, "learning_rate": 1.3984792923899385e-06, "token_acc": 0.7922184580638538, "epoch": 2.162921348314607, "step": 385}, {"loss": 0.7489160537719727, "grad_norm": 3.3164812708414213, "learning_rate": 1.3804175742442876e-06, "token_acc": 0.7865196484828386, "epoch": 2.191011235955056, "step": 390}, {"loss": 0.6962666511535645, "grad_norm": 2.7509168990464805, "learning_rate": 1.3622095638296825e-06, "token_acc": 0.7949337099640075, "epoch": 2.2191011235955056, "step": 395}, {"loss": 0.7728666305541992, "grad_norm": 2.6822161528950352, "learning_rate": 1.3438622631653175e-06, "token_acc": 0.7806799555861048, "epoch": 2.247191011235955, "step": 400}, {"loss": 0.7661968231201172, "grad_norm": 2.2647028923132932, "learning_rate": 1.3253827278354377e-06, "token_acc": 0.781412619962867, "epoch": 2.2752808988764044, "step": 405}, {"loss": 0.7373863220214844, "grad_norm": 3.3285509551324908, "learning_rate": 1.3067780642760637e-06, "token_acc": 0.7888082123184778, "epoch": 2.303370786516854, "step": 410}, {"loss": 0.724615478515625, "grad_norm": 2.5609966409837317, "learning_rate": 1.288055427042163e-06, "token_acc": 0.7954756740006198, "epoch": 2.331460674157303, "step": 415}, {"loss": 0.7289250373840332, "grad_norm": 3.3676744604512447, "learning_rate": 1.2692220160563123e-06, "token_acc": 0.7965610055661536, "epoch": 2.359550561797753, "step": 420}, {"loss": 0.7592850685119629, "grad_norm": 2.2418087002062337, "learning_rate": 1.2502850738399199e-06, "token_acc": 0.7827390963112456, "epoch": 2.3876404494382024, "step": 425}, {"loss": 0.7156426429748535, "grad_norm": 2.5013085027451387, "learning_rate": 1.2312518827280603e-06, "token_acc": 0.7934545116452048, "epoch": 2.4157303370786516, "step": 430}, {"loss": 0.780320692062378, "grad_norm": 8.920939750256267, "learning_rate": 1.212129762069001e-06, "token_acc": 0.7789655269930149, "epoch": 2.443820224719101, "step": 435}, {"loss": 0.7487343788146973, "grad_norm": 2.7433020699351105, "learning_rate": 1.1929260654094969e-06, "token_acc": 0.7883545497406883, "epoch": 2.4719101123595504, "step": 440}, {"loss": 0.738771390914917, "grad_norm": 2.640181631499408, "learning_rate": 1.1736481776669305e-06, "token_acc": 0.7911210551106924, "epoch": 2.5, "step": 445}, {"loss": 0.7107549667358398, "grad_norm": 3.5065458578004125, "learning_rate": 1.1543035122893896e-06, "token_acc": 0.798298464985316, "epoch": 2.5280898876404496, "step": 450}, {"loss": 0.7216014385223388, "grad_norm": 2.465346116852975, "learning_rate": 1.1348995084047749e-06, "token_acc": 0.7923961415953193, "epoch": 2.556179775280899, "step": 455}, {"loss": 0.7756028175354004, "grad_norm": 2.682211286191937, "learning_rate": 1.1154436279600285e-06, "token_acc": 0.7788508988367995, "epoch": 2.5842696629213484, "step": 460}, {"loss": 0.7410534858703614, "grad_norm": 2.6236931540512587, "learning_rate": 1.095943352851592e-06, "token_acc": 0.7881666112102604, "epoch": 2.6123595505617976, "step": 465}, {"loss": 0.7423382759094238, "grad_norm": 2.2427981019248606, "learning_rate": 
1.076406182048187e-06, "token_acc": 0.7889353925960858, "epoch": 2.640449438202247, "step": 470}, {"loss": 0.7271196365356445, "grad_norm": 2.7179974305774537, "learning_rate": 1.0568396287070376e-06, "token_acc": 0.7933038063497632, "epoch": 2.668539325842697, "step": 475}, {"loss": 0.7158174991607666, "grad_norm": 2.6392886600933325, "learning_rate": 1.0372512172846295e-06, "token_acc": 0.7964601769911505, "epoch": 2.696629213483146, "step": 480}, {"loss": 0.6989315032958985, "grad_norm": 3.6626688290107627, "learning_rate": 1.0176484806431287e-06, "token_acc": 0.7987363319004068, "epoch": 2.7247191011235956, "step": 485}, {"loss": 0.779600715637207, "grad_norm": 3.2545806610618713, "learning_rate": 9.980389571535647e-07, "token_acc": 0.7797347327558319, "epoch": 2.752808988764045, "step": 490}, {"loss": 0.6946596145629883, "grad_norm": 3.108102123916376, "learning_rate": 9.78430187796898e-07, "token_acc": 0.8014796149124508, "epoch": 2.7808988764044944, "step": 495}, {"loss": 0.7704888343811035, "grad_norm": 3.2002284536041015, "learning_rate": 9.588297132640824e-07, "token_acc": 0.7819947043248014, "epoch": 2.808988764044944, "step": 500}, {"loss": 0.7854640483856201, "grad_norm": 2.5594223016016078, "learning_rate": 9.392450710562375e-07, "token_acc": 0.7854151852729715, "epoch": 2.837078651685393, "step": 505}, {"loss": 0.7502545356750489, "grad_norm": 2.7620022462801272, "learning_rate": 9.196837925860515e-07, "token_acc": 0.7842350581155231, "epoch": 2.865168539325843, "step": 510}, {"loss": 0.8321625709533691, "grad_norm": 2.605083380262497, "learning_rate": 9.001534002815207e-07, "token_acc": 0.7827749732293101, "epoch": 2.893258426966292, "step": 515}, {"loss": 0.7523886680603027, "grad_norm": 2.220912204917523, "learning_rate": 8.80661404693149e-07, "token_acc": 0.7955712645761586, "epoch": 2.9213483146067416, "step": 520}, {"loss": 0.7746825218200684, "grad_norm": 3.178835042001676, "learning_rate": 8.612153016057112e-07, "token_acc": 0.7810026385224275, "epoch": 2.949438202247191, "step": 525}, {"loss": 0.7034193992614746, "grad_norm": 2.863725028456792, "learning_rate": 8.41822569155696e-07, "token_acc": 0.7964837506659563, "epoch": 2.9775280898876404, "step": 530}, {"eval_loss": 0.8825362324714661, "eval_runtime": 52.2495, "eval_samples_per_second": 3.024, "eval_steps_per_second": 0.766, "eval_token_acc": 0.7631736962196533, "epoch": 3.0, "step": 534}, {"loss": 0.7498362541198731, "grad_norm": 2.395759337225569, "learning_rate": 8.224906649555365e-07, "token_acc": 0.7876026964398567, "epoch": 3.00561797752809, "step": 535}, {"loss": 0.7054306030273437, "grad_norm": 3.2201735526193302, "learning_rate": 8.032270232257311e-07, "token_acc": 0.796456228687976, "epoch": 3.033707865168539, "step": 540}, {"loss": 0.6821948528289795, "grad_norm": 2.675773616052882, "learning_rate": 7.840390519359643e-07, "token_acc": 0.8047553726566072, "epoch": 3.061797752808989, "step": 545}, {"loss": 0.6732476234436036, "grad_norm": 2.192142273959621, "learning_rate": 7.64934129956315e-07, "token_acc": 0.8034070531978482, "epoch": 3.0898876404494384, "step": 550}, {"loss": 0.7150593757629394, "grad_norm": 2.6648245247897995, "learning_rate": 7.459196042196646e-07, "token_acc": 0.7971890333490422, "epoch": 3.1179775280898876, "step": 555}, {"loss": 0.6999235153198242, "grad_norm": 2.7970107221137237, "learning_rate": 7.27002786896379e-07, "token_acc": 0.7966077207582062, "epoch": 3.146067415730337, "step": 560}, {"loss": 0.6791402339935303, "grad_norm": 2.7149081460144187, "learning_rate": 
7.081909525823624e-07, "token_acc": 0.8051450005598477, "epoch": 3.1741573033707864, "step": 565}, {"loss": 0.6704624176025391, "grad_norm": 2.402152195905122, "learning_rate": 6.894913355015611e-07, "token_acc": 0.8056129233032506, "epoch": 3.202247191011236, "step": 570}, {"loss": 0.6743431091308594, "grad_norm": 2.657506495966265, "learning_rate": 6.7091112672399e-07, "token_acc": 0.8076409786732959, "epoch": 3.2303370786516856, "step": 575}, {"loss": 0.6652978897094727, "grad_norm": 2.5827369760682872, "learning_rate": 6.524574714003561e-07, "token_acc": 0.8102163887903512, "epoch": 3.258426966292135, "step": 580}, {"loss": 0.6723855972290039, "grad_norm": 2.4844441093225873, "learning_rate": 6.341374660143418e-07, "token_acc": 0.8097377367654202, "epoch": 3.2865168539325844, "step": 585}, {"loss": 0.7068703651428223, "grad_norm": 3.0140248848523896, "learning_rate": 6.159581556535987e-07, "token_acc": 0.7949241993066695, "epoch": 3.3146067415730336, "step": 590}, {"loss": 0.6378186225891114, "grad_norm": 2.914768458097443, "learning_rate": 5.979265313005127e-07, "token_acc": 0.8125073262220138, "epoch": 3.342696629213483, "step": 595}, {"loss": 0.6649627685546875, "grad_norm": 2.669260780004739, "learning_rate": 5.800495271437711e-07, "token_acc": 0.804279146225567, "epoch": 3.370786516853933, "step": 600}, {"loss": 0.6686641693115234, "grad_norm": 2.842503331123678, "learning_rate": 5.623340179117694e-07, "token_acc": 0.8031670256738662, "epoch": 3.398876404494382, "step": 605}, {"loss": 0.6307407855987549, "grad_norm": 1.9777827662885858, "learning_rate": 5.447868162288895e-07, "token_acc": 0.8252077223851417, "epoch": 3.4269662921348316, "step": 610}, {"loss": 0.7023709774017334, "grad_norm": 2.5975326256420272, "learning_rate": 5.27414669995653e-07, "token_acc": 0.8033269961977186, "epoch": 3.455056179775281, "step": 615}, {"loss": 0.691668701171875, "grad_norm": 2.762923053022917, "learning_rate": 5.102242597937717e-07, "token_acc": 0.801361738470298, "epoch": 3.4831460674157304, "step": 620}, {"loss": 0.6674950599670411, "grad_norm": 2.198394586742568, "learning_rate": 4.9322219631708e-07, "token_acc": 0.8112924762242145, "epoch": 3.51123595505618, "step": 625}, {"loss": 0.6818737983703613, "grad_norm": 3.0680655248904567, "learning_rate": 4.76415017829347e-07, "token_acc": 0.8015991022583813, "epoch": 3.539325842696629, "step": 630}, {"loss": 0.6645694255828858, "grad_norm": 2.687690002198965, "learning_rate": 4.598091876499417e-07, "token_acc": 0.8108724942942778, "epoch": 3.567415730337079, "step": 635}, {"loss": 0.724640941619873, "grad_norm": 2.7473854525046146, "learning_rate": 4.4341109166831557e-07, "token_acc": 0.7976230202530996, "epoch": 3.595505617977528, "step": 640}, {"loss": 0.6927940368652343, "grad_norm": 2.517686811114013, "learning_rate": 4.272270358882666e-07, "token_acc": 0.7996814320388349, "epoch": 3.6235955056179776, "step": 645}, {"loss": 0.6282668590545655, "grad_norm": 2.896208342687571, "learning_rate": 4.1126324400291756e-07, "token_acc": 0.8196018006586302, "epoch": 3.6516853932584272, "step": 650}, {"loss": 0.6508645057678223, "grad_norm": 2.5508798363070504, "learning_rate": 3.955258550013544e-07, "token_acc": 0.8111434174048279, "epoch": 3.6797752808988764, "step": 655}, {"loss": 0.6815293312072754, "grad_norm": 2.2374826423006167, "learning_rate": 3.800209208078311e-07, "token_acc": 0.8102285852426871, "epoch": 3.7078651685393256, "step": 660}, {"loss": 0.6387320041656495, "grad_norm": 2.9524831932449036, "learning_rate": 3.6475440395446147e-07, 
"token_acc": 0.8136092137978337, "epoch": 3.735955056179775, "step": 665}, {"loss": 0.6397681713104248, "grad_norm": 1.920808103159051, "learning_rate": 3.497321752882856e-07, "token_acc": 0.8247165296993655, "epoch": 3.764044943820225, "step": 670}, {"loss": 0.6160248756408692, "grad_norm": 2.8385195831057652, "learning_rate": 3.3496001171359203e-07, "token_acc": 0.8188555396749775, "epoch": 3.7921348314606744, "step": 675}, {"loss": 0.6512800216674804, "grad_norm": 3.2564998932507394, "learning_rate": 3.2044359397037046e-07, "token_acc": 0.8112194555690986, "epoch": 3.8202247191011236, "step": 680}, {"loss": 0.6533447265625, "grad_norm": 2.6995021278159683, "learning_rate": 3.061885044497423e-07, "token_acc": 0.808483524485709, "epoch": 3.8483146067415728, "step": 685}, {"loss": 0.6999950408935547, "grad_norm": 2.805375420819483, "learning_rate": 2.922002250472119e-07, "token_acc": 0.7963018682130255, "epoch": 3.8764044943820224, "step": 690}, {"loss": 0.6615266799926758, "grad_norm": 2.5785503469274675, "learning_rate": 2.784841350545656e-07, "token_acc": 0.8081730343054694, "epoch": 3.904494382022472, "step": 695}, {"loss": 0.6910998821258545, "grad_norm": 2.8646434294673133, "learning_rate": 2.650455090912267e-07, "token_acc": 0.8002536613381832, "epoch": 3.932584269662921, "step": 700}, {"loss": 0.6754047393798828, "grad_norm": 2.9179898032381284, "learning_rate": 2.518895150758642e-07, "token_acc": 0.8122761054376303, "epoch": 3.960674157303371, "step": 705}, {"loss": 0.6272696495056153, "grad_norm": 2.750126080882231, "learning_rate": 2.3902121223903226e-07, "token_acc": 0.8169156383055496, "epoch": 3.98876404494382, "step": 710}, {"eval_loss": 0.8980308771133423, "eval_runtime": 51.6098, "eval_samples_per_second": 3.061, "eval_steps_per_second": 0.775, "eval_token_acc": 0.7620612718297485, "epoch": 4.0, "step": 712}, {"loss": 0.6350936889648438, "grad_norm": 2.140971422330691, "learning_rate": 2.264455491776067e-07, "token_acc": 0.8166538424545542, "epoch": 4.01685393258427, "step": 715}, {"loss": 0.6364316463470459, "grad_norm": 2.6653527675031468, "learning_rate": 2.1416736195176865e-07, "token_acc": 0.8147693621108236, "epoch": 4.044943820224719, "step": 720}, {"loss": 0.6658261299133301, "grad_norm": 2.3559326662179836, "learning_rate": 2.0219137222526183e-07, "token_acc": 0.8098752655373387, "epoch": 4.073033707865169, "step": 725}, {"loss": 0.5859476089477539, "grad_norm": 2.4186769841326496, "learning_rate": 1.9052218544964471e-07, "token_acc": 0.8303967756869366, "epoch": 4.101123595505618, "step": 730}, {"loss": 0.6930999755859375, "grad_norm": 2.7622710156719967, "learning_rate": 1.7916428909323055e-07, "token_acc": 0.8026371617872646, "epoch": 4.129213483146067, "step": 735}, {"loss": 0.6445240020751953, "grad_norm": 2.526189363147431, "learning_rate": 1.6812205091539978e-07, "token_acc": 0.8160474572494848, "epoch": 4.157303370786517, "step": 740}, {"loss": 0.6446362972259522, "grad_norm": 2.5627537802583675, "learning_rate": 1.5739971728694845e-07, "token_acc": 0.8152002601756185, "epoch": 4.185393258426966, "step": 745}, {"loss": 0.6366473197937011, "grad_norm": 2.0520785942294717, "learning_rate": 1.470014115571143e-07, "token_acc": 0.8187145926938856, "epoch": 4.213483146067416, "step": 750}, {"loss": 0.5823760986328125, "grad_norm": 2.251613148105564, "learning_rate": 1.3693113246791588e-07, "token_acc": 0.8308830688345828, "epoch": 4.241573033707865, "step": 755}, {"loss": 0.6482645034790039, "grad_norm": 3.01319967824544, "learning_rate": 1.2719275261640583e-07, 
"token_acc": 0.815391954205756, "epoch": 4.269662921348314, "step": 760}, {"loss": 0.619762372970581, "grad_norm": 2.6856047756576302, "learning_rate": 1.17790016965438e-07, "token_acc": 0.8168320741306236, "epoch": 4.297752808988764, "step": 765}, {"loss": 0.6450946807861329, "grad_norm": 2.3610135494329065, "learning_rate": 1.0872654140351457e-07, "token_acc": 0.8169910079184002, "epoch": 4.325842696629214, "step": 770}, {"loss": 0.6303973197937012, "grad_norm": 9.956042165852814, "learning_rate": 1.0000581135427066e-07, "token_acc": 0.8181345654057176, "epoch": 4.353932584269663, "step": 775}, {"loss": 0.6207198619842529, "grad_norm": 3.4405497192039394, "learning_rate": 9.163118043613083e-08, "token_acc": 0.8263155071608647, "epoch": 4.382022471910112, "step": 780}, {"loss": 0.6147477626800537, "grad_norm": 2.4737673280857364, "learning_rate": 8.360586917264977e-08, "token_acc": 0.8181473332827555, "epoch": 4.410112359550562, "step": 785}, {"loss": 0.6550581932067872, "grad_norm": 2.572565746690504, "learning_rate": 7.593296375403913e-08, "token_acc": 0.8079718640093787, "epoch": 4.438202247191011, "step": 790}, {"loss": 0.6273100852966309, "grad_norm": 3.0730117763549307, "learning_rate": 6.861541485034939e-08, "token_acc": 0.8152930205867585, "epoch": 4.466292134831461, "step": 795}, {"loss": 0.6209675788879394, "grad_norm": 3.296535629417622, "learning_rate": 6.165603647677054e-08, "token_acc": 0.8250946305902145, "epoch": 4.49438202247191, "step": 800}, {"loss": 0.6240396499633789, "grad_norm": 2.87938642385584, "learning_rate": 5.50575049114812e-08, "token_acc": 0.814978215000137, "epoch": 4.52247191011236, "step": 805}, {"loss": 0.6181344985961914, "grad_norm": 2.2765135856663274, "learning_rate": 4.8822357666467476e-08, "token_acc": 0.8184484137563316, "epoch": 4.550561797752809, "step": 810}, {"loss": 0.6588056564331055, "grad_norm": 2.9483978960639456, "learning_rate": 4.295299251170537e-08, "token_acc": 0.8099685675797036, "epoch": 4.578651685393258, "step": 815}, {"loss": 0.6478778839111328, "grad_norm": 2.8874582609342507, "learning_rate": 3.7451666553080185e-08, "token_acc": 0.8155282939051772, "epoch": 4.606741573033708, "step": 820}, {"loss": 0.6231002807617188, "grad_norm": 2.5652296223862114, "learning_rate": 3.2320495364401624e-08, "token_acc": 0.8174460431654677, "epoch": 4.634831460674158, "step": 825}, {"loss": 0.6182103633880616, "grad_norm": 2.604254081645488, "learning_rate": 2.75614521738442e-08, "token_acc": 0.8202523137376557, "epoch": 4.662921348314606, "step": 830}, {"loss": 0.658519172668457, "grad_norm": 3.2926401813355834, "learning_rate": 2.317636710512849e-08, "token_acc": 0.8097054670663133, "epoch": 4.691011235955056, "step": 835}, {"loss": 0.6417614936828613, "grad_norm": 2.8497161008517264, "learning_rate": 1.9166926473734634e-08, "token_acc": 0.8092727513577957, "epoch": 4.719101123595506, "step": 840}, {"loss": 0.6665369033813476, "grad_norm": 2.5395360111391305, "learning_rate": 1.553467213841664e-08, "token_acc": 0.8113002042205582, "epoch": 4.747191011235955, "step": 845}, {"loss": 0.6072872161865235, "grad_norm": 2.3788192169546583, "learning_rate": 1.2281000908271333e-08, "token_acc": 0.8204576615762849, "epoch": 4.775280898876405, "step": 850}, {"loss": 0.6415260791778564, "grad_norm": 2.7730651984944896, "learning_rate": 9.407164005584057e-09, "token_acc": 0.8065747562428575, "epoch": 4.803370786516854, "step": 855}, {"loss": 0.6379406452178955, "grad_norm": 2.499846728077708, "learning_rate": 6.914266584662987e-09, "token_acc": 
0.8102027970114125, "epoch": 4.831460674157303, "step": 860}, {"loss": 0.646446418762207, "grad_norm": 2.2227936530626673, "learning_rate": 4.803267306844106e-09, "token_acc": 0.8108356787183373, "epoch": 4.859550561797753, "step": 865}, {"loss": 0.6551089763641358, "grad_norm": 2.5251611175984925, "learning_rate": 3.0749779718314274e-09, "token_acc": 0.80992913120102, "epoch": 4.887640449438202, "step": 870}, {"loss": 0.6653870582580567, "grad_norm": 3.108085287020426, "learning_rate": 1.730063205513277e-09, "token_acc": 0.8124039226080042, "epoch": 4.915730337078652, "step": 875}, {"loss": 0.6394953727722168, "grad_norm": 2.759567340413772, "learning_rate": 7.690402043758481e-10, "token_acc": 0.8112972103599356, "epoch": 4.943820224719101, "step": 880}, {"loss": 0.6241713523864746, "grad_norm": 3.031348442648791, "learning_rate": 1.922785366117452e-10, "token_acc": 0.8159511040536896, "epoch": 4.97191011235955, "step": 885}, {"loss": 0.6294559478759766, "grad_norm": 3.270476256231695, "learning_rate": 0.0, "token_acc": 0.8162194394752534, "epoch": 5.0, "step": 890}, {"eval_loss": 0.9060819745063782, "eval_runtime": 52.8028, "eval_samples_per_second": 2.992, "eval_steps_per_second": 0.758, "eval_token_acc": 0.7614734566691739, "epoch": 5.0, "step": 890}, {"eval_loss": 0.9060819745063782, "eval_runtime": 53.3953, "eval_samples_per_second": 2.959, "eval_steps_per_second": 0.749, "eval_token_acc": 0.7614734566691739, "epoch": 5.0, "step": 890}, {"train_runtime": 8561.3347, "train_samples_per_second": 0.83, "train_steps_per_second": 0.104, "total_flos": 18284469882880.0, "train_loss": 0.8261511836159096, "epoch": 5.0, "step": 890}], "memory": 78.40234375}
|
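A note for readers who want to work with these logs rather than eyeball the diff: each line of logging.jsonl is a standalone JSON object. Per-step training records carry "loss" and "token_acc", the per-epoch eval records carry "eval_loss", and the large final record (line 187 above) holds "best_model_checkpoint", "best_metric", and the full "log_history". The sketch below is illustrative only, not part of the upload; it assumes the path relative to the repo root, and it re-derives the reported throughput (890 optimizer steps and 1422 train samples over 5 epochs against a train_runtime of ~8561 s) as a sanity check against the logged train_steps_per_second and train_samples_per_second.

```python
import json

# Read logging.jsonl line by line; each non-empty line is one JSON record.
records = []
with open("v0-20251203-162131/logging.jsonl") as f:  # path relative to the repo root
    for line in f:
        line = line.strip()
        if line:
            records.append(json.loads(line))

train_steps = [r for r in records if "loss" in r]   # per-step training records
evals = [r for r in records if "eval_loss" in r]    # per-epoch eval records
summary = records[-1]                               # the large record on line 187

print(summary["best_model_checkpoint"])  # .../checkpoint-534
print(summary["best_metric"])            # 0.88253623 (best eval_loss, reached at epoch 3)

# Re-derive the reported throughput from the train_runtime record:
# 890 / 8561.3347 ~ 0.104 steps/s and 1422 * 5 / 8561.3347 ~ 0.83 samples/s,
# matching train_steps_per_second and train_samples_per_second logged above.
final = next(r for r in records if "train_runtime" in r)
print(round(890 / final["train_runtime"], 3))
print(round(1422 * 5 / final["train_runtime"], 2))
```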
v0-20251203-162131/val_dataset.jsonl
ADDED
|
The diff for this file is too large to render.