21223wj committed
Commit c6e6d70 · verified · 1 Parent(s): 8c44dd9

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
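A minimal sketch (not part of the commit) for checking that a locally downloaded copy of this upload resolves the added special tokens to the IDs listed above. The local path is illustrative, assuming the folder has been fetched with `huggingface_hub` or `git lfs`.

```python
# Hedged sketch: verify a few entries of added_tokens.json against the tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./Qwen3-8B-Base-sft")  # hypothetical local path

for token, expected_id in [("<think>", 151667), ("</think>", 151668), ("<|im_end|>", 151645)]:
    actual_id = tok.convert_tokens_to_ids(token)
    print(f"{token!r}: expected {expected_id}, got {actual_id}")
    assert actual_id == expected_id
```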
args.json ADDED
@@ -0,0 +1,366 @@
+ {
+ "model": "Qwen3-8B-Base",
+ "model_type": "qwen3",
+ "model_revision": null,
+ "task_type": "causal_lm",
+ "torch_dtype": "bfloat16",
+ "attn_impl": "flash_attn",
+ "num_labels": null,
+ "problem_type": null,
+ "rope_scaling": null,
+ "device_map": null,
+ "max_memory": {},
+ "local_repo_path": null,
+ "init_strategy": null,
+ "template": "qwen3",
+ "system": null,
+ "max_length": 24000,
+ "truncation_strategy": "delete",
+ "max_pixels": null,
+ "agent_template": null,
+ "norm_bbox": null,
+ "response_prefix": null,
+ "padding_side": "right",
+ "loss_scale": "default",
+ "sequence_parallel_size": 1,
+ "use_chat_template": true,
+ "template_backend": "swift",
+ "dataset": [
+ "376k_sft.jsonl"
+ ],
+ "val_dataset": [],
+ "split_dataset_ratio": 0.01,
+ "data_seed": 42,
+ "dataset_num_proc": 1,
+ "load_from_cache_file": true,
+ "dataset_shuffle": true,
+ "val_dataset_shuffle": false,
+ "streaming": false,
+ "interleave_prob": null,
+ "stopping_strategy": "first_exhausted",
+ "shuffle_buffer_size": 1000,
+ "download_mode": "reuse_dataset_if_exists",
+ "columns": {},
+ "strict": false,
+ "remove_unused_columns": true,
+ "model_name": [
+ null,
+ null
+ ],
+ "model_author": [
+ null,
+ null
+ ],
+ "custom_dataset_info": [],
+ "quant_method": null,
+ "quant_bits": null,
+ "hqq_axis": null,
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "bnb_4bit_quant_storage": null,
+ "max_new_tokens": 64,
+ "temperature": 0.0,
+ "top_k": null,
+ "top_p": null,
+ "repetition_penalty": null,
+ "num_beams": 1,
+ "stream": false,
+ "stop_words": [],
+ "logprobs": false,
+ "top_logprobs": null,
+ "ckpt_dir": null,
+ "lora_modules": [],
+ "tuner_backend": "peft",
+ "train_type": "full",
+ "adapters": [],
+ "external_plugins": [],
+ "seed": 42,
+ "model_kwargs": {},
+ "load_args": false,
+ "load_data_args": false,
+ "use_hf": false,
+ "hub_token": null,
+ "custom_register_path": [],
+ "ddp_timeout": 1800,
+ "ddp_backend": "nccl",
+ "ignore_args_error": false,
+ "use_swift_lora": false,
+ "output_dir": "/openpai_config/sft/Long_Cot_data/r1-SFT-380k-24k-length-Qwen3-8B-Base-bs176-7p-5e-5/v0-20250824-012153",
+ "overwrite_output_dir": false,
+ "do_train": false,
+ "do_eval": false,
+ "do_predict": false,
+ "eval_strategy": "steps",
+ "prediction_loss_only": false,
+ "per_device_train_batch_size": 1,
+ "per_device_eval_batch_size": 1,
+ "per_gpu_train_batch_size": null,
+ "per_gpu_eval_batch_size": null,
+ "gradient_accumulation_steps": 2,
+ "eval_accumulation_steps": null,
+ "eval_delay": 0,
+ "torch_empty_cache_steps": null,
+ "learning_rate": 5e-05,
+ "weight_decay": 0.1,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.95,
+ "adam_epsilon": 1e-08,
+ "max_grad_norm": 1.0,
+ "num_train_epochs": 7.0,
+ "max_steps": -1,
+ "lr_scheduler_type": "cosine",
+ "lr_scheduler_kwargs": null,
+ "warmup_ratio": 0.03,
+ "warmup_steps": 0,
+ "log_level": "passive",
+ "log_level_replica": "warning",
+ "log_on_each_node": true,
+ "logging_dir": "/openpai_config/sft/Long_Cot_data/r1-SFT-380k-24k-length-Qwen3-8B-Base-bs176-7p-5e-5/v0-20250824-012153/runs",
+ "logging_strategy": "steps",
+ "logging_first_step": true,
+ "logging_steps": 5,
+ "logging_nan_inf_filter": true,
+ "save_strategy": "steps",
+ "save_steps": 400.0,
+ "save_total_limit": 40,
+ "save_safetensors": true,
+ "save_on_each_node": false,
+ "save_only_model": true,
+ "restore_callback_states_from_checkpoint": false,
+ "no_cuda": false,
+ "use_cpu": false,
+ "use_mps_device": false,
+ "jit_mode_eval": false,
+ "use_ipex": false,
+ "bf16": true,
+ "fp16": false,
+ "fp16_opt_level": "O1",
+ "half_precision_backend": "auto",
+ "bf16_full_eval": false,
+ "fp16_full_eval": false,
+ "tf32": null,
+ "local_rank": 0,
+ "tpu_num_cores": null,
+ "tpu_metrics_debug": false,
+ "debug": null,
+ "dataloader_drop_last": false,
+ "eval_steps": 400.0,
+ "dataloader_num_workers": 4,
+ "dataloader_prefetch_factor": null,
+ "past_index": -1,
+ "run_name": "/openpai_config/sft/Long_Cot_data/r1-SFT-380k-24k-length-Qwen3-8B-Base-bs176-7p-5e-5/v0-20250824-012153",
+ "disable_tqdm": null,
+ "label_names": null,
+ "load_best_model_at_end": false,
+ "metric_for_best_model": "loss",
+ "greater_is_better": false,
+ "ignore_data_skip": false,
+ "fsdp": "",
+ "fsdp_min_num_params": 0,
+ "fsdp_config": null,
+ "fsdp_transformer_layer_cls_to_wrap": null,
+ "accelerator_config": {
+ "dispatch_batches": false
+ },
+ "deepspeed": {
+ "fp16": {
+ "enabled": "auto",
+ "loss_scale": 0,
+ "loss_scale_window": 1000,
+ "initial_scale_power": 16,
+ "hysteresis": 2,
+ "min_loss_scale": 1
+ },
+ "bf16": {
+ "enabled": "auto"
+ },
+ "zero_optimization": {
+ "stage": 3,
+ "offload_optimizer": {
+ "device": "cpu",
+ "pin_memory": true
+ },
+ "offload_param": {
+ "device": "cpu",
+ "pin_memory": true
+ },
+ "overlap_comm": false,
+ "contiguous_gradients": true,
+ "sub_group_size": 1000000000.0,
+ "reduce_bucket_size": "auto",
+ "stage3_prefetch_bucket_size": "auto",
+ "stage3_param_persistence_threshold": "auto",
+ "stage3_max_live_parameters": 1000000000.0,
+ "stage3_max_reuse_distance": 1000000000.0,
+ "stage3_gather_16bit_weights_on_model_save": true
+ },
+ "gradient_accumulation_steps": "auto",
+ "gradient_clipping": "auto",
+ "steps_per_print": 2000,
+ "train_batch_size": "auto",
+ "train_micro_batch_size_per_gpu": "auto",
+ "wall_clock_breakdown": false
+ },
+ "label_smoothing_factor": 0.0,
+ "optim": "adamw_torch",
+ "optim_args": null,
+ "adafactor": false,
+ "group_by_length": false,
+ "length_column_name": "length",
+ "report_to": [
+ "tensorboard"
+ ],
+ "ddp_find_unused_parameters": null,
+ "ddp_bucket_cap_mb": null,
+ "ddp_broadcast_buffers": null,
+ "dataloader_pin_memory": true,
+ "dataloader_persistent_workers": false,
+ "skip_memory_metrics": true,
+ "use_legacy_prediction_loop": false,
+ "push_to_hub": false,
+ "resume_from_checkpoint": null,
+ "hub_model_id": null,
+ "hub_strategy": "every_save",
+ "hub_private_repo": null,
+ "hub_always_push": false,
+ "gradient_checkpointing": true,
+ "gradient_checkpointing_kwargs": null,
+ "include_inputs_for_metrics": false,
+ "include_for_metrics": [],
+ "eval_do_concat_batches": true,
+ "fp16_backend": "auto",
+ "push_to_hub_model_id": null,
+ "push_to_hub_organization": null,
+ "push_to_hub_token": null,
+ "mp_parameters": "",
+ "auto_find_batch_size": false,
+ "full_determinism": false,
+ "torchdynamo": null,
+ "ray_scope": "last",
+ "torch_compile": false,
+ "torch_compile_backend": null,
+ "torch_compile_mode": null,
+ "include_tokens_per_second": false,
+ "include_num_input_tokens_seen": false,
+ "neftune_noise_alpha": null,
+ "optim_target_modules": null,
+ "batch_eval_metrics": false,
+ "eval_on_start": false,
+ "use_liger_kernel": false,
+ "eval_use_gather_object": false,
+ "average_tokens_across_devices": false,
+ "sortish_sampler": false,
+ "predict_with_generate": false,
+ "generation_max_length": null,
+ "generation_num_beams": null,
+ "generation_config": null,
+ "check_model": true,
+ "acc_strategy": "token",
+ "train_dataloader_shuffle": true,
+ "max_epochs": null,
+ "metric_warmup_step": 0,
+ "fsdp_num": 1,
+ "acc_steps": 1,
+ "eval_use_evalscope": false,
+ "eval_datasets": [],
+ "eval_limit": null,
+ "eval_datasets_args": null,
+ "eval_generation_config": null,
+ "freeze_parameters": [],
+ "freeze_parameters_regex": null,
+ "freeze_parameters_ratio": 0.0,
+ "trainable_parameters": [],
+ "trainable_parameters_regex": null,
+ "freeze_llm": false,
+ "freeze_vit": true,
+ "freeze_aligner": true,
+ "target_modules": [
+ "all-linear"
+ ],
+ "target_regex": null,
+ "modules_to_save": [],
+ "lora_rank": 8,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "lora_bias": "none",
+ "lora_dtype": null,
+ "lorap_lr_ratio": null,
+ "use_rslora": false,
+ "use_dora": false,
+ "lora_ga_batch_size": 2,
+ "lora_ga_iters": 2,
+ "lora_ga_max_length": 1024,
+ "lora_ga_direction": "ArB2r",
+ "lora_ga_scale": "stable",
+ "lora_ga_stable_gamma": 16,
+ "init_weights": true,
+ "fourier_n_frequency": 2000,
+ "fourier_scaling": 300.0,
+ "boft_block_size": 4,
+ "boft_block_num": 0,
+ "boft_n_butterfly_factor": 1,
+ "boft_dropout": 0.0,
+ "vera_rank": 256,
+ "vera_projection_prng_key": 0,
+ "vera_dropout": 0.0,
+ "vera_d_initial": 0.1,
+ "adapter_act": "gelu",
+ "adapter_length": 128,
+ "use_galore": false,
+ "galore_target_modules": null,
+ "galore_rank": 128,
+ "galore_update_proj_gap": 50,
+ "galore_scale": 1.0,
+ "galore_proj_type": "std",
+ "galore_optim_per_parameter": false,
+ "galore_with_embedding": false,
+ "galore_quantization": false,
+ "galore_proj_quant": false,
+ "galore_proj_bits": 4,
+ "galore_proj_group_size": 256,
+ "galore_cos_threshold": 0.4,
+ "galore_gamma_proj": 2,
+ "galore_queue_size": 5,
+ "adalora_target_r": 8,
+ "adalora_init_r": 12,
+ "adalora_tinit": 0,
+ "adalora_tfinal": 0,
+ "adalora_deltaT": 1,
+ "adalora_beta1": 0.85,
+ "adalora_beta2": 0.85,
+ "adalora_orth_reg_weight": 0.5,
+ "llamapro_num_new_blocks": 4,
+ "llamapro_num_groups": null,
+ "lisa_activated_layers": 0,
+ "lisa_step_interval": 20,
+ "reft_layer_key": null,
+ "reft_layers": null,
+ "reft_rank": 4,
+ "reft_intervention_type": "LoreftIntervention",
+ "reft_args": null,
+ "swanlab_token": null,
+ "swanlab_project": null,
+ "swanlab_workspace": null,
+ "swanlab_exp_name": null,
+ "swanlab_mode": "cloud",
+ "add_version": true,
+ "resume_only_model": false,
+ "create_checkpoint_symlink": false,
+ "packing": false,
+ "lazy_tokenize": true,
+ "loss_type": null,
+ "optimizer": null,
+ "metric": null,
+ "zero_hpz_partition_size": null,
+ "rank": 0,
+ "global_world_size": 88,
+ "local_world_size": 8,
+ "model_suffix": "Qwen3-8B-Base",
+ "model_info": "ModelInfo(model_type='qwen3', model_dir='/openpai_config/sft/Long_Cot_data/Qwen3-8B-Base', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling=None, config=None, task_type='causal_lm', num_labels=None)",
+ "model_meta": "ModelMeta(model_type='qwen3', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen3-0.6B-Base', hf_model_id='Qwen/Qwen3-0.6B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-1.7B-Base', hf_model_id='Qwen/Qwen3-1.7B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-Base', hf_model_id='Qwen/Qwen3-4B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B-Base', hf_model_id='Qwen/Qwen3-8B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B-Base', hf_model_id='Qwen/Qwen3-14B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B-Base', hf_model_id='Qwen/Qwen3-32B-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-0.6B', hf_model_id='Qwen/Qwen3-0.6B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-1.7B', hf_model_id='Qwen/Qwen3-1.7B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B', hf_model_id='Qwen/Qwen3-4B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B', hf_model_id='Qwen/Qwen3-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B', hf_model_id='Qwen/Qwen3-14B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B', hf_model_id='Qwen/Qwen3-32B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-0.6B-FP8', hf_model_id='Qwen/Qwen3-0.6B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-1.7B-FP8', hf_model_id='Qwen/Qwen3-1.7B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-FP8', hf_model_id='Qwen/Qwen3-4B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B-FP8', hf_model_id='Qwen/Qwen3-8B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B-FP8', hf_model_id='Qwen/Qwen3-14B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B-FP8', hf_model_id='Qwen/Qwen3-32B-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-4B-AWQ', hf_model_id='Qwen/Qwen3-4B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-8B-AWQ', hf_model_id='Qwen/Qwen3-8B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-14B-AWQ', hf_model_id='Qwen/Qwen3-14B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen3-32B-AWQ', hf_model_id='Qwen/Qwen3-32B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='swift/Qwen3-32B-AWQ', hf_model_id=None, model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen3', get_function=<function get_model_tokenizer_with_flash_attn at 0x7fe7aa7e5da0>, model_arch='llama', architectures=['Qwen3ForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.51'], tags=[])",
+ "model_dir": "/openpai_config/sft/Long_Cot_data/Qwen3-8B-Base",
+ "hub": "<class 'swift.hub.hub.MSHub'>",
+ "evaluation_strategy": "steps",
+ "training_args": "Seq2SeqTrainingArguments(output_dir='/openpai_config/sft/Long_Cot_data/r1-SFT-380k-24k-length-Qwen3-8B-Base-bs176-7p-5e-5/v0-20250824-012153', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=5e-05, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=7.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.03, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/openpai_config/sft/Long_Cot_data/r1-SFT-380k-24k-length-Qwen3-8B-Base-bs176-7p-5e-5/v0-20250824-012153/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=400, save_total_limit=40, save_safetensors=True, save_on_each_node=False, save_only_model=True, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend='nccl', tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=400, dataloader_num_workers=4, dataloader_prefetch_factor=10, past_index=-1, run_name='/openpai_config/sft/Long_Cot_data/r1-SFT-380k-24k-length-Qwen3-8B-Base-bs176-7p-5e-5/v0-20250824-012153', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'cpu', 'pin_memory': True}, 'offload_param': {'device': 'cpu', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, 
ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', optimizer=None, local_repo_path=None, galore_config=None)"
+ }
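args.json records the ms-swift SFT configuration: full-parameter training of Qwen3-8B-Base on 376k_sft.jsonl, 24,000-token max length, 7 epochs at lr 5e-5, bf16 with DeepSpeed ZeRO-3 CPU offload across 88 ranks. A minimal sketch (not part of the commit; it only assumes args.json is available next to the script) that prints the key hyperparameters and recovers the effective global batch size implied by the "bs176" tag in output_dir:

```python
# Hedged sketch: summarize the training setup recorded in args.json.
import json

with open("args.json") as f:
    args = json.load(f)

# 1 (per-device) * 2 (grad accumulation) * 88 (world size) = 176
global_batch = (
    args["per_device_train_batch_size"]
    * args["gradient_accumulation_steps"]
    * args["global_world_size"]
)

print("model:         ", args["model"])
print("train_type:    ", args["train_type"])
print("max_length:    ", args["max_length"])
print("epochs:        ", args["num_train_epochs"])
print("learning_rate: ", args["learning_rate"])
print("global batch:  ", global_batch)
print("ZeRO stage:    ", args["deepspeed"]["zero_optimization"]["stage"])
```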
chat_template.jinja ADDED
@@ -0,0 +1,85 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set content = message.content %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in message.content %}
+ {%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
+ {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
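The template wraps assistant reasoning in <think>…</think> blocks, folds tool calls/responses into <tool_call> and <tool_response> tags, and, when enable_thinking is false, appends an empty think block after the generation prompt. A minimal rendering sketch (not part of the commit; assumes a local copy of the upload and a transformers version that forwards extra keyword arguments such as enable_thinking into the template context):

```python
# Hedged sketch: render a prompt with this chat template via transformers.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./Qwen3-8B-Base-sft")  # hypothetical local path
messages = [{"role": "user", "content": "What is 2 + 2?"}]

# With add_generation_prompt=True the prompt ends in "<|im_start|>assistant\n";
# enable_thinking=False should additionally emit the empty "<think>\n\n</think>\n\n"
# block, per the last branch of the template above.
prompt = tok.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,
)
print(prompt)
```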
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 12288,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.52.3",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
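The config describes a 36-layer Qwen3 decoder with grouped-query attention (32 query heads, 8 KV heads, head_dim 128), a 12288-wide SwiGLU MLP, and untied embeddings. A small sketch (not part of the commit) that recomputes the parameter count these fields imply and checks it against the safetensors index further below, whose metadata reports total_size 16,381,470,720 bytes of bf16 weights:

```python
# Hedged sketch: parameter count implied by config.json vs. the index's total_size.
hidden, inter, layers = 4096, 12288, 36
vocab, n_q, n_kv, head_dim = 151936, 32, 8, 128

attn = hidden * (n_q * head_dim) * 2       # q_proj + o_proj
attn += hidden * (n_kv * head_dim) * 2     # k_proj + v_proj (GQA)
attn += 2 * head_dim                       # q_norm + k_norm (per-head RMSNorm)
mlp = 3 * hidden * inter                   # gate_proj, up_proj, down_proj
norms = 2 * hidden                         # input / post-attention RMSNorm
per_layer = attn + mlp + norms

total = layers * per_layer + 2 * vocab * hidden + hidden  # + embed, untied lm_head, final norm
print(total)      # 8_190_735_360 parameters
print(total * 2)  # 16_381_470_720 bytes in bfloat16 == index total_size
```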
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "max_new_tokens": 2048,
+ "transformers_version": "4.52.3"
+ }
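generation_config.json only pins bos/eos to 151643 (<|endoftext|>) and caps generation at 2048 new tokens. A minimal usage sketch (not part of the commit; local path and prompt are illustrative) in which `from_pretrained` picks up these defaults so the `generate` call needs no explicit arguments:

```python
# Hedged sketch: generation with the repo's default generation_config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "./Qwen3-8B-Base-sft"  # hypothetical local path to this upload
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, device_map="auto")

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Briefly explain gradient checkpointing."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tok(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs)  # max_new_tokens=2048, eos_token_id=151643 unless overridden
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```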
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cba405bd4483ddb136956bf44b21015df196e38917d0de9ffc97092ddda36fa4
+ size 4902257696
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:461e07415ac26eafee1aef7cb1ae1ba3e0d8687b7e97b2af5e91dae842297b29
+ size 4915960368
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7881b3054e5034cfb606b5d251506f3bd8bef0ec8935a61519d543ab1295f928
+ size 4983068496
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91ba0ad9442ba46a7aa49d7ec8da9aaedf113f80f47a4cffdcb4041af0f8c21f
+ size 1580230264
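The four shard entries above are Git LFS pointer files (version, sha256 oid, byte size), not the weights themselves; the .gitattributes rules earlier in this commit are what route *.safetensors-style binaries through LFS. A small sketch (not part of the commit; the local directory is illustrative) for checking a downloaded shard against its pointer:

```python
# Hedged sketch: verify a downloaded shard against the LFS pointer's oid and size.
import hashlib
from pathlib import Path

shard = Path("./Qwen3-8B-Base-sft/model-00001-of-00004.safetensors")  # hypothetical path
expected_sha = "cba405bd4483ddb136956bf44b21015df196e38917d0de9ffc97092ddda36fa4"
expected_size = 4902257696

h = hashlib.sha256()
with shard.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert shard.stat().st_size == expected_size
assert h.hexdigest() == expected_sha
print("shard matches its LFS pointer")
```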
model.safetensors.index.json ADDED
@@ -0,0 +1,406 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16381470720
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
30
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
31
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
91
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.16.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
102
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.16.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
106
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
107
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
108
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
109
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
110
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.17.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
113
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
114
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
115
+ "model.layers.17.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
117
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
118
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
119
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
120
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
121
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
122
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
123
+ "model.layers.18.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
124
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
125
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
126
+ "model.layers.18.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
127
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
128
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
129
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
130
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
131
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
132
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
133
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
134
+ "model.layers.19.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
135
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
136
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
137
+ "model.layers.19.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
138
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
139
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
140
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
141
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
142
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
143
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
144
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
145
+ "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
146
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
147
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
148
+ "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
149
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
150
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
151
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
152
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
153
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
154
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
155
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
156
+ "model.layers.20.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
157
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
158
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
159
+ "model.layers.20.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
160
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
161
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
162
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
163
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
164
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
165
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
166
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
167
+ "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
168
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
169
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
170
+ "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
171
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
172
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
173
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
174
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
179
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
180
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
181
+ "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
182
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
183
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
184
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
185
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
194
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
197
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
210
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
211
+ "model.layers.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
217
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
218
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
219
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
220
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
221
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
222
+ "model.layers.26.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
223
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
224
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.26.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.27.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
234
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
235
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.27.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
238
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
241
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
242
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
243
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.28.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
245
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
246
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.layers.28.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
248
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
249
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
250
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
251
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
252
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
253
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
254
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
255
+ "model.layers.29.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
256
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
257
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
258
+ "model.layers.29.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
259
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
260
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
261
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
262
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
263
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
264
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
265
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
266
+ "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
267
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
268
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
269
+ "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
270
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
271
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
272
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
273
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
274
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
275
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
276
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
277
+ "model.layers.30.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
278
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
279
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
280
+ "model.layers.30.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
281
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
282
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
283
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
284
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
285
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
286
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
287
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
288
+ "model.layers.31.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
289
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
290
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
291
+ "model.layers.31.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
292
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
293
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
294
+ "model.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
295
+ "model.layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
296
+ "model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
297
+ "model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
298
+ "model.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
299
+ "model.layers.32.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
300
+ "model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
301
+ "model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
302
+ "model.layers.32.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
303
+ "model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
304
+ "model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
305
+ "model.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
306
+ "model.layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
307
+ "model.layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
308
+ "model.layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
309
+ "model.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
310
+ "model.layers.33.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
311
+ "model.layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
312
+ "model.layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
313
+ "model.layers.33.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
314
+ "model.layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
315
+ "model.layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
316
+ "model.layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
317
+ "model.layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
318
+ "model.layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
319
+ "model.layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
320
+ "model.layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
321
+ "model.layers.34.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
322
+ "model.layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
323
+ "model.layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
324
+ "model.layers.34.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
325
+ "model.layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
326
+ "model.layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
327
+ "model.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
328
+ "model.layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
329
+ "model.layers.35.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
330
+ "model.layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
331
+ "model.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
332
+ "model.layers.35.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
333
+ "model.layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
334
+ "model.layers.35.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
335
+ "model.layers.35.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
336
+ "model.layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
337
+ "model.layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
338
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
339
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
340
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
341
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
342
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
343
+ "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
344
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
345
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
346
+ "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
347
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
348
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
349
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
350
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
351
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
352
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
353
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
354
+ "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
355
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
356
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
357
+ "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
358
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
359
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
360
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
361
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
362
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
363
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
364
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
365
+ "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
366
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
367
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
368
+ "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
369
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
370
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
371
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
372
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
373
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
374
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
375
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
376
+ "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
377
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
378
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
379
+ "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
380
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
381
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
382
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
383
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
384
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
385
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
386
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
387
+ "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
388
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
389
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
390
+ "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
391
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
392
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
393
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
394
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
395
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
396
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
397
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
398
+ "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
399
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
400
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
401
+ "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
402
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
403
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
404
+ "model.norm.weight": "model-00004-of-00004.safetensors"
405
+ }
406
+ }
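The `weight_map` entries above (from model.safetensors.index.json) tell downstream loaders which shard file holds each tensor; a layer can straddle a shard boundary, as layer 35's q/k/v projections do here while the rest of that layer sits in shard 4. Below is a minimal sketch of resolving one tensor by hand; the local checkpoint path is an assumption and not part of this upload.

```python
# Sketch only (not part of this upload): look up a tensor's shard via the
# weight_map in model.safetensors.index.json and read it with safetensors.
import json
from pathlib import Path

from safetensors import safe_open  # pip install safetensors

ckpt_dir = Path("./checkpoint")  # hypothetical local copy of this repo
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())

name = "model.layers.35.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # "model-00003-of-00004.safetensors" per the map above

with safe_open(str(ckpt_dir / shard), framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), "->", shard)
```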
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
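special_tokens_map.json above pins both the eos and pad tokens to `<|endoftext|>` and re-declares the chat and vision markers as additional special tokens. A quick sanity-check sketch, assuming the checkpoint directory is loadable with transformers (the path below is a placeholder):

```python
# Sketch: confirm the special tokens declared above surface on the loaded tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # placeholder path
print(tok.eos_token, tok.pad_token)        # both "<|endoftext|>" per special_tokens_map.json
print(tok.additional_special_tokens[:4])   # "<|im_start|>", "<|im_end|>", ...
```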
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
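tokenizer.json is stored as a Git LFS pointer, so the three lines above describe the real ~11 MB file rather than containing it. The pointer's oid and size can be used to verify a downloaded copy; the local filename below is an assumption:

```python
# Sketch: verify a downloaded tokenizer.json against the LFS pointer's oid/size.
import hashlib
from pathlib import Path

expected_sha256 = "aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4"
expected_size = 11422654

blob = Path("tokenizer.json").read_bytes()  # assumed local download location
assert len(blob) == expected_size, "size mismatch"
assert hashlib.sha256(blob).hexdigest() == expected_sha256, "sha256 mismatch"
print("tokenizer.json matches the LFS pointer")
```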
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|endoftext|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
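tokenizer_config.json registers the tool/reasoning tags (`<think>`, `<tool_call>`, ...) in `added_tokens_decoder`, keeps `Qwen2Tokenizer` as the tokenizer class, and sets `model_max_length` to 131072. A minimal sketch checking that the tags resolve to single token ids, again with a placeholder path:

```python
# Sketch: the tags registered in added_tokens_decoder should map to single ids.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # placeholder path
print(tok.convert_tokens_to_ids("<think>"))    # 151667 per tokenizer_config.json
print(tok.convert_tokens_to_ids("</think>"))   # 151668
print(tok.model_max_length)                    # 131072
```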
trainer_state.json ADDED
@@ -0,0 +1,1793 @@
1
+ {
2
+ "best_global_step": 1200,
3
+ "best_metric": 0.43056953,
4
+ "best_model_checkpoint": "/openpai_config/sft/Long_Cot_data/Stage1-380k-24k-length-Qwen3-8B-Base-resume-iter4600-4p-3e-5/v0-20250826-235423/checkpoint-1200",
5
+ "epoch": 2.2665292804396358,
6
+ "eval_steps": 300,
7
+ "global_step": 3300,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.0006869311351537008,
14
+ "grad_norm": 0.07274238765239716,
15
+ "learning_rate": 2.9999997817680842e-05,
16
+ "loss": 0.2985985279083252,
17
+ "memory(GiB)": 56.61,
18
+ "step": 1,
19
+ "token_acc": 0.8812937590030485,
20
+ "train_speed(iter/s)": 0.013692
21
+ },
22
+ {
23
+ "epoch": 0.013738622703074016,
24
+ "grad_norm": 0.10364966094493866,
25
+ "learning_rate": 2.9999127080781484e-05,
26
+ "loss": 0.32278080990439967,
27
+ "memory(GiB)": 70.37,
28
+ "step": 20,
29
+ "token_acc": 0.8852460572314409,
30
+ "train_speed(iter/s)": 0.033578
31
+ },
32
+ {
33
+ "epoch": 0.027477245406148033,
34
+ "grad_norm": 0.0828627347946167,
35
+ "learning_rate": 2.999650842472434e-05,
36
+ "loss": 0.3287534713745117,
37
+ "memory(GiB)": 70.37,
38
+ "step": 40,
39
+ "token_acc": 0.8729899276605303,
40
+ "train_speed(iter/s)": 0.035269
41
+ },
42
+ {
43
+ "epoch": 0.04121586810922205,
44
+ "grad_norm": 0.075782909989357,
45
+ "learning_rate": 2.9992144336611927e-05,
46
+ "loss": 0.32648520469665526,
47
+ "memory(GiB)": 70.37,
48
+ "step": 60,
49
+ "token_acc": 0.871392951813677,
50
+ "train_speed(iter/s)": 0.03605
51
+ },
52
+ {
53
+ "epoch": 0.054954490812296065,
54
+ "grad_norm": 0.09333578497171402,
55
+ "learning_rate": 2.998603532437709e-05,
56
+ "loss": 0.3258840799331665,
57
+ "memory(GiB)": 70.37,
58
+ "step": 80,
59
+ "token_acc": 0.8820731351865331,
60
+ "train_speed(iter/s)": 0.036483
61
+ },
62
+ {
63
+ "epoch": 0.06869311351537008,
64
+ "grad_norm": 0.08618636429309845,
65
+ "learning_rate": 2.9978182099043062e-05,
66
+ "loss": 0.3262542724609375,
67
+ "memory(GiB)": 70.37,
68
+ "step": 100,
69
+ "token_acc": 0.8849732393640833,
70
+ "train_speed(iter/s)": 0.036776
71
+ },
72
+ {
73
+ "epoch": 0.0824317362184441,
74
+ "grad_norm": 0.08525851368904114,
75
+ "learning_rate": 2.9968585574640675e-05,
76
+ "loss": 0.32371375560760496,
77
+ "memory(GiB)": 70.37,
78
+ "step": 120,
79
+ "token_acc": 0.8871000703215756,
80
+ "train_speed(iter/s)": 0.036983
81
+ },
82
+ {
83
+ "epoch": 0.09617035892151812,
84
+ "grad_norm": 0.08820851147174835,
85
+ "learning_rate": 2.995724686810202e-05,
86
+ "loss": 0.3224189281463623,
87
+ "memory(GiB)": 70.37,
88
+ "step": 140,
89
+ "token_acc": 0.8856544898235427,
90
+ "train_speed(iter/s)": 0.03715
91
+ },
92
+ {
93
+ "epoch": 0.10990898162459213,
94
+ "grad_norm": 0.0837075412273407,
95
+ "learning_rate": 2.9944167299130397e-05,
96
+ "loss": 0.32613368034362794,
97
+ "memory(GiB)": 70.4,
98
+ "step": 160,
99
+ "token_acc": 0.8848279812123834,
100
+ "train_speed(iter/s)": 0.037079
101
+ },
102
+ {
103
+ "epoch": 0.12364760432766615,
104
+ "grad_norm": 0.09300094842910767,
105
+ "learning_rate": 2.9929348390046766e-05,
106
+ "loss": 0.32998530864715575,
107
+ "memory(GiB)": 70.4,
108
+ "step": 180,
109
+ "token_acc": 0.8633912245766957,
110
+ "train_speed(iter/s)": 0.037181
111
+ },
112
+ {
113
+ "epoch": 0.13738622703074016,
114
+ "grad_norm": 0.08325231075286865,
115
+ "learning_rate": 2.9912791865612525e-05,
116
+ "loss": 0.3349442958831787,
117
+ "memory(GiB)": 70.4,
118
+ "step": 200,
119
+ "token_acc": 0.8835910835510771,
120
+ "train_speed(iter/s)": 0.037269
121
+ },
122
+ {
123
+ "epoch": 0.15112484973381418,
124
+ "grad_norm": 0.08610066026449203,
125
+ "learning_rate": 2.9894499652828798e-05,
126
+ "loss": 0.33656883239746094,
127
+ "memory(GiB)": 70.4,
128
+ "step": 220,
129
+ "token_acc": 0.8754293623532549,
130
+ "train_speed(iter/s)": 0.037356
131
+ },
132
+ {
133
+ "epoch": 0.1648634724368882,
134
+ "grad_norm": 0.09172473102807999,
135
+ "learning_rate": 2.9874473880712125e-05,
136
+ "loss": 0.3357390403747559,
137
+ "memory(GiB)": 70.4,
138
+ "step": 240,
139
+ "token_acc": 0.8816886144038251,
140
+ "train_speed(iter/s)": 0.037427
141
+ },
142
+ {
143
+ "epoch": 0.17860209513996222,
144
+ "grad_norm": 0.08784262090921402,
145
+ "learning_rate": 2.9852716880046687e-05,
146
+ "loss": 0.33715412616729734,
147
+ "memory(GiB)": 70.4,
148
+ "step": 260,
149
+ "token_acc": 0.8768648698038725,
150
+ "train_speed(iter/s)": 0.037172
151
+ },
152
+ {
153
+ "epoch": 0.19234071784303625,
154
+ "grad_norm": 0.08109364658594131,
155
+ "learning_rate": 2.9829231183113013e-05,
156
+ "loss": 0.3330291509628296,
157
+ "memory(GiB)": 70.4,
158
+ "step": 280,
159
+ "token_acc": 0.884993071444176,
160
+ "train_speed(iter/s)": 0.037251
161
+ },
162
+ {
163
+ "epoch": 0.20607934054611024,
164
+ "grad_norm": 0.08930086344480515,
165
+ "learning_rate": 2.980401952339328e-05,
166
+ "loss": 0.32976431846618653,
167
+ "memory(GiB)": 70.4,
168
+ "step": 300,
169
+ "token_acc": 0.8701908549811445,
170
+ "train_speed(iter/s)": 0.037312
171
+ },
172
+ {
173
+ "epoch": 0.20607934054611024,
174
+ "eval_loss": 0.4335128664970398,
175
+ "eval_runtime": 97.7946,
176
+ "eval_samples_per_second": 38.489,
177
+ "eval_steps_per_second": 0.603,
178
+ "eval_token_acc": 0.8434016162256411,
179
+ "step": 300
180
+ },
181
+ {
182
+ "epoch": 0.21981796324918426,
183
+ "grad_norm": 0.08906491845846176,
184
+ "learning_rate": 2.9777084835253107e-05,
185
+ "loss": 0.3341225624084473,
186
+ "memory(GiB)": 72.41,
187
+ "step": 320,
188
+ "token_acc": 0.8633943248975853,
189
+ "train_speed(iter/s)": 0.036714
190
+ },
191
+ {
192
+ "epoch": 0.23355658595225828,
193
+ "grad_norm": 0.09569748491048813,
194
+ "learning_rate": 2.9748430253600103e-05,
195
+ "loss": 0.3317814826965332,
196
+ "memory(GiB)": 72.41,
197
+ "step": 340,
198
+ "token_acc": 0.8690857078605088,
199
+ "train_speed(iter/s)": 0.036742
200
+ },
201
+ {
202
+ "epoch": 0.2472952086553323,
203
+ "grad_norm": 0.09385869652032852,
204
+ "learning_rate": 2.9718059113518926e-05,
205
+ "loss": 0.33147258758544923,
206
+ "memory(GiB)": 72.41,
207
+ "step": 360,
208
+ "token_acc": 0.8744091464585358,
209
+ "train_speed(iter/s)": 0.036789
210
+ },
211
+ {
212
+ "epoch": 0.2610338313584063,
213
+ "grad_norm": 0.09747012704610825,
214
+ "learning_rate": 2.9685974949883163e-05,
215
+ "loss": 0.3316455841064453,
216
+ "memory(GiB)": 72.41,
217
+ "step": 380,
218
+ "token_acc": 0.8744771091993391,
219
+ "train_speed(iter/s)": 0.036834
220
+ },
221
+ {
222
+ "epoch": 0.2747724540614803,
223
+ "grad_norm": 0.0942377969622612,
224
+ "learning_rate": 2.9652181496943888e-05,
225
+ "loss": 0.33130803108215334,
226
+ "memory(GiB)": 72.41,
227
+ "step": 400,
228
+ "token_acc": 0.8743932496768547,
229
+ "train_speed(iter/s)": 0.036889
230
+ },
231
+ {
232
+ "epoch": 0.28851107676455434,
233
+ "grad_norm": 0.08894450962543488,
234
+ "learning_rate": 2.9616682687895038e-05,
235
+ "loss": 0.3286163806915283,
236
+ "memory(GiB)": 72.41,
237
+ "step": 420,
238
+ "token_acc": 0.8871290646641795,
239
+ "train_speed(iter/s)": 0.036939
240
+ },
241
+ {
242
+ "epoch": 0.30224969946762836,
243
+ "grad_norm": 0.09808226674795151,
244
+ "learning_rate": 2.9579482654415627e-05,
245
+ "loss": 0.3300768375396729,
246
+ "memory(GiB)": 72.41,
247
+ "step": 440,
248
+ "token_acc": 0.8833857380634584,
249
+ "train_speed(iter/s)": 0.036991
250
+ },
251
+ {
252
+ "epoch": 0.3159883221707024,
253
+ "grad_norm": 0.09648197889328003,
254
+ "learning_rate": 2.9540585726188883e-05,
255
+ "loss": 0.3316764831542969,
256
+ "memory(GiB)": 72.41,
257
+ "step": 460,
258
+ "token_acc": 0.8862556346113715,
259
+ "train_speed(iter/s)": 0.037032
260
+ },
261
+ {
262
+ "epoch": 0.3297269448737764,
263
+ "grad_norm": 0.08601760119199753,
264
+ "learning_rate": 2.9499996430398296e-05,
265
+ "loss": 0.3263013124465942,
266
+ "memory(GiB)": 72.41,
267
+ "step": 480,
268
+ "token_acc": 0.8804854791092939,
269
+ "train_speed(iter/s)": 0.037074
270
+ },
271
+ {
272
+ "epoch": 0.34346556757685043,
273
+ "grad_norm": 0.08579235523939133,
274
+ "learning_rate": 2.945771949120071e-05,
275
+ "loss": 0.33027398586273193,
276
+ "memory(GiB)": 72.41,
277
+ "step": 500,
278
+ "token_acc": 0.8894077316408038,
279
+ "train_speed(iter/s)": 0.037103
280
+ },
281
+ {
282
+ "epoch": 0.35720419027992445,
283
+ "grad_norm": 0.08523233234882355,
284
+ "learning_rate": 2.9413759829176497e-05,
285
+ "loss": 0.32302305698394773,
286
+ "memory(GiB)": 72.41,
287
+ "step": 520,
288
+ "token_acc": 0.8851045135756845,
289
+ "train_speed(iter/s)": 0.037131
290
+ },
291
+ {
292
+ "epoch": 0.37094281298299847,
293
+ "grad_norm": 0.08190900087356567,
294
+ "learning_rate": 2.9368122560756822e-05,
295
+ "loss": 0.3292397975921631,
296
+ "memory(GiB)": 72.41,
297
+ "step": 540,
298
+ "token_acc": 0.8731282020961862,
299
+ "train_speed(iter/s)": 0.037164
300
+ },
301
+ {
302
+ "epoch": 0.3846814356860725,
303
+ "grad_norm": 0.0842556357383728,
304
+ "learning_rate": 2.9320812997628184e-05,
305
+ "loss": 0.3262279748916626,
306
+ "memory(GiB)": 72.41,
307
+ "step": 560,
308
+ "token_acc": 0.8791384953802229,
309
+ "train_speed(iter/s)": 0.037189
310
+ },
311
+ {
312
+ "epoch": 0.3984200583891465,
313
+ "grad_norm": 0.09352608770132065,
314
+ "learning_rate": 2.9271836646114166e-05,
315
+ "loss": 0.3300283908843994,
316
+ "memory(GiB)": 72.41,
317
+ "step": 580,
318
+ "token_acc": 0.8848605133294101,
319
+ "train_speed(iter/s)": 0.037218
320
+ },
321
+ {
322
+ "epoch": 0.4121586810922205,
323
+ "grad_norm": 0.08545698970556259,
324
+ "learning_rate": 2.922119920653457e-05,
325
+ "loss": 0.33294997215270994,
326
+ "memory(GiB)": 72.41,
327
+ "step": 600,
328
+ "token_acc": 0.8852764804825887,
329
+ "train_speed(iter/s)": 0.037244
330
+ },
331
+ {
332
+ "epoch": 0.4121586810922205,
333
+ "eval_loss": 0.43278947472572327,
334
+ "eval_runtime": 98.2411,
335
+ "eval_samples_per_second": 38.314,
336
+ "eval_steps_per_second": 0.601,
337
+ "eval_token_acc": 0.8435347324817475,
338
+ "step": 600
339
+ },
340
+ {
341
+ "epoch": 0.4258973037952945,
342
+ "grad_norm": 0.09264256060123444,
343
+ "learning_rate": 2.916890657254194e-05,
344
+ "loss": 0.32485041618347166,
345
+ "memory(GiB)": 72.41,
346
+ "step": 620,
347
+ "token_acc": 0.8645305113582833,
348
+ "train_speed(iter/s)": 0.036936
349
+ },
350
+ {
351
+ "epoch": 0.4396359264983685,
352
+ "grad_norm": 0.08746380358934402,
353
+ "learning_rate": 2.9114964830435648e-05,
354
+ "loss": 0.32976808547973635,
355
+ "memory(GiB)": 72.41,
356
+ "step": 640,
357
+ "token_acc": 0.8707312036033282,
358
+ "train_speed(iter/s)": 0.036935
359
+ },
360
+ {
361
+ "epoch": 0.45337454920144254,
362
+ "grad_norm": 0.08795125037431717,
363
+ "learning_rate": 2.9059380258453473e-05,
364
+ "loss": 0.3318798303604126,
365
+ "memory(GiB)": 72.41,
366
+ "step": 660,
367
+ "token_acc": 0.8750859732168144,
368
+ "train_speed(iter/s)": 0.03695
369
+ },
370
+ {
371
+ "epoch": 0.46711317190451657,
372
+ "grad_norm": 0.09344589710235596,
373
+ "learning_rate": 2.9002159326040897e-05,
374
+ "loss": 0.3324501752853394,
375
+ "memory(GiB)": 72.41,
376
+ "step": 680,
377
+ "token_acc": 0.8820933572199229,
378
+ "train_speed(iter/s)": 0.036974
379
+ },
380
+ {
381
+ "epoch": 0.4808517946075906,
382
+ "grad_norm": 0.08175963908433914,
383
+ "learning_rate": 2.894330869309814e-05,
384
+ "loss": 0.3342601776123047,
385
+ "memory(GiB)": 72.41,
386
+ "step": 700,
387
+ "token_acc": 0.888566898590454,
388
+ "train_speed(iter/s)": 0.037001
389
+ },
390
+ {
391
+ "epoch": 0.4945904173106646,
392
+ "grad_norm": 0.08012371510267258,
393
+ "learning_rate": 2.8882835209205e-05,
394
+ "loss": 0.3325735807418823,
395
+ "memory(GiB)": 72.41,
396
+ "step": 720,
397
+ "token_acc": 0.8726016398435306,
398
+ "train_speed(iter/s)": 0.03703
399
+ },
400
+ {
401
+ "epoch": 0.5083290400137386,
402
+ "grad_norm": 0.08838852494955063,
403
+ "learning_rate": 2.8820745912823653e-05,
404
+ "loss": 0.3366635799407959,
405
+ "memory(GiB)": 72.41,
406
+ "step": 740,
407
+ "token_acc": 0.8669464276591397,
408
+ "train_speed(iter/s)": 0.037058
409
+ },
410
+ {
411
+ "epoch": 0.5220676627168126,
412
+ "grad_norm": 0.08403000980615616,
413
+ "learning_rate": 2.8757048030479438e-05,
414
+ "loss": 0.33883938789367674,
415
+ "memory(GiB)": 72.41,
416
+ "step": 760,
417
+ "token_acc": 0.8731299584725523,
418
+ "train_speed(iter/s)": 0.037083
419
+ },
420
+ {
421
+ "epoch": 0.5358062854198866,
422
+ "grad_norm": 0.09687156975269318,
423
+ "learning_rate": 2.8691748975919784e-05,
424
+ "loss": 0.3338437557220459,
425
+ "memory(GiB)": 72.41,
426
+ "step": 780,
427
+ "token_acc": 0.8848287485135103,
428
+ "train_speed(iter/s)": 0.037115
429
+ },
430
+ {
431
+ "epoch": 0.5495449081229606,
432
+ "grad_norm": 0.08311958611011505,
433
+ "learning_rate": 2.862485634925131e-05,
434
+ "loss": 0.3358239889144897,
435
+ "memory(GiB)": 72.41,
436
+ "step": 800,
437
+ "token_acc": 0.8782527593369446,
438
+ "train_speed(iter/s)": 0.037145
439
+ },
440
+ {
441
+ "epoch": 0.5632835308260347,
442
+ "grad_norm": 0.08971302956342697,
443
+ "learning_rate": 2.855637793605527e-05,
444
+ "loss": 0.3317149877548218,
445
+ "memory(GiB)": 72.41,
446
+ "step": 820,
447
+ "token_acc": 0.8807387938176727,
448
+ "train_speed(iter/s)": 0.037168
449
+ },
450
+ {
451
+ "epoch": 0.5770221535291087,
452
+ "grad_norm": 0.08680903911590576,
453
+ "learning_rate": 2.848632170648139e-05,
454
+ "loss": 0.3325679779052734,
455
+ "memory(GiB)": 72.41,
456
+ "step": 840,
457
+ "token_acc": 0.8763491204567425,
458
+ "train_speed(iter/s)": 0.037194
459
+ },
460
+ {
461
+ "epoch": 0.5907607762321827,
462
+ "grad_norm": 0.08324452489614487,
463
+ "learning_rate": 2.8414695814320224e-05,
464
+ "loss": 0.3364755868911743,
465
+ "memory(GiB)": 72.41,
466
+ "step": 860,
467
+ "token_acc": 0.8762890432412627,
468
+ "train_speed(iter/s)": 0.037217
469
+ },
470
+ {
471
+ "epoch": 0.6044993989352567,
472
+ "grad_norm": 0.08400746434926987,
473
+ "learning_rate": 2.834150859605415e-05,
474
+ "loss": 0.3361694812774658,
475
+ "memory(GiB)": 72.41,
476
+ "step": 880,
477
+ "token_acc": 0.8813631606405008,
478
+ "train_speed(iter/s)": 0.037241
479
+ },
480
+ {
481
+ "epoch": 0.6182380216383307,
482
+ "grad_norm": 0.08885052800178528,
483
+ "learning_rate": 2.8266768569887078e-05,
484
+ "loss": 0.33670692443847655,
485
+ "memory(GiB)": 72.41,
486
+ "step": 900,
487
+ "token_acc": 0.8707416462917685,
488
+ "train_speed(iter/s)": 0.037262
489
+ },
490
+ {
491
+ "epoch": 0.6182380216383307,
492
+ "eval_loss": 0.43130558729171753,
493
+ "eval_runtime": 98.6067,
494
+ "eval_samples_per_second": 38.172,
495
+ "eval_steps_per_second": 0.598,
496
+ "eval_token_acc": 0.8438052359165757,
497
+ "step": 900
498
+ },
499
+ {
500
+ "epoch": 0.6319766443414048,
501
+ "grad_norm": 0.08866509050130844,
502
+ "learning_rate": 2.8190484434753047e-05,
503
+ "loss": 0.3301401615142822,
504
+ "memory(GiB)": 72.41,
505
+ "step": 920,
506
+ "token_acc": 0.8694062120826557,
507
+ "train_speed(iter/s)": 0.037051
508
+ },
509
+ {
510
+ "epoch": 0.6457152670444788,
511
+ "grad_norm": 0.0904315784573555,
512
+ "learning_rate": 2.811266506930373e-05,
513
+ "loss": 0.3325372219085693,
514
+ "memory(GiB)": 72.41,
515
+ "step": 940,
516
+ "token_acc": 0.8789977772514003,
517
+ "train_speed(iter/s)": 0.037059
518
+ },
519
+ {
520
+ "epoch": 0.6594538897475528,
521
+ "grad_norm": 0.08624011278152466,
522
+ "learning_rate": 2.80333195308751e-05,
523
+ "loss": 0.33223259449005127,
524
+ "memory(GiB)": 72.41,
525
+ "step": 960,
526
+ "token_acc": 0.8850906146562588,
527
+ "train_speed(iter/s)": 0.037068
528
+ },
529
+ {
530
+ "epoch": 0.6731925124506268,
531
+ "grad_norm": 0.09679839015007019,
532
+ "learning_rate": 2.7952457054433193e-05,
533
+ "loss": 0.3346273183822632,
534
+ "memory(GiB)": 72.41,
535
+ "step": 980,
536
+ "token_acc": 0.8790110005974765,
537
+ "train_speed(iter/s)": 0.037086
538
+ },
539
+ {
540
+ "epoch": 0.6869311351537009,
541
+ "grad_norm": 0.08788934350013733,
542
+ "learning_rate": 2.787008705149932e-05,
543
+ "loss": 0.331668758392334,
544
+ "memory(GiB)": 72.41,
545
+ "step": 1000,
546
+ "token_acc": 0.8727144921802968,
547
+ "train_speed(iter/s)": 0.037101
548
+ },
549
+ {
550
+ "epoch": 0.7006697578567749,
551
+ "grad_norm": 0.07837922871112823,
552
+ "learning_rate": 2.7786219109054618e-05,
553
+ "loss": 0.33264620304107667,
554
+ "memory(GiB)": 72.41,
555
+ "step": 1020,
556
+ "token_acc": 0.8763268597016804,
557
+ "train_speed(iter/s)": 0.037116
558
+ },
559
+ {
560
+ "epoch": 0.7144083805598489,
561
+ "grad_norm": 0.08805106580257416,
562
+ "learning_rate": 2.770086298842426e-05,
563
+ "loss": 0.33027656078338624,
564
+ "memory(GiB)": 72.41,
565
+ "step": 1040,
566
+ "token_acc": 0.8825189704888438,
567
+ "train_speed(iter/s)": 0.037133
568
+ },
569
+ {
570
+ "epoch": 0.7281470032629229,
571
+ "grad_norm": 0.07906992733478546,
572
+ "learning_rate": 2.7614028624141333e-05,
573
+ "loss": 0.3281256914138794,
574
+ "memory(GiB)": 72.41,
575
+ "step": 1060,
576
+ "token_acc": 0.8848472346715264,
577
+ "train_speed(iter/s)": 0.037145
578
+ },
579
+ {
580
+ "epoch": 0.7418856259659969,
581
+ "grad_norm": 0.08966130018234253,
582
+ "learning_rate": 2.7525726122790556e-05,
583
+ "loss": 0.33036127090454104,
584
+ "memory(GiB)": 72.41,
585
+ "step": 1080,
586
+ "token_acc": 0.8711485117219462,
587
+ "train_speed(iter/s)": 0.037158
588
+ },
589
+ {
590
+ "epoch": 0.755624248669071,
591
+ "grad_norm": 0.0860779657959938,
592
+ "learning_rate": 2.7435965761831987e-05,
593
+ "loss": 0.32908782958984373,
594
+ "memory(GiB)": 72.41,
595
+ "step": 1100,
596
+ "token_acc": 0.868969553437273,
597
+ "train_speed(iter/s)": 0.037175
598
+ },
599
+ {
600
+ "epoch": 0.769362871372145,
601
+ "grad_norm": 0.08935214579105377,
602
+ "learning_rate": 2.7344757988404845e-05,
603
+ "loss": 0.33276095390319826,
604
+ "memory(GiB)": 72.41,
605
+ "step": 1120,
606
+ "token_acc": 0.8682377205407336,
607
+ "train_speed(iter/s)": 0.037189
608
+ },
609
+ {
610
+ "epoch": 0.783101494075219,
611
+ "grad_norm": 0.08778548985719681,
612
+ "learning_rate": 2.725211341811158e-05,
613
+ "loss": 0.33044397830963135,
614
+ "memory(GiB)": 72.41,
615
+ "step": 1140,
616
+ "token_acc": 0.8793117868359244,
617
+ "train_speed(iter/s)": 0.037201
618
+ },
619
+ {
620
+ "epoch": 0.796840116778293,
621
+ "grad_norm": 0.09090530127286911,
622
+ "learning_rate": 2.71580428337823e-05,
623
+ "loss": 0.32858192920684814,
624
+ "memory(GiB)": 72.41,
625
+ "step": 1160,
626
+ "token_acc": 0.8733004247503157,
627
+ "train_speed(iter/s)": 0.037216
628
+ },
629
+ {
630
+ "epoch": 0.8105787394813669,
631
+ "grad_norm": 0.08553975820541382,
632
+ "learning_rate": 2.7062557184219806e-05,
633
+ "loss": 0.3291203498840332,
634
+ "memory(GiB)": 72.41,
635
+ "step": 1180,
636
+ "token_acc": 0.8764492301755195,
637
+ "train_speed(iter/s)": 0.037231
638
+ },
639
+ {
640
+ "epoch": 0.824317362184441,
641
+ "grad_norm": 0.08339999616146088,
642
+ "learning_rate": 2.6965667582925247e-05,
643
+ "loss": 0.3333151817321777,
644
+ "memory(GiB)": 72.41,
645
+ "step": 1200,
646
+ "token_acc": 0.8695492444572112,
647
+ "train_speed(iter/s)": 0.037244
648
+ },
649
+ {
650
+ "epoch": 0.824317362184441,
651
+ "eval_loss": 0.43056952953338623,
652
+ "eval_runtime": 99.0376,
653
+ "eval_samples_per_second": 38.006,
654
+ "eval_steps_per_second": 0.596,
655
+ "eval_token_acc": 0.8441900740502716,
656
+ "step": 1200
657
+ },
658
+ {
659
+ "epoch": 0.838055984887515,
660
+ "grad_norm": 0.08720073848962784,
661
+ "learning_rate": 2.686738530680462e-05,
662
+ "loss": 0.33159494400024414,
663
+ "memory(GiB)": 72.41,
664
+ "step": 1220,
665
+ "token_acc": 0.8612746275278335,
666
+ "train_speed(iter/s)": 0.037076
667
+ },
668
+ {
669
+ "epoch": 0.851794607590589,
670
+ "grad_norm": 0.08212270587682724,
671
+ "learning_rate": 2.676772179485629e-05,
672
+ "loss": 0.3343451976776123,
673
+ "memory(GiB)": 72.41,
674
+ "step": 1240,
675
+ "token_acc": 0.874983845473253,
676
+ "train_speed(iter/s)": 0.037077
677
+ },
678
+ {
679
+ "epoch": 0.865533230293663,
680
+ "grad_norm": 0.08813036233186722,
681
+ "learning_rate": 2.6666688646839574e-05,
682
+ "loss": 0.3311768531799316,
683
+ "memory(GiB)": 72.41,
684
+ "step": 1260,
685
+ "token_acc": 0.8867381088510741,
686
+ "train_speed(iter/s)": 0.037081
687
+ },
688
+ {
689
+ "epoch": 0.879271852996737,
690
+ "grad_norm": 0.08220379054546356,
691
+ "learning_rate": 2.6564297621924696e-05,
692
+ "loss": 0.33231358528137206,
693
+ "memory(GiB)": 72.41,
694
+ "step": 1280,
695
+ "token_acc": 0.8829307086452494,
696
+ "train_speed(iter/s)": 0.037092
697
+ },
698
+ {
699
+ "epoch": 0.8930104756998111,
700
+ "grad_norm": 0.08680058270692825,
701
+ "learning_rate": 2.6460560637324113e-05,
702
+ "loss": 0.3345161199569702,
703
+ "memory(GiB)": 72.41,
704
+ "step": 1300,
705
+ "token_acc": 0.8796637788480545,
706
+ "train_speed(iter/s)": 0.037101
707
+ },
708
+ {
709
+ "epoch": 0.9067490984028851,
710
+ "grad_norm": 0.09523913264274597,
711
+ "learning_rate": 2.6355489766905496e-05,
712
+ "loss": 0.33291900157928467,
713
+ "memory(GiB)": 72.41,
714
+ "step": 1320,
715
+ "token_acc": 0.8843804465166379,
716
+ "train_speed(iter/s)": 0.037112
717
+ },
718
+ {
719
+ "epoch": 0.9204877211059591,
720
+ "grad_norm": 0.08730066567659378,
721
+ "learning_rate": 2.6249097239786456e-05,
722
+ "loss": 0.33270628452301027,
723
+ "memory(GiB)": 72.41,
724
+ "step": 1340,
725
+ "token_acc": 0.8798414921651669,
726
+ "train_speed(iter/s)": 0.037122
727
+ },
728
+ {
729
+ "epoch": 0.9342263438090331,
730
+ "grad_norm": 0.0898577868938446,
731
+ "learning_rate": 2.6141395438911216e-05,
732
+ "loss": 0.3346142530441284,
733
+ "memory(GiB)": 72.41,
734
+ "step": 1360,
735
+ "token_acc": 0.8822741759710485,
736
+ "train_speed(iter/s)": 0.037134
737
+ },
738
+ {
739
+ "epoch": 0.9479649665121072,
740
+ "grad_norm": 0.08267758786678314,
741
+ "learning_rate": 2.603239689960935e-05,
742
+ "loss": 0.33362205028533937,
743
+ "memory(GiB)": 72.41,
744
+ "step": 1380,
745
+ "token_acc": 0.875556680651538,
746
+ "train_speed(iter/s)": 0.037147
747
+ },
748
+ {
749
+ "epoch": 0.9617035892151812,
750
+ "grad_norm": 0.08812654763460159,
751
+ "learning_rate": 2.5922114308136826e-05,
752
+ "loss": 0.3352126359939575,
753
+ "memory(GiB)": 72.41,
754
+ "step": 1400,
755
+ "token_acc": 0.8624814158268795,
756
+ "train_speed(iter/s)": 0.037157
757
+ },
758
+ {
759
+ "epoch": 0.9754422119182552,
760
+ "grad_norm": 0.08206140995025635,
761
+ "learning_rate": 2.5810560500199454e-05,
762
+ "loss": 0.32973828315734866,
763
+ "memory(GiB)": 72.41,
764
+ "step": 1420,
765
+ "token_acc": 0.8715281106649746,
766
+ "train_speed(iter/s)": 0.037167
767
+ },
768
+ {
769
+ "epoch": 0.9891808346213292,
770
+ "grad_norm": 0.08127112686634064,
771
+ "learning_rate": 2.5697748459458945e-05,
772
+ "loss": 0.33533248901367185,
773
+ "memory(GiB)": 72.41,
774
+ "step": 1440,
775
+ "token_acc": 0.8709140359593514,
776
+ "train_speed(iter/s)": 0.037179
777
+ },
778
+ {
779
+ "epoch": 1.002747724540615,
780
+ "grad_norm": 0.13326233625411987,
781
+ "learning_rate": 2.5583691316021758e-05,
782
+ "loss": 0.32816076278686523,
783
+ "memory(GiB)": 72.41,
784
+ "step": 1460,
785
+ "token_acc": 0.8743706550630328,
786
+ "train_speed(iter/s)": 0.037189
787
+ },
788
+ {
789
+ "epoch": 1.016486347243689,
790
+ "grad_norm": 0.09180466085672379,
791
+ "learning_rate": 2.5468402344910895e-05,
792
+ "loss": 0.30609779357910155,
793
+ "memory(GiB)": 72.41,
794
+ "step": 1480,
795
+ "token_acc": 0.890758443681086,
796
+ "train_speed(iter/s)": 0.037196
797
+ },
798
+ {
799
+ "epoch": 1.030224969946763,
800
+ "grad_norm": 0.09560491889715195,
801
+ "learning_rate": 2.5351894964520832e-05,
802
+ "loss": 0.3120020627975464,
803
+ "memory(GiB)": 72.41,
804
+ "step": 1500,
805
+ "token_acc": 0.8784849657881351,
806
+ "train_speed(iter/s)": 0.037203
807
+ },
808
+ {
809
+ "epoch": 1.030224969946763,
810
+ "eval_loss": 0.43459564447402954,
811
+ "eval_runtime": 99.9824,
812
+ "eval_samples_per_second": 37.647,
813
+ "eval_steps_per_second": 0.59,
814
+ "eval_token_acc": 0.8433591170448618,
815
+ "step": 1500
816
+ },
817
+ {
818
+ "epoch": 1.043963592649837,
819
+ "grad_norm": 0.08771445602178574,
820
+ "learning_rate": 2.523418273505576e-05,
821
+ "loss": 0.31691765785217285,
822
+ "memory(GiB)": 72.41,
823
+ "step": 1520,
824
+ "token_acc": 0.8715976442080544,
825
+ "train_speed(iter/s)": 0.037064
826
+ },
827
+ {
828
+ "epoch": 1.057702215352911,
829
+ "grad_norm": 0.07920734584331512,
830
+ "learning_rate": 2.511527935695133e-05,
831
+ "loss": 0.31117587089538573,
832
+ "memory(GiB)": 72.41,
833
+ "step": 1540,
834
+ "token_acc": 0.891813564000213,
835
+ "train_speed(iter/s)": 0.037065
836
+ },
837
+ {
838
+ "epoch": 1.071440838055985,
839
+ "grad_norm": 0.08091707527637482,
840
+ "learning_rate": 2.499519866928006e-05,
841
+ "loss": 0.3078420639038086,
842
+ "memory(GiB)": 72.41,
843
+ "step": 1560,
844
+ "token_acc": 0.8856428771072653,
845
+ "train_speed(iter/s)": 0.037064
846
+ },
847
+ {
848
+ "epoch": 1.0851794607590588,
849
+ "grad_norm": 0.08118196576833725,
850
+ "learning_rate": 2.487395464814062e-05,
851
+ "loss": 0.30695157051086425,
852
+ "memory(GiB)": 72.41,
853
+ "step": 1580,
854
+ "token_acc": 0.896800843691485,
855
+ "train_speed(iter/s)": 0.037065
856
+ },
857
+ {
858
+ "epoch": 1.0989180834621328,
859
+ "grad_norm": 0.08192740380764008,
860
+ "learning_rate": 2.475156140503116e-05,
861
+ "loss": 0.30917532444000245,
862
+ "memory(GiB)": 72.41,
863
+ "step": 1600,
864
+ "token_acc": 0.8825165654283423,
865
+ "train_speed(iter/s)": 0.037072
866
+ },
867
+ {
868
+ "epoch": 1.1126567061652068,
869
+ "grad_norm": 0.08665605634450912,
870
+ "learning_rate": 2.4628033185206914e-05,
871
+ "loss": 0.3106253147125244,
872
+ "memory(GiB)": 72.41,
873
+ "step": 1620,
874
+ "token_acc": 0.885648658540145,
875
+ "train_speed(iter/s)": 0.037078
876
+ },
877
+ {
878
+ "epoch": 1.1263953288682809,
879
+ "grad_norm": 0.08092272281646729,
880
+ "learning_rate": 2.4503384366022153e-05,
881
+ "loss": 0.3136306285858154,
882
+ "memory(GiB)": 72.41,
883
+ "step": 1640,
884
+ "token_acc": 0.8739959227894275,
885
+ "train_speed(iter/s)": 0.037085
886
+ },
887
+ {
888
+ "epoch": 1.1401339515713549,
889
+ "grad_norm": 0.08443531394004822,
890
+ "learning_rate": 2.437762945525686e-05,
891
+ "loss": 0.317700719833374,
892
+ "memory(GiB)": 72.41,
893
+ "step": 1660,
894
+ "token_acc": 0.8844603109257694,
895
+ "train_speed(iter/s)": 0.037093
896
+ },
897
+ {
898
+ "epoch": 1.153872574274429,
899
+ "grad_norm": 0.0871417298913002,
900
+ "learning_rate": 2.425078308942815e-05,
901
+ "loss": 0.3168033123016357,
902
+ "memory(GiB)": 72.41,
903
+ "step": 1680,
904
+ "token_acc": 0.8770761500087126,
905
+ "train_speed(iter/s)": 0.0371
906
+ },
907
+ {
908
+ "epoch": 1.167611196977503,
909
+ "grad_norm": 0.0831030011177063,
910
+ "learning_rate": 2.4122860032086763e-05,
911
+ "loss": 0.31378917694091796,
912
+ "memory(GiB)": 72.41,
913
+ "step": 1700,
914
+ "token_acc": 0.8724602675981399,
915
+ "train_speed(iter/s)": 0.037109
916
+ },
917
+ {
918
+ "epoch": 1.181349819680577,
919
+ "grad_norm": 0.08853127807378769,
920
+ "learning_rate": 2.3993875172098737e-05,
921
+ "loss": 0.31873183250427245,
922
+ "memory(GiB)": 72.41,
923
+ "step": 1720,
924
+ "token_acc": 0.887010406125234,
925
+ "train_speed(iter/s)": 0.037115
926
+ },
927
+ {
928
+ "epoch": 1.195088442383651,
929
+ "grad_norm": 0.08741605281829834,
930
+ "learning_rate": 2.3863843521912497e-05,
931
+ "loss": 0.31804475784301756,
932
+ "memory(GiB)": 72.41,
933
+ "step": 1740,
934
+ "token_acc": 0.8780459946059642,
935
+ "train_speed(iter/s)": 0.037121
936
+ },
937
+ {
938
+ "epoch": 1.208827065086725,
939
+ "grad_norm": 0.0871034637093544,
940
+ "learning_rate": 2.3732780215811563e-05,
941
+ "loss": 0.31754317283630373,
942
+ "memory(GiB)": 72.41,
943
+ "step": 1760,
944
+ "token_acc": 0.8814304365386241,
945
+ "train_speed(iter/s)": 0.03713
946
+ },
947
+ {
948
+ "epoch": 1.222565687789799,
949
+ "grad_norm": 0.08191618323326111,
950
+ "learning_rate": 2.3600700508153103e-05,
951
+ "loss": 0.31642465591430663,
952
+ "memory(GiB)": 72.41,
953
+ "step": 1780,
954
+ "token_acc": 0.8742795205201467,
955
+ "train_speed(iter/s)": 0.037136
956
+ },
957
+ {
958
+ "epoch": 1.236304310492873,
959
+ "grad_norm": 0.0853760614991188,
960
+ "learning_rate": 2.346761977159248e-05,
961
+ "loss": 0.31632657051086427,
962
+ "memory(GiB)": 72.41,
963
+ "step": 1800,
964
+ "token_acc": 0.8847685541277756,
965
+ "train_speed(iter/s)": 0.037145
966
+ },
967
+ {
968
+ "epoch": 1.236304310492873,
969
+ "eval_loss": 0.4353775978088379,
970
+ "eval_runtime": 98.8783,
971
+ "eval_samples_per_second": 38.067,
972
+ "eval_steps_per_second": 0.597,
973
+ "eval_token_acc": 0.8432962434627534,
974
+ "step": 1800
975
+ },
976
+ {
977
+ "epoch": 1.250042933195947,
978
+ "grad_norm": 0.08532160520553589,
979
+ "learning_rate": 2.3333553495294033e-05,
980
+ "loss": 0.3085492610931396,
981
+ "memory(GiB)": 72.41,
982
+ "step": 1820,
983
+ "token_acc": 0.8650615076497046,
984
+ "train_speed(iter/s)": 0.037034
985
+ },
986
+ {
987
+ "epoch": 1.263781555899021,
988
+ "grad_norm": 0.09372863918542862,
989
+ "learning_rate": 2.3198517283128316e-05,
990
+ "loss": 0.314247727394104,
991
+ "memory(GiB)": 72.41,
992
+ "step": 1840,
993
+ "token_acc": 0.8870913422078638,
994
+ "train_speed(iter/s)": 0.037033
995
+ },
996
+ {
997
+ "epoch": 1.277520178602095,
998
+ "grad_norm": 0.0852976143360138,
999
+ "learning_rate": 2.3062526851855962e-05,
1000
+ "loss": 0.31310009956359863,
1001
+ "memory(GiB)": 72.41,
1002
+ "step": 1860,
1003
+ "token_acc": 0.8906191502723332,
1004
+ "train_speed(iter/s)": 0.037035
1005
+ },
1006
+ {
1007
+ "epoch": 1.2912588013051691,
1008
+ "grad_norm": 0.07643554359674454,
1009
+ "learning_rate": 2.2925598029298437e-05,
1010
+ "loss": 0.3103055715560913,
1011
+ "memory(GiB)": 72.41,
1012
+ "step": 1880,
1013
+ "token_acc": 0.887677412229967,
1014
+ "train_speed(iter/s)": 0.037042
1015
+ },
1016
+ {
1017
+ "epoch": 1.3049974240082431,
1018
+ "grad_norm": 0.08572836965322495,
1019
+ "learning_rate": 2.278774675249585e-05,
1020
+ "loss": 0.31417174339294435,
1021
+ "memory(GiB)": 72.41,
1022
+ "step": 1900,
1023
+ "token_acc": 0.8894350828946791,
1024
+ "train_speed(iter/s)": 0.037046
1025
+ },
1026
+ {
1027
+ "epoch": 1.3187360467113172,
1028
+ "grad_norm": 0.08305912464857101,
1029
+ "learning_rate": 2.264898906585204e-05,
1030
+ "loss": 0.31093263626098633,
1031
+ "memory(GiB)": 72.41,
1032
+ "step": 1920,
1033
+ "token_acc": 0.8820933517164594,
1034
+ "train_speed(iter/s)": 0.037052
1035
+ },
1036
+ {
1037
+ "epoch": 1.3324746694143912,
1038
+ "grad_norm": 0.08301204442977905,
1039
+ "learning_rate": 2.2509341119267193e-05,
1040
+ "loss": 0.3095247268676758,
1041
+ "memory(GiB)": 72.41,
1042
+ "step": 1940,
1043
+ "token_acc": 0.8671473791714781,
1044
+ "train_speed(iter/s)": 0.037058
1045
+ },
1046
+ {
1047
+ "epoch": 1.3462132921174652,
1048
+ "grad_norm": 0.0815897062420845,
1049
+ "learning_rate": 2.236881916625816e-05,
1050
+ "loss": 0.3098980188369751,
1051
+ "memory(GiB)": 72.41,
1052
+ "step": 1960,
1053
+ "token_acc": 0.8935473891956769,
1054
+ "train_speed(iter/s)": 0.037064
1055
+ },
1056
+ {
1057
+ "epoch": 1.3599519148205392,
1058
+ "grad_norm": 0.08821182698011398,
1059
+ "learning_rate": 2.2227439562066734e-05,
1060
+ "loss": 0.30906736850738525,
1061
+ "memory(GiB)": 72.41,
1062
+ "step": 1980,
1063
+ "token_acc": 0.8789100589878682,
1064
+ "train_speed(iter/s)": 0.037069
1065
+ },
1066
+ {
1067
+ "epoch": 1.3736905375236133,
1068
+ "grad_norm": 0.0886450707912445,
1069
+ "learning_rate": 2.2085218761756058e-05,
1070
+ "loss": 0.3117701768875122,
1071
+ "memory(GiB)": 72.41,
1072
+ "step": 2000,
1073
+ "token_acc": 0.8915136412607484,
1074
+ "train_speed(iter/s)": 0.037071
1075
+ },
1076
+ {
1077
+ "epoch": 1.3874291602266873,
1078
+ "grad_norm": 0.08608590811491013,
1079
+ "learning_rate": 2.1942173318295443e-05,
1080
+ "loss": 0.3138264179229736,
1081
+ "memory(GiB)": 72.41,
1082
+ "step": 2020,
1083
+ "token_acc": 0.8859320703790349,
1084
+ "train_speed(iter/s)": 0.037077
1085
+ },
1086
+ {
1087
+ "epoch": 1.4011677829297613,
1088
+ "grad_norm": 0.07869933545589447,
1089
+ "learning_rate": 2.1798319880633795e-05,
1090
+ "loss": 0.3135652542114258,
1091
+ "memory(GiB)": 72.41,
1092
+ "step": 2040,
1093
+ "token_acc": 0.8910194771797223,
1094
+ "train_speed(iter/s)": 0.037082
1095
+ },
1096
+ {
1097
+ "epoch": 1.4149064056328353,
1098
+ "grad_norm": 0.07948032766580582,
1099
+ "learning_rate": 2.165367519176183e-05,
1100
+ "loss": 0.3114771842956543,
1101
+ "memory(GiB)": 72.41,
1102
+ "step": 2060,
1103
+ "token_acc": 0.888334672346102,
1104
+ "train_speed(iter/s)": 0.037086
1105
+ },
1106
+ {
1107
+ "epoch": 1.4286450283359093,
1108
+ "grad_norm": 0.08114151656627655,
1109
+ "learning_rate": 2.1508256086763372e-05,
1110
+ "loss": 0.3094203948974609,
1111
+ "memory(GiB)": 72.41,
1112
+ "step": 2080,
1113
+ "token_acc": 0.8877183536236418,
1114
+ "train_speed(iter/s)": 0.03709
1115
+ },
1116
+ {
1117
+ "epoch": 1.4423836510389834,
1118
+ "grad_norm": 0.0871092826128006,
1119
+ "learning_rate": 2.1362079490855968e-05,
1120
+ "loss": 0.3111464738845825,
1121
+ "memory(GiB)": 72.41,
1122
+ "step": 2100,
1123
+ "token_acc": 0.881144622390869,
1124
+ "train_speed(iter/s)": 0.037093
1125
+ },
1126
+ {
1127
+ "epoch": 1.4423836510389834,
1128
+ "eval_loss": 0.4344118535518646,
1129
+ "eval_runtime": 99.5043,
1130
+ "eval_samples_per_second": 37.828,
1131
+ "eval_steps_per_second": 0.593,
1132
+ "eval_token_acc": 0.8434381641208087,
1133
+ "step": 2100
1134
+ },
1135
+ {
1136
+ "epoch": 1.4561222737420574,
1137
+ "grad_norm": 0.08234430849552155,
1138
+ "learning_rate": 2.1215162417420926e-05,
1139
+ "loss": 0.3089058637619019,
1140
+ "memory(GiB)": 72.41,
1141
+ "step": 2120,
1142
+ "token_acc": 0.8675401686436827,
1143
+ "train_speed(iter/s)": 0.036998
1144
+ },
1145
+ {
1146
+ "epoch": 1.4698608964451314,
1147
+ "grad_norm": 0.08042703568935394,
1148
+ "learning_rate": 2.1067521966023165e-05,
1149
+ "loss": 0.31057741641998293,
1150
+ "memory(GiB)": 72.41,
1151
+ "step": 2140,
1152
+ "token_acc": 0.8962609916378795,
1153
+ "train_speed(iter/s)": 0.036997
1154
+ },
1155
+ {
1156
+ "epoch": 1.4835995191482054,
1157
+ "grad_norm": 0.08385493606328964,
1158
+ "learning_rate": 2.0919175320421023e-05,
1159
+ "loss": 0.3134245634078979,
1160
+ "memory(GiB)": 72.41,
1161
+ "step": 2160,
1162
+ "token_acc": 0.8868531518893562,
1163
+ "train_speed(iter/s)": 0.036998
1164
+ },
1165
+ {
1166
+ "epoch": 1.4973381418512794,
1167
+ "grad_norm": 0.09416038542985916,
1168
+ "learning_rate": 2.0770139746566223e-05,
1169
+ "loss": 0.31356468200683596,
1170
+ "memory(GiB)": 72.41,
1171
+ "step": 2180,
1172
+ "token_acc": 0.8753653697079176,
1173
+ "train_speed(iter/s)": 0.037001
1174
+ },
1175
+ {
1176
+ "epoch": 1.5110767645543535,
1177
+ "grad_norm": 0.08807655423879623,
1178
+ "learning_rate": 2.062043259059432e-05,
1179
+ "loss": 0.31597309112548827,
1180
+ "memory(GiB)": 72.41,
1181
+ "step": 2200,
1182
+ "token_acc": 0.8919064810265528,
1183
+ "train_speed(iter/s)": 0.037007
1184
+ },
1185
+ {
1186
+ "epoch": 1.5248153872574275,
1187
+ "grad_norm": 0.08815551549196243,
1188
+ "learning_rate": 2.047007127680579e-05,
1189
+ "loss": 0.3196309804916382,
1190
+ "memory(GiB)": 72.41,
1191
+ "step": 2220,
1192
+ "token_acc": 0.8772186268233043,
1193
+ "train_speed(iter/s)": 0.037012
1194
+ },
1195
+ {
1196
+ "epoch": 1.5385540099605015,
1197
+ "grad_norm": 0.08227042853832245,
1198
+ "learning_rate": 2.0319073305638035e-05,
1199
+ "loss": 0.31729488372802733,
1200
+ "memory(GiB)": 72.41,
1201
+ "step": 2240,
1202
+ "token_acc": 0.8858860714860183,
1203
+ "train_speed(iter/s)": 0.037014
1204
+ },
1205
+ {
1206
+ "epoch": 1.5522926326635753,
1207
+ "grad_norm": 0.08253244310617447,
1208
+ "learning_rate": 2.0167456251628524e-05,
1209
+ "loss": 0.31553847789764405,
1210
+ "memory(GiB)": 72.41,
1211
+ "step": 2260,
1212
+ "token_acc": 0.8908496364853541,
1213
+ "train_speed(iter/s)": 0.037016
1214
+ },
1215
+ {
1216
+ "epoch": 1.5660312553666493,
1217
+ "grad_norm": 0.08127789944410324,
1218
+ "learning_rate": 2.00152377613693e-05,
1219
+ "loss": 0.3174169063568115,
1220
+ "memory(GiB)": 72.41,
1221
+ "step": 2280,
1222
+ "token_acc": 0.8759036896828214,
1223
+ "train_speed(iter/s)": 0.037021
1224
+ },
1225
+ {
1226
+ "epoch": 1.5797698780697234,
1227
+ "grad_norm": 0.08351726084947586,
1228
+ "learning_rate": 1.9862435551453103e-05,
1229
+ "loss": 0.31812009811401365,
1230
+ "memory(GiB)": 72.41,
1231
+ "step": 2300,
1232
+ "token_acc": 0.8801280981073656,
1233
+ "train_speed(iter/s)": 0.037028
1234
+ },
1235
+ {
1236
+ "epoch": 1.5935085007727974,
1237
+ "grad_norm": 0.08042768388986588,
1238
+ "learning_rate": 1.9709067406411352e-05,
1239
+ "loss": 0.3188045024871826,
1240
+ "memory(GiB)": 72.41,
1241
+ "step": 2320,
1242
+ "token_acc": 0.8883485418399553,
1243
+ "train_speed(iter/s)": 0.037034
1244
+ },
1245
+ {
1246
+ "epoch": 1.6072471234758714,
1247
+ "grad_norm": 0.0847587063908577,
1248
+ "learning_rate": 1.9555151176644223e-05,
1249
+ "loss": 0.31552605628967284,
1250
+ "memory(GiB)": 72.41,
1251
+ "step": 2340,
1252
+ "token_acc": 0.8933710959011879,
1253
+ "train_speed(iter/s)": 0.03704
1254
+ },
1255
+ {
1256
+ "epoch": 1.6209857461789454,
1257
+ "grad_norm": 0.0849500447511673,
1258
+ "learning_rate": 1.9400704776343047e-05,
1259
+ "loss": 0.3190001010894775,
1260
+ "memory(GiB)": 72.41,
1261
+ "step": 2360,
1262
+ "token_acc": 0.8655127619672538,
1263
+ "train_speed(iter/s)": 0.037046
1264
+ },
1265
+ {
1266
+ "epoch": 1.6347243688820194,
1267
+ "grad_norm": 0.0820319652557373,
1268
+ "learning_rate": 1.9245746181405306e-05,
1269
+ "loss": 0.3157363414764404,
1270
+ "memory(GiB)": 72.41,
1271
+ "step": 2380,
1272
+ "token_acc": 0.8931401676158139,
1273
+ "train_speed(iter/s)": 0.037052
1274
+ },
1275
+ {
1276
+ "epoch": 1.6484629915850935,
1277
+ "grad_norm": 0.07777854800224304,
1278
+ "learning_rate": 1.9090293427342406e-05,
1279
+ "loss": 0.30912251472473146,
1280
+ "memory(GiB)": 72.41,
1281
+ "step": 2400,
1282
+ "token_acc": 0.8933260366449716,
1283
+ "train_speed(iter/s)": 0.037059
1284
+ },
1285
+ {
1286
+ "epoch": 1.6484629915850935,
1287
+ "eval_loss": 0.43367844820022583,
1288
+ "eval_runtime": 99.7879,
1289
+ "eval_samples_per_second": 37.72,
1290
+ "eval_steps_per_second": 0.591,
1291
+ "eval_token_acc": 0.843613657031226,
1292
+ "step": 2400
1293
+ },
1294
+ {
1295
+ "epoch": 1.6622016142881675,
1296
+ "grad_norm": 0.08155303448438644,
1297
+ "learning_rate": 1.893436460718056e-05,
1298
+ "loss": 0.3163402795791626,
1299
+ "memory(GiB)": 72.41,
1300
+ "step": 2420,
1301
+ "token_acc": 0.8672117073299662,
1302
+ "train_speed(iter/s)": 0.036975
1303
+ },
1304
+ {
1305
+ "epoch": 1.6759402369912415,
1306
+ "grad_norm": 0.08382421731948853,
1307
+ "learning_rate": 1.877797786935495e-05,
1308
+ "loss": 0.3165715217590332,
1309
+ "memory(GiB)": 72.41,
1310
+ "step": 2440,
1311
+ "token_acc": 0.8854491510650321,
1312
+ "train_speed(iter/s)": 0.03697
1313
+ },
1314
+ {
1315
+ "epoch": 1.6896788596943155,
1316
+ "grad_norm": 0.08178658783435822,
1317
+ "learning_rate": 1.862115141559744e-05,
1318
+ "loss": 0.3171123504638672,
1319
+ "memory(GiB)": 72.41,
1320
+ "step": 2460,
1321
+ "token_acc": 0.8836779780841286,
1322
+ "train_speed(iter/s)": 0.036973
1323
+ },
1324
+ {
1325
+ "epoch": 1.7034174823973895,
1326
+ "grad_norm": 0.07710675150156021,
1327
+ "learning_rate": 1.8463903498818088e-05,
1328
+ "loss": 0.31471326351165774,
1329
+ "memory(GiB)": 72.41,
1330
+ "step": 2480,
1331
+ "token_acc": 0.8908230830682876,
1332
+ "train_speed(iter/s)": 0.036976
1333
+ },
1334
+ {
1335
+ "epoch": 1.7171561051004636,
1336
+ "grad_norm": 0.08034602552652359,
1337
+ "learning_rate": 1.8306252420980704e-05,
1338
+ "loss": 0.31853632926940917,
1339
+ "memory(GiB)": 72.41,
1340
+ "step": 2500,
1341
+ "token_acc": 0.8883582169845952,
1342
+ "train_speed(iter/s)": 0.036978
1343
+ },
1344
+ {
1345
+ "epoch": 1.7308947278035376,
1346
+ "grad_norm": 0.07948100566864014,
1347
+ "learning_rate": 1.8148216530972714e-05,
1348
+ "loss": 0.3109827995300293,
1349
+ "memory(GiB)": 72.41,
1350
+ "step": 2520,
1351
+ "token_acc": 0.8949681174869483,
1352
+ "train_speed(iter/s)": 0.036981
1353
+ },
1354
+ {
1355
+ "epoch": 1.7446333505066116,
1356
+ "grad_norm": 0.07899336516857147,
1357
+ "learning_rate": 1.7989814222469538e-05,
1358
+ "loss": 0.3090771436691284,
1359
+ "memory(GiB)": 72.41,
1360
+ "step": 2540,
1361
+ "token_acc": 0.8906750005261931,
1362
+ "train_speed(iter/s)": 0.036983
1363
+ },
1364
+ {
1365
+ "epoch": 1.7583719732096856,
1366
+ "grad_norm": 0.07443471997976303,
1367
+ "learning_rate": 1.783106393179375e-05,
1368
+ "loss": 0.31173481941223147,
1369
+ "memory(GiB)": 72.41,
1370
+ "step": 2560,
1371
+ "token_acc": 0.8891939493597887,
1372
+ "train_speed(iter/s)": 0.036988
1373
+ },
1374
+ {
1375
+ "epoch": 1.7721105959127597,
1376
+ "grad_norm": 0.07555528730154037,
1377
+ "learning_rate": 1.767198413576931e-05,
1378
+ "loss": 0.30927410125732424,
1379
+ "memory(GiB)": 72.41,
1380
+ "step": 2580,
1381
+ "token_acc": 0.8850328545945815,
1382
+ "train_speed(iter/s)": 0.036994
1383
+ },
1384
+ {
1385
+ "epoch": 1.7858492186158337,
1386
+ "grad_norm": 0.08017897605895996,
1387
+ "learning_rate": 1.7512593349571046e-05,
1388
+ "loss": 0.31209754943847656,
1389
+ "memory(GiB)": 72.41,
1390
+ "step": 2600,
1391
+ "token_acc": 0.8816632260591382,
1392
+ "train_speed(iter/s)": 0.036998
1393
+ },
1394
+ {
1395
+ "epoch": 1.7995878413189077,
1396
+ "grad_norm": 0.07677578181028366,
1397
+ "learning_rate": 1.7352910124569695e-05,
1398
+ "loss": 0.30882983207702636,
1399
+ "memory(GiB)": 72.41,
1400
+ "step": 2620,
1401
+ "token_acc": 0.8925267013383078,
1402
+ "train_speed(iter/s)": 0.037
1403
+ },
1404
+ {
1405
+ "epoch": 1.8133264640219817,
1406
+ "grad_norm": 0.07692938297986984,
1407
+ "learning_rate": 1.7192953046172726e-05,
1408
+ "loss": 0.3074300289154053,
1409
+ "memory(GiB)": 72.41,
1410
+ "step": 2640,
1411
+ "token_acc": 0.8861350676140611,
1412
+ "train_speed(iter/s)": 0.037005
1413
+ },
1414
+ {
1415
+ "epoch": 1.8270650867250557,
1416
+ "grad_norm": 0.07619909197092056,
1417
+ "learning_rate": 1.7032740731661178e-05,
1418
+ "loss": 0.30927472114562987,
1419
+ "memory(GiB)": 72.41,
1420
+ "step": 2660,
1421
+ "token_acc": 0.8921508449028042,
1422
+ "train_speed(iter/s)": 0.037009
1423
+ },
1424
+ {
1425
+ "epoch": 1.8408037094281298,
1426
+ "grad_norm": 0.08186180889606476,
1427
+ "learning_rate": 1.687229182802284e-05,
1428
+ "loss": 0.3076324939727783,
1429
+ "memory(GiB)": 72.41,
1430
+ "step": 2680,
1431
+ "token_acc": 0.874112111934862,
1432
+ "train_speed(iter/s)": 0.037013
1433
+ },
1434
+ {
1435
+ "epoch": 1.8545423321312038,
1436
+ "grad_norm": 0.0749615728855133,
1437
+ "learning_rate": 1.6711625009781926e-05,
1438
+ "loss": 0.3025542736053467,
1439
+ "memory(GiB)": 72.41,
1440
+ "step": 2700,
1441
+ "token_acc": 0.9005740784776456,
1442
+ "train_speed(iter/s)": 0.037016
1443
+ },
1444
+ {
1445
+ "epoch": 1.8545423321312038,
1446
+ "eval_loss": 0.43260329961776733,
1447
+ "eval_runtime": 99.7086,
1448
+ "eval_samples_per_second": 37.75,
1449
+ "eval_steps_per_second": 0.592,
1450
+ "eval_token_acc": 0.8437799954640701,
1451
+ "step": 2700
1452
+ },
1453
+ {
1454
+ "epoch": 1.8682809548342778,
1455
+ "grad_norm": 0.07678617537021637,
1456
+ "learning_rate": 1.655075897682555e-05,
1457
+ "loss": 0.3069960117340088,
1458
+ "memory(GiB)": 72.41,
1459
+ "step": 2720,
1460
+ "token_acc": 0.8656336346071796,
1461
+ "train_speed(iter/s)": 0.036945
1462
+ },
1463
+ {
1464
+ "epoch": 1.8820195775373518,
1465
+ "grad_norm": 0.08224895596504211,
1466
+ "learning_rate": 1.6389712452227295e-05,
1467
+ "loss": 0.31150364875793457,
1468
+ "memory(GiB)": 72.41,
1469
+ "step": 2740,
1470
+ "token_acc": 0.8871026948734946,
1471
+ "train_speed(iter/s)": 0.036944
1472
+ },
1473
+ {
1474
+ "epoch": 1.8957582002404259,
1475
+ "grad_norm": 0.07674538344144821,
1476
+ "learning_rate": 1.6228504180068003e-05,
1477
+ "loss": 0.31361680030822753,
1478
+ "memory(GiB)": 72.41,
1479
+ "step": 2760,
1480
+ "token_acc": 0.8885069679173144,
1481
+ "train_speed(iter/s)": 0.036944
1482
+ },
1483
+ {
1484
+ "epoch": 1.9094968229434999,
1485
+ "grad_norm": 0.07724355906248093,
1486
+ "learning_rate": 1.60671529232542e-05,
1487
+ "loss": 0.31092076301574706,
1488
+ "memory(GiB)": 72.41,
1489
+ "step": 2780,
1490
+ "token_acc": 0.8775532573683428,
1491
+ "train_speed(iter/s)": 0.036948
1492
+ },
1493
+ {
1494
+ "epoch": 1.923235445646574,
1495
+ "grad_norm": 0.0748470202088356,
1496
+ "learning_rate": 1.5905677461334292e-05,
1497
+ "loss": 0.3125690698623657,
1498
+ "memory(GiB)": 72.41,
1499
+ "step": 2800,
1500
+ "token_acc": 0.8846377126342211,
1501
+ "train_speed(iter/s)": 0.036916
1502
+ },
1503
+ {
1504
+ "epoch": 1.936974068349648,
1505
+ "grad_norm": 0.08404634892940521,
1506
+ "learning_rate": 1.574409658831281e-05,
1507
+ "loss": 0.3153404235839844,
1508
+ "memory(GiB)": 72.41,
1509
+ "step": 2820,
1510
+ "token_acc": 0.8762131944710342,
1511
+ "train_speed(iter/s)": 0.036918
1512
+ },
1513
+ {
1514
+ "epoch": 1.950712691052722,
1515
+ "grad_norm": 0.07959295809268951,
1516
+ "learning_rate": 1.558242911046302e-05,
1517
+ "loss": 0.31249830722808836,
1518
+ "memory(GiB)": 72.41,
1519
+ "step": 2840,
1520
+ "token_acc": 0.8892035392544179,
1521
+ "train_speed(iter/s)": 0.036921
1522
+ },
1523
+ {
1524
+ "epoch": 1.964451313755796,
1525
+ "grad_norm": 0.08044803887605667,
1526
+ "learning_rate": 1.5420693844138036e-05,
1527
+ "loss": 0.3130341053009033,
1528
+ "memory(GiB)": 72.41,
1529
+ "step": 2860,
1530
+ "token_acc": 0.8932265094341124,
1531
+ "train_speed(iter/s)": 0.036924
1532
+ },
1533
+ {
1534
+ "epoch": 1.97818993645887,
1535
+ "grad_norm": 0.07583785802125931,
1536
+ "learning_rate": 1.525890961358083e-05,
1537
+ "loss": 0.3141756772994995,
1538
+ "memory(GiB)": 72.41,
1539
+ "step": 2880,
1540
+ "token_acc": 0.8839660044002189,
1541
+ "train_speed(iter/s)": 0.036928
1542
+ },
1543
+ {
1544
+ "epoch": 1.991928559161944,
1545
+ "grad_norm": 0.07464556396007538,
1546
+ "learning_rate": 1.5097095248733284e-05,
1547
+ "loss": 0.31082568168640134,
1548
+ "memory(GiB)": 72.41,
1549
+ "step": 2900,
1550
+ "token_acc": 0.8775183645838733,
1551
+ "train_speed(iter/s)": 0.036932
1552
+ },
1553
+ {
1554
+ "epoch": 2.00549544908123,
1555
+ "grad_norm": 0.12503303587436676,
1556
+ "learning_rate": 1.4935269583044581e-05,
1557
+ "loss": 0.2993995904922485,
1558
+ "memory(GiB)": 72.41,
1559
+ "step": 2920,
1560
+ "token_acc": 0.8823204490957476,
1561
+ "train_speed(iter/s)": 0.036937
1562
+ },
1563
+ {
1564
+ "epoch": 2.019234071784304,
1565
+ "grad_norm": 0.08332613110542297,
1566
+ "learning_rate": 1.4773451451279213e-05,
1567
+ "loss": 0.29198360443115234,
1568
+ "memory(GiB)": 72.41,
1569
+ "step": 2940,
1570
+ "token_acc": 0.8980348203187491,
1571
+ "train_speed(iter/s)": 0.036941
1572
+ },
1573
+ {
1574
+ "epoch": 2.032972694487378,
1575
+ "grad_norm": 0.08240070939064026,
1576
+ "learning_rate": 1.461165968732479e-05,
1577
+ "loss": 0.2935274362564087,
1578
+ "memory(GiB)": 72.41,
1579
+ "step": 2960,
1580
+ "token_acc": 0.8904662128095143,
1581
+ "train_speed(iter/s)": 0.036946
1582
+ },
1583
+ {
1584
+ "epoch": 2.046711317190452,
1585
+ "grad_norm": 0.08113058656454086,
1586
+ "learning_rate": 1.4449913122000005e-05,
1587
+ "loss": 0.29198508262634276,
1588
+ "memory(GiB)": 72.41,
1589
+ "step": 2980,
1590
+ "token_acc": 0.8908448858293387,
1591
+ "train_speed(iter/s)": 0.036953
1592
+ },
1593
+ {
1594
+ "epoch": 2.060449939893526,
1595
+ "grad_norm": 0.08105536550283432,
1596
+ "learning_rate": 1.4288230580862905e-05,
1597
+ "loss": 0.290987491607666,
1598
+ "memory(GiB)": 72.41,
1599
+ "step": 3000,
1600
+ "token_acc": 0.8764201959142056,
1601
+ "train_speed(iter/s)": 0.036958
1602
+ },
1603
+ {
1604
+ "epoch": 2.060449939893526,
1605
+ "eval_loss": 0.4393101930618286,
1606
+ "eval_runtime": 100.5668,
1607
+ "eval_samples_per_second": 37.428,
1608
+ "eval_steps_per_second": 0.587,
1609
+ "eval_token_acc": 0.84265782805066,
1610
+ "step": 3000
1611
+ },
1612
+ {
1613
+ "epoch": 2.0741885625966,
1614
+ "grad_norm": 0.08123844116926193,
1615
+ "learning_rate": 1.412663088201982e-05,
1616
+ "loss": 0.29090156555175783,
1617
+ "memory(GiB)": 72.41,
1618
+ "step": 3020,
1619
+ "token_acc": 0.8735638063478551,
1620
+ "train_speed(iter/s)": 0.036892
1621
+ },
1622
+ {
1623
+ "epoch": 2.087927185299674,
1624
+ "grad_norm": 0.0814339891076088,
1625
+ "learning_rate": 1.3965132833935126e-05,
1626
+ "loss": 0.2902204990386963,
1627
+ "memory(GiB)": 72.41,
1628
+ "step": 3040,
1629
+ "token_acc": 0.8839492383548759,
1630
+ "train_speed(iter/s)": 0.036893
1631
+ },
1632
+ {
1633
+ "epoch": 2.101665808002748,
1634
+ "grad_norm": 0.0760771632194519,
1635
+ "learning_rate": 1.380375523324215e-05,
1636
+ "loss": 0.29666552543640134,
1637
+ "memory(GiB)": 72.41,
1638
+ "step": 3060,
1639
+ "token_acc": 0.8921821581883145,
1640
+ "train_speed(iter/s)": 0.036894
1641
+ },
1642
+ {
1643
+ "epoch": 2.115404430705822,
1644
+ "grad_norm": 0.0808984562754631,
1645
+ "learning_rate": 1.3642516862555433e-05,
1646
+ "loss": 0.28961887359619143,
1647
+ "memory(GiB)": 72.41,
1648
+ "step": 3080,
1649
+ "token_acc": 0.8996385382943057,
1650
+ "train_speed(iter/s)": 0.036898
1651
+ },
1652
+ {
1653
+ "epoch": 2.129143053408896,
1654
+ "grad_norm": 0.08168598264455795,
1655
+ "learning_rate": 1.3481436488284648e-05,
1656
+ "loss": 0.2952747821807861,
1657
+ "memory(GiB)": 72.41,
1658
+ "step": 3100,
1659
+ "token_acc": 0.8932001882680204,
1660
+ "train_speed(iter/s)": 0.036901
1661
+ },
1662
+ {
1663
+ "epoch": 2.14288167611197,
1664
+ "grad_norm": 0.08384265005588531,
1665
+ "learning_rate": 1.3320532858450382e-05,
1666
+ "loss": 0.29767014980316164,
1667
+ "memory(GiB)": 72.41,
1668
+ "step": 3120,
1669
+ "token_acc": 0.8847414688023099,
1670
+ "train_speed(iter/s)": 0.036904
1671
+ },
1672
+ {
1673
+ "epoch": 2.156620298815044,
1674
+ "grad_norm": 0.08613137155771255,
1675
+ "learning_rate": 1.3159824700502083e-05,
1676
+ "loss": 0.2987870693206787,
1677
+ "memory(GiB)": 72.41,
1678
+ "step": 3140,
1679
+ "token_acc": 0.8912630847005318,
1680
+ "train_speed(iter/s)": 0.036909
1681
+ },
1682
+ {
1683
+ "epoch": 2.1703589215181176,
1684
+ "grad_norm": 0.08044470101594925,
1685
+ "learning_rate": 1.2999330719138363e-05,
1686
+ "loss": 0.29793477058410645,
1687
+ "memory(GiB)": 72.41,
1688
+ "step": 3160,
1689
+ "token_acc": 0.8911335210006078,
1690
+ "train_speed(iter/s)": 0.036914
1691
+ },
1692
+ {
1693
+ "epoch": 2.1840975442211916,
1694
+ "grad_norm": 0.0807594358921051,
1695
+ "learning_rate": 1.283906959413e-05,
1696
+ "loss": 0.2947986125946045,
1697
+ "memory(GiB)": 72.41,
1698
+ "step": 3180,
1699
+ "token_acc": 0.8855782459322568,
1700
+ "train_speed(iter/s)": 0.036917
1701
+ },
1702
+ {
1703
+ "epoch": 2.1978361669242656,
1704
+ "grad_norm": 0.08066173642873764,
1705
+ "learning_rate": 1.267905997814578e-05,
1706
+ "loss": 0.2977961778640747,
1707
+ "memory(GiB)": 72.41,
1708
+ "step": 3200,
1709
+ "token_acc": 0.8803478438446615,
1710
+ "train_speed(iter/s)": 0.036922
1711
+ },
1712
+ {
1713
+ "epoch": 2.2115747896273397,
1714
+ "grad_norm": 0.08045271784067154,
1715
+ "learning_rate": 1.2519320494581581e-05,
1716
+ "loss": 0.29424285888671875,
1717
+ "memory(GiB)": 72.41,
1718
+ "step": 3220,
1719
+ "token_acc": 0.8828892872837293,
1720
+ "train_speed(iter/s)": 0.036927
1721
+ },
1722
+ {
1723
+ "epoch": 2.2253134123304137,
1724
+ "grad_norm": 0.08237405866384506,
1725
+ "learning_rate": 1.2359869735392746e-05,
1726
+ "loss": 0.29676170349121095,
1727
+ "memory(GiB)": 72.41,
1728
+ "step": 3240,
1729
+ "token_acc": 0.8954581030873394,
1730
+ "train_speed(iter/s)": 0.036931
1731
+ },
1732
+ {
1733
+ "epoch": 2.2390520350334877,
1734
+ "grad_norm": 0.08597618341445923,
1735
+ "learning_rate": 1.220072625893023e-05,
1736
+ "loss": 0.296732759475708,
1737
+ "memory(GiB)": 72.41,
1738
+ "step": 3260,
1739
+ "token_acc": 0.8882645330425789,
1740
+ "train_speed(iter/s)": 0.036937
1741
+ },
1742
+ {
1743
+ "epoch": 2.2527906577365617,
1744
+ "grad_norm": 0.07742371410131454,
1745
+ "learning_rate": 1.2041908587780571e-05,
1746
+ "loss": 0.293271803855896,
1747
+ "memory(GiB)": 72.41,
1748
+ "step": 3280,
1749
+ "token_acc": 0.8859188183637006,
1750
+ "train_speed(iter/s)": 0.036942
1751
+ },
1752
+ {
1753
+ "epoch": 2.2665292804396358,
1754
+ "grad_norm": 0.07885393500328064,
1755
+ "learning_rate": 1.1883435206610095e-05,
1756
+ "loss": 0.29781594276428225,
1757
+ "memory(GiB)": 72.41,
1758
+ "step": 3300,
1759
+ "token_acc": 0.8873998820001778,
1760
+ "train_speed(iter/s)": 0.036945
1761
+ },
1762
+ {
1763
+ "epoch": 2.2665292804396358,
1764
+ "eval_loss": 0.4392697215080261,
1765
+ "eval_runtime": 99.0852,
1766
+ "eval_samples_per_second": 37.988,
1767
+ "eval_steps_per_second": 0.595,
1768
+ "eval_token_acc": 0.8427081339178593,
1769
+ "step": 3300
1770
+ }
1771
+ ],
1772
+ "logging_steps": 20,
1773
+ "max_steps": 5824,
1774
+ "num_input_tokens_seen": 0,
1775
+ "num_train_epochs": 4,
1776
+ "save_steps": 300,
1777
+ "stateful_callbacks": {
1778
+ "TrainerControl": {
1779
+ "args": {
1780
+ "should_epoch_stop": false,
1781
+ "should_evaluate": false,
1782
+ "should_log": false,
1783
+ "should_save": true,
1784
+ "should_training_stop": false
1785
+ },
1786
+ "attributes": {}
1787
+ }
1788
+ },
1789
+ "total_flos": 2.596186893058048e+16,
1790
+ "train_batch_size": 1,
1791
+ "trial_name": null,
1792
+ "trial_params": null
1793
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13bbf28e790c5ff417dd7db694a26752dccc43cd8db6cb53667f26f9890b9240
3
+ size 8248
vocab.json ADDED
The diff for this file is too large to render. See raw diff
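For reference, the log records in the diff above follow the standard Hugging Face `Trainer` state layout. Below is a minimal sketch, not part of the upload, for pulling the train/eval loss curves back out of it; it assumes the file is the usual `trainer_state.json` with its per-step records stored under `log_history`, and that a local copy is available next to the checkpoint.

```python
import json

# Load the trainer state that was uploaded alongside the checkpoint
# (filename assumed to be the standard trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records carry "loss"; periodic eval records carry "eval_loss".
train = [(rec["step"], rec["loss"]) for rec in state["log_history"] if "loss" in rec]
evals = [(rec["step"], rec["eval_loss"]) for rec in state["log_history"] if "eval_loss" in rec]

print("last logged train loss:", train[-1])
print("eval checkpoints:", evals)
```

Run against this checkpoint, the eval entries would surface the evaluations logged every 300 steps in the diff above (e.g. step 3300 with eval_loss ≈ 0.439).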