{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014285714285714285,
      "grad_norm": 1.993065357208252,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 2.6875,
      "step": 20
    },
    {
      "epoch": 0.02857142857142857,
      "grad_norm": 1.730460524559021,
      "learning_rate": 2.857142857142857e-05,
      "loss": 2.2829,
      "step": 40
    },
    {
      "epoch": 0.04285714285714286,
      "grad_norm": 1.8479743003845215,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 1.7818,
      "step": 60
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 1.4088687896728516,
      "learning_rate": 5.714285714285714e-05,
      "loss": 1.2963,
      "step": 80
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 1.4508253335952759,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.0661,
      "step": 100
    },
    {
      "epoch": 0.08571428571428572,
      "grad_norm": 1.4717705249786377,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.9221,
      "step": 120
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.364703893661499,
      "learning_rate": 0.0001,
      "loss": 0.8801,
      "step": 140
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 1.5449830293655396,
      "learning_rate": 9.841269841269841e-05,
      "loss": 0.8574,
      "step": 160
    },
    {
      "epoch": 0.12857142857142856,
      "grad_norm": 1.3663296699523926,
      "learning_rate": 9.682539682539682e-05,
      "loss": 0.77,
      "step": 180
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 1.585909128189087,
      "learning_rate": 9.523809523809524e-05,
      "loss": 0.7391,
      "step": 200
    },
    {
      "epoch": 0.15714285714285714,
      "grad_norm": 1.2754377126693726,
      "learning_rate": 9.365079365079366e-05,
      "loss": 0.6384,
      "step": 220
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 1.4652817249298096,
      "learning_rate": 9.206349206349206e-05,
      "loss": 0.6413,
      "step": 240
    },
    {
      "epoch": 0.18571428571428572,
      "grad_norm": 1.5677884817123413,
      "learning_rate": 9.047619047619048e-05,
      "loss": 0.6228,
      "step": 260
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.7060600519180298,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.6543,
      "step": 280
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 1.4226174354553223,
      "learning_rate": 8.730158730158731e-05,
      "loss": 0.6236,
      "step": 300
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 1.5981993675231934,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.6275,
      "step": 320
    },
    {
      "epoch": 0.24285714285714285,
      "grad_norm": 1.336606502532959,
      "learning_rate": 8.412698412698413e-05,
      "loss": 0.592,
      "step": 340
    },
    {
      "epoch": 0.2571428571428571,
      "grad_norm": 1.4357742071151733,
      "learning_rate": 8.253968253968255e-05,
      "loss": 0.5973,
      "step": 360
    },
    {
      "epoch": 0.2714285714285714,
      "grad_norm": 1.2808163166046143,
      "learning_rate": 8.095238095238096e-05,
      "loss": 0.6327,
      "step": 380
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 1.493129849433899,
      "learning_rate": 7.936507936507937e-05,
      "loss": 0.6172,
      "step": 400
    },
    {
      "epoch": 0.3,
      "grad_norm": 1.570550560951233,
      "learning_rate": 7.777777777777778e-05,
      "loss": 0.6201,
      "step": 420
    },
    {
      "epoch": 0.3142857142857143,
      "grad_norm": 1.8142577409744263,
      "learning_rate": 7.619047619047618e-05,
      "loss": 0.6119,
      "step": 440
    },
    {
      "epoch": 0.32857142857142857,
      "grad_norm": 1.5308444499969482,
      "learning_rate": 7.460317460317461e-05,
      "loss": 0.5743,
      "step": 460
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 1.669749140739441,
      "learning_rate": 7.301587301587302e-05,
      "loss": 0.5811,
      "step": 480
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 1.7088674306869507,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.5639,
      "step": 500
    },
    {
      "epoch": 0.37142857142857144,
      "grad_norm": 1.4929900169372559,
      "learning_rate": 6.984126984126984e-05,
      "loss": 0.5602,
      "step": 520
    },
    {
      "epoch": 0.38571428571428573,
      "grad_norm": 1.5759623050689697,
      "learning_rate": 6.825396825396825e-05,
      "loss": 0.5375,
      "step": 540
    },
    {
      "epoch": 0.4,
      "grad_norm": 1.1905630826950073,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.528,
      "step": 560
    },
    {
      "epoch": 0.4142857142857143,
      "grad_norm": 1.3444722890853882,
      "learning_rate": 6.507936507936509e-05,
      "loss": 0.5504,
      "step": 580
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 1.388431429862976,
      "learning_rate": 6.349206349206349e-05,
      "loss": 0.5425,
      "step": 600
    },
    {
      "epoch": 0.44285714285714284,
      "grad_norm": 1.4573171138763428,
      "learning_rate": 6.19047619047619e-05,
      "loss": 0.5461,
      "step": 620
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 1.4060829877853394,
      "learning_rate": 6.0317460317460316e-05,
      "loss": 0.5259,
      "step": 640
    },
    {
      "epoch": 0.4714285714285714,
      "grad_norm": 1.3451800346374512,
      "learning_rate": 5.873015873015873e-05,
      "loss": 0.5244,
      "step": 660
    },
    {
      "epoch": 0.4857142857142857,
      "grad_norm": 1.5249203443527222,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.5214,
      "step": 680
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.387425422668457,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.5014,
      "step": 700
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 1.4318238496780396,
      "learning_rate": 5.396825396825397e-05,
      "loss": 0.553,
      "step": 720
    },
    {
      "epoch": 0.5285714285714286,
      "grad_norm": 1.4433730840682983,
      "learning_rate": 5.2380952380952384e-05,
      "loss": 0.5273,
      "step": 740
    },
    {
      "epoch": 0.5428571428571428,
      "grad_norm": 1.4052830934524536,
      "learning_rate": 5.0793650793650794e-05,
      "loss": 0.5331,
      "step": 760
    },
    {
      "epoch": 0.5571428571428572,
      "grad_norm": 1.4758517742156982,
      "learning_rate": 4.9206349206349204e-05,
      "loss": 0.5138,
      "step": 780
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.4489214420318604,
      "learning_rate": 4.761904761904762e-05,
      "loss": 0.5194,
      "step": 800
    },
    {
      "epoch": 0.5857142857142857,
      "grad_norm": 1.4176747798919678,
      "learning_rate": 4.603174603174603e-05,
      "loss": 0.5288,
      "step": 820
    },
    {
      "epoch": 0.6,
      "grad_norm": 1.6866075992584229,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.5479,
      "step": 840
    },
    {
      "epoch": 0.6142857142857143,
      "grad_norm": 1.6059644222259521,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.5363,
      "step": 860
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 1.455381989479065,
      "learning_rate": 4.126984126984127e-05,
      "loss": 0.5019,
      "step": 880
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 1.6726956367492676,
      "learning_rate": 3.968253968253968e-05,
      "loss": 0.5084,
      "step": 900
    },
    {
      "epoch": 0.6571428571428571,
      "grad_norm": 1.8781988620758057,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.5251,
      "step": 920
    },
    {
      "epoch": 0.6714285714285714,
      "grad_norm": 1.7066335678100586,
      "learning_rate": 3.650793650793651e-05,
      "loss": 0.4725,
      "step": 940
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 1.7107722759246826,
      "learning_rate": 3.492063492063492e-05,
      "loss": 0.5251,
      "step": 960
    },
    {
      "epoch": 0.7,
      "grad_norm": 1.4758689403533936,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.531,
      "step": 980
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.3672478199005127,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.5009,
      "step": 1000
    },
    {
      "epoch": 0.7285714285714285,
      "grad_norm": 1.4896430969238281,
      "learning_rate": 3.0158730158730158e-05,
      "loss": 0.5204,
      "step": 1020
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 1.2683701515197754,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.4953,
      "step": 1040
    },
    {
      "epoch": 0.7571428571428571,
      "grad_norm": 1.4984241724014282,
      "learning_rate": 2.6984126984126984e-05,
      "loss": 0.4819,
      "step": 1060
    },
    {
      "epoch": 0.7714285714285715,
      "grad_norm": 1.516722321510315,
      "learning_rate": 2.5396825396825397e-05,
      "loss": 0.4815,
      "step": 1080
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 1.7547037601470947,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.4803,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.368112564086914,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.4984,
      "step": 1120
    },
    {
      "epoch": 0.8142857142857143,
      "grad_norm": 1.5048097372055054,
      "learning_rate": 2.0634920634920636e-05,
      "loss": 0.4843,
      "step": 1140
    },
    {
      "epoch": 0.8285714285714286,
      "grad_norm": 1.5499813556671143,
      "learning_rate": 1.9047619047619046e-05,
      "loss": 0.4645,
      "step": 1160
    },
    {
      "epoch": 0.8428571428571429,
      "grad_norm": 1.5108025074005127,
      "learning_rate": 1.746031746031746e-05,
      "loss": 0.4868,
      "step": 1180
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.5287548303604126,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.4726,
      "step": 1200
    },
    {
      "epoch": 0.8714285714285714,
      "grad_norm": 1.441148042678833,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.4755,
      "step": 1220
    },
    {
      "epoch": 0.8857142857142857,
      "grad_norm": 1.4745419025421143,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 0.4786,
      "step": 1240
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.432034969329834,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.4827,
      "step": 1260
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 1.4481098651885986,
      "learning_rate": 9.523809523809523e-06,
      "loss": 0.4761,
      "step": 1280
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 1.5955500602722168,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.4582,
      "step": 1300
    },
    {
      "epoch": 0.9428571428571428,
      "grad_norm": 1.6500182151794434,
      "learning_rate": 6.349206349206349e-06,
      "loss": 0.492,
      "step": 1320
    },
    {
      "epoch": 0.9571428571428572,
      "grad_norm": 1.5469691753387451,
      "learning_rate": 4.7619047619047615e-06,
      "loss": 0.4797,
      "step": 1340
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 1.6121824979782104,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.4938,
      "step": 1360
    },
    {
      "epoch": 0.9857142857142858,
      "grad_norm": 1.512402892112732,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.4829,
      "step": 1380
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.7282683849334717,
      "learning_rate": 0.0,
      "loss": 0.4737,
      "step": 1400
    }
  ],
  "logging_steps": 20,
  "max_steps": 1400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8419093040332800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}