{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014285714285714285,
      "grad_norm": 1.8742432594299316,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 2.6888,
      "step": 20
    },
    {
      "epoch": 0.02857142857142857,
      "grad_norm": 1.6443878412246704,
      "learning_rate": 2.857142857142857e-05,
      "loss": 2.289,
      "step": 40
    },
    {
      "epoch": 0.04285714285714286,
      "grad_norm": 1.8094583749771118,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 1.7939,
      "step": 60
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 1.4206470251083374,
      "learning_rate": 5.714285714285714e-05,
      "loss": 1.312,
      "step": 80
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 1.4811224937438965,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.0735,
      "step": 100
    },
    {
      "epoch": 0.08571428571428572,
      "grad_norm": 1.4770212173461914,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.9261,
      "step": 120
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.3422306776046753,
      "learning_rate": 0.0001,
      "loss": 0.882,
      "step": 140
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 1.5494495630264282,
      "learning_rate": 9.841269841269841e-05,
      "loss": 0.8599,
      "step": 160
    },
    {
      "epoch": 0.12857142857142856,
      "grad_norm": 1.4366521835327148,
      "learning_rate": 9.682539682539682e-05,
      "loss": 0.7784,
      "step": 180
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 1.731536865234375,
      "learning_rate": 9.523809523809524e-05,
      "loss": 0.7404,
      "step": 200
    },
    {
      "epoch": 0.15714285714285714,
      "grad_norm": 1.2290751934051514,
      "learning_rate": 9.365079365079366e-05,
      "loss": 0.6393,
      "step": 220
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 1.5464402437210083,
      "learning_rate": 9.206349206349206e-05,
      "loss": 0.6414,
      "step": 240
    },
    {
      "epoch": 0.18571428571428572,
      "grad_norm": 1.628503680229187,
      "learning_rate": 9.047619047619048e-05,
      "loss": 0.6233,
      "step": 260
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.8485362529754639,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.6558,
      "step": 280
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 1.4331471920013428,
      "learning_rate": 8.730158730158731e-05,
      "loss": 0.624,
      "step": 300
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 1.6106165647506714,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.6276,
      "step": 320
    },
    {
      "epoch": 0.24285714285714285,
      "grad_norm": 1.328774094581604,
      "learning_rate": 8.412698412698413e-05,
      "loss": 0.5928,
      "step": 340
    },
    {
      "epoch": 0.2571428571428571,
      "grad_norm": 1.4172090291976929,
      "learning_rate": 8.253968253968255e-05,
      "loss": 0.597,
      "step": 360
    },
    {
      "epoch": 0.2714285714285714,
      "grad_norm": 1.2621384859085083,
      "learning_rate": 8.095238095238096e-05,
      "loss": 0.6336,
      "step": 380
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 1.4633510112762451,
      "learning_rate": 7.936507936507937e-05,
      "loss": 0.6165,
      "step": 400
    },
    {
      "epoch": 0.3,
      "grad_norm": 1.5444340705871582,
      "learning_rate": 7.777777777777778e-05,
      "loss": 0.6204,
      "step": 420
    },
    {
      "epoch": 0.3142857142857143,
      "grad_norm": 1.758965253829956,
      "learning_rate": 7.619047619047618e-05,
      "loss": 0.6122,
      "step": 440
    },
    {
      "epoch": 0.32857142857142857,
      "grad_norm": 1.4885847568511963,
      "learning_rate": 7.460317460317461e-05,
      "loss": 0.5739,
      "step": 460
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 1.6695561408996582,
      "learning_rate": 7.301587301587302e-05,
      "loss": 0.5807,
      "step": 480
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 1.7690757513046265,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.5633,
      "step": 500
    },
    {
      "epoch": 0.37142857142857144,
      "grad_norm": 1.4946894645690918,
      "learning_rate": 6.984126984126984e-05,
      "loss": 0.5602,
      "step": 520
    },
    {
      "epoch": 0.38571428571428573,
      "grad_norm": 1.5215212106704712,
      "learning_rate": 6.825396825396825e-05,
      "loss": 0.5381,
      "step": 540
    },
    {
      "epoch": 0.4,
      "grad_norm": 1.1956950426101685,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.5281,
      "step": 560
    },
    {
      "epoch": 0.4142857142857143,
      "grad_norm": 1.315496563911438,
      "learning_rate": 6.507936507936509e-05,
      "loss": 0.551,
      "step": 580
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 1.3885098695755005,
      "learning_rate": 6.349206349206349e-05,
      "loss": 0.5428,
      "step": 600
    },
    {
      "epoch": 0.44285714285714284,
      "grad_norm": 1.4572407007217407,
      "learning_rate": 6.19047619047619e-05,
      "loss": 0.5465,
      "step": 620
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 1.411399006843567,
      "learning_rate": 6.0317460317460316e-05,
      "loss": 0.5263,
      "step": 640
    },
    {
      "epoch": 0.4714285714285714,
      "grad_norm": 1.3295258283615112,
      "learning_rate": 5.873015873015873e-05,
      "loss": 0.5248,
      "step": 660
    },
    {
      "epoch": 0.4857142857142857,
      "grad_norm": 1.512662410736084,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.5213,
      "step": 680
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.3572126626968384,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.5003,
      "step": 700
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 1.4415481090545654,
      "learning_rate": 5.396825396825397e-05,
      "loss": 0.5529,
      "step": 720
    },
    {
      "epoch": 0.5285714285714286,
      "grad_norm": 1.4465086460113525,
      "learning_rate": 5.2380952380952384e-05,
      "loss": 0.5266,
      "step": 740
    },
    {
      "epoch": 0.5428571428571428,
      "grad_norm": 1.3659744262695312,
      "learning_rate": 5.0793650793650794e-05,
      "loss": 0.533,
      "step": 760
    },
    {
      "epoch": 0.5571428571428572,
      "grad_norm": 1.5549241304397583,
      "learning_rate": 4.9206349206349204e-05,
      "loss": 0.5139,
      "step": 780
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.5689399242401123,
      "learning_rate": 4.761904761904762e-05,
      "loss": 0.5188,
      "step": 800
    },
    {
      "epoch": 0.5857142857142857,
      "grad_norm": 1.3908042907714844,
      "learning_rate": 4.603174603174603e-05,
      "loss": 0.529,
      "step": 820
    },
    {
      "epoch": 0.6,
      "grad_norm": 1.7993814945220947,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.548,
      "step": 840
    },
    {
      "epoch": 0.6142857142857143,
      "grad_norm": 1.5198805332183838,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.5367,
      "step": 860
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 1.4617140293121338,
      "learning_rate": 4.126984126984127e-05,
      "loss": 0.5016,
      "step": 880
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 1.6662240028381348,
      "learning_rate": 3.968253968253968e-05,
      "loss": 0.5078,
      "step": 900
    },
    {
      "epoch": 0.6571428571428571,
      "grad_norm": 1.617306113243103,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.5237,
      "step": 920
    },
    {
      "epoch": 0.6714285714285714,
      "grad_norm": 1.7389674186706543,
      "learning_rate": 3.650793650793651e-05,
      "loss": 0.4726,
      "step": 940
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 1.54181706905365,
      "learning_rate": 3.492063492063492e-05,
      "loss": 0.5232,
      "step": 960
    },
    {
      "epoch": 0.7,
      "grad_norm": 1.4722188711166382,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.532,
      "step": 980
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.3351426124572754,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.5015,
      "step": 1000
    },
    {
      "epoch": 0.7285714285714285,
      "grad_norm": 1.491120457649231,
      "learning_rate": 3.0158730158730158e-05,
      "loss": 0.5201,
      "step": 1020
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 1.24501633644104,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.4943,
      "step": 1040
    },
    {
      "epoch": 0.7571428571428571,
      "grad_norm": 1.4944851398468018,
      "learning_rate": 2.6984126984126984e-05,
      "loss": 0.4821,
      "step": 1060
    },
    {
      "epoch": 0.7714285714285715,
      "grad_norm": 1.513795256614685,
      "learning_rate": 2.5396825396825397e-05,
      "loss": 0.481,
      "step": 1080
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 1.8671048879623413,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.48,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.3706579208374023,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.4979,
      "step": 1120
    },
    {
      "epoch": 0.8142857142857143,
      "grad_norm": 1.3706722259521484,
      "learning_rate": 2.0634920634920636e-05,
      "loss": 0.4842,
      "step": 1140
    },
    {
      "epoch": 0.8285714285714286,
      "grad_norm": 1.5844509601593018,
      "learning_rate": 1.9047619047619046e-05,
      "loss": 0.4647,
      "step": 1160
    },
    {
      "epoch": 0.8428571428571429,
      "grad_norm": 1.4700433015823364,
      "learning_rate": 1.746031746031746e-05,
      "loss": 0.4861,
      "step": 1180
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.5295989513397217,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.4719,
      "step": 1200
    },
    {
      "epoch": 0.8714285714285714,
      "grad_norm": 1.4497429132461548,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.4761,
      "step": 1220
    },
    {
      "epoch": 0.8857142857142857,
      "grad_norm": 1.5591635704040527,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 0.4787,
      "step": 1240
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.410678744316101,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.4824,
      "step": 1260
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 1.4857158660888672,
      "learning_rate": 9.523809523809523e-06,
      "loss": 0.4757,
      "step": 1280
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 1.626083493232727,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.4571,
      "step": 1300
    },
    {
      "epoch": 0.9428571428571428,
      "grad_norm": 1.595832109451294,
      "learning_rate": 6.349206349206349e-06,
      "loss": 0.4909,
      "step": 1320
    },
    {
      "epoch": 0.9571428571428572,
      "grad_norm": 1.5187280178070068,
      "learning_rate": 4.7619047619047615e-06,
      "loss": 0.4785,
      "step": 1340
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 1.7050893306732178,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.4933,
      "step": 1360
    },
    {
      "epoch": 0.9857142857142858,
      "grad_norm": 1.5147182941436768,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.4833,
      "step": 1380
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.7184252738952637,
      "learning_rate": 0.0,
      "loss": 0.4739,
      "step": 1400
    }
  ],
  "logging_steps": 20,
  "max_steps": 1400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8419093040332800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}