{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0395136778115504,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.030395136778115502,
      "grad_norm": 9.889811515808105,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.6102,
      "step": 10
    },
    {
      "epoch": 0.060790273556231005,
      "grad_norm": 11.921159744262695,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.4517,
      "step": 20
    },
    {
      "epoch": 0.0911854103343465,
      "grad_norm": 8.763319969177246,
      "learning_rate": 6e-06,
      "loss": 1.3953,
      "step": 30
    },
    {
      "epoch": 0.12158054711246201,
      "grad_norm": 4.841191291809082,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.0262,
      "step": 40
    },
    {
      "epoch": 0.1519756838905775,
      "grad_norm": 4.8184733390808105,
      "learning_rate": 1e-05,
      "loss": 0.6875,
      "step": 50
    },
    {
      "epoch": 0.182370820668693,
      "grad_norm": 4.519493579864502,
      "learning_rate": 1.2e-05,
      "loss": 0.5443,
      "step": 60
    },
    {
      "epoch": 0.2127659574468085,
      "grad_norm": 2.340122699737549,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.3662,
      "step": 70
    },
    {
      "epoch": 0.24316109422492402,
      "grad_norm": 1.4288235902786255,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.2864,
      "step": 80
    },
    {
      "epoch": 0.2735562310030395,
      "grad_norm": 1.979983925819397,
      "learning_rate": 1.8e-05,
      "loss": 0.2239,
      "step": 90
    },
    {
      "epoch": 0.303951367781155,
      "grad_norm": 1.3639392852783203,
      "learning_rate": 2e-05,
      "loss": 0.166,
      "step": 100
    },
    {
      "epoch": 0.3343465045592705,
      "grad_norm": 0.9854844808578491,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.1515,
      "step": 110
    },
    {
      "epoch": 0.364741641337386,
      "grad_norm": 1.101383924484253,
      "learning_rate": 2.4e-05,
      "loss": 0.1637,
      "step": 120
    },
    {
      "epoch": 0.3951367781155015,
      "grad_norm": 1.665946125984192,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.1045,
      "step": 130
    },
    {
      "epoch": 0.425531914893617,
      "grad_norm": 0.7495555877685547,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.1058,
      "step": 140
    },
    {
      "epoch": 0.45592705167173253,
      "grad_norm": 1.0226916074752808,
      "learning_rate": 3e-05,
      "loss": 0.1079,
      "step": 150
    },
    {
      "epoch": 0.48632218844984804,
      "grad_norm": 0.6161096096038818,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.0993,
      "step": 160
    },
    {
      "epoch": 0.5167173252279635,
      "grad_norm": 0.7106040120124817,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.1079,
      "step": 170
    },
    {
      "epoch": 0.547112462006079,
      "grad_norm": 1.0743619203567505,
      "learning_rate": 3.6e-05,
      "loss": 0.0882,
      "step": 180
    },
    {
      "epoch": 0.5775075987841946,
      "grad_norm": 0.85890132188797,
      "learning_rate": 3.8e-05,
      "loss": 0.0943,
      "step": 190
    },
    {
      "epoch": 0.60790273556231,
      "grad_norm": 1.4179412126541138,
      "learning_rate": 4e-05,
      "loss": 0.0969,
      "step": 200
    },
    {
      "epoch": 0.6382978723404256,
      "grad_norm": 2.021270513534546,
      "learning_rate": 4.2e-05,
      "loss": 0.0934,
      "step": 210
    },
    {
      "epoch": 0.668693009118541,
      "grad_norm": 1.4204037189483643,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.0992,
      "step": 220
    },
    {
      "epoch": 0.6990881458966566,
      "grad_norm": 1.0484898090362549,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.0807,
      "step": 230
    },
    {
      "epoch": 0.729483282674772,
      "grad_norm": 1.0355311632156372,
      "learning_rate": 4.8e-05,
      "loss": 0.0827,
      "step": 240
    },
    {
      "epoch": 0.7598784194528876,
      "grad_norm": 0.7956388592720032,
      "learning_rate": 5e-05,
      "loss": 0.0818,
      "step": 250
    },
    {
      "epoch": 0.790273556231003,
      "grad_norm": 0.8189113736152649,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.076,
      "step": 260
    },
    {
      "epoch": 0.8206686930091185,
      "grad_norm": 0.6537955403327942,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 0.0738,
      "step": 270
    },
    {
      "epoch": 0.851063829787234,
      "grad_norm": 0.8595532774925232,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.0797,
      "step": 280
    },
    {
      "epoch": 0.8814589665653495,
      "grad_norm": 0.9948979020118713,
      "learning_rate": 5.8e-05,
      "loss": 0.0674,
      "step": 290
    },
    {
      "epoch": 0.9118541033434651,
      "grad_norm": 0.7822934985160828,
      "learning_rate": 6e-05,
      "loss": 0.0839,
      "step": 300
    },
    {
      "epoch": 0.9422492401215805,
      "grad_norm": 0.9252318143844604,
      "learning_rate": 6.2e-05,
      "loss": 0.0657,
      "step": 310
    },
    {
      "epoch": 0.9726443768996961,
      "grad_norm": 1.2264695167541504,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.0751,
      "step": 320
    },
    {
      "epoch": 1.0030395136778116,
      "grad_norm": 0.6637468338012695,
      "learning_rate": 6.6e-05,
      "loss": 0.0766,
      "step": 330
    },
    {
      "epoch": 1.033434650455927,
      "grad_norm": 1.3035773038864136,
      "learning_rate": 6.800000000000001e-05,
      "loss": 0.0637,
      "step": 340
    },
    {
      "epoch": 1.0638297872340425,
      "grad_norm": 0.8334704637527466,
      "learning_rate": 7e-05,
      "loss": 0.0753,
      "step": 350
    },
    {
      "epoch": 1.094224924012158,
      "grad_norm": 1.2079942226409912,
      "learning_rate": 7.2e-05,
      "loss": 0.0729,
      "step": 360
    },
    {
      "epoch": 1.1246200607902737,
      "grad_norm": 0.8719915747642517,
      "learning_rate": 7.4e-05,
      "loss": 0.0577,
      "step": 370
    },
    {
      "epoch": 1.155015197568389,
      "grad_norm": 0.9174433350563049,
      "learning_rate": 7.6e-05,
      "loss": 0.0705,
      "step": 380
    },
    {
      "epoch": 1.1854103343465046,
      "grad_norm": 1.2214233875274658,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.0837,
      "step": 390
    },
    {
      "epoch": 1.21580547112462,
      "grad_norm": 1.2708603143692017,
      "learning_rate": 8e-05,
      "loss": 0.0658,
      "step": 400
    },
    {
      "epoch": 1.2462006079027357,
      "grad_norm": 1.0723034143447876,
      "learning_rate": 8.2e-05,
      "loss": 0.0625,
      "step": 410
    },
    {
      "epoch": 1.2765957446808511,
      "grad_norm": 1.0916433334350586,
      "learning_rate": 8.4e-05,
      "loss": 0.087,
      "step": 420
    },
    {
      "epoch": 1.3069908814589666,
      "grad_norm": 0.6322633624076843,
      "learning_rate": 8.6e-05,
      "loss": 0.0722,
      "step": 430
    },
    {
      "epoch": 1.337386018237082,
      "grad_norm": 0.5855675339698792,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.0635,
      "step": 440
    },
    {
      "epoch": 1.3677811550151975,
      "grad_norm": 1.0976413488388062,
      "learning_rate": 9e-05,
      "loss": 0.0664,
      "step": 450
    },
    {
      "epoch": 1.3981762917933132,
      "grad_norm": 0.9359951019287109,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.0647,
      "step": 460
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.7038888335227966,
      "learning_rate": 9.4e-05,
      "loss": 0.0716,
      "step": 470
    },
    {
      "epoch": 1.458966565349544,
      "grad_norm": 0.5396299958229065,
      "learning_rate": 9.6e-05,
      "loss": 0.0527,
      "step": 480
    },
    {
      "epoch": 1.4893617021276595,
      "grad_norm": 0.5245558619499207,
      "learning_rate": 9.8e-05,
      "loss": 0.0627,
      "step": 490
    },
    {
      "epoch": 1.5197568389057752,
      "grad_norm": 1.350913405418396,
      "learning_rate": 0.0001,
      "loss": 0.0555,
      "step": 500
    },
    {
      "epoch": 1.5501519756838906,
      "grad_norm": 0.6774181723594666,
      "learning_rate": 9.999972660400536e-05,
      "loss": 0.061,
      "step": 510
    },
    {
      "epoch": 1.580547112462006,
      "grad_norm": 0.5291458964347839,
      "learning_rate": 9.999890641901125e-05,
      "loss": 0.0497,
      "step": 520
    },
    {
      "epoch": 1.6109422492401215,
      "grad_norm": 0.8055920600891113,
      "learning_rate": 9.999753945398704e-05,
      "loss": 0.0612,
      "step": 530
    },
    {
      "epoch": 1.641337386018237,
      "grad_norm": 0.6707597374916077,
      "learning_rate": 9.99956257238817e-05,
      "loss": 0.0618,
      "step": 540
    },
    {
      "epoch": 1.6717325227963524,
      "grad_norm": 0.6728675365447998,
      "learning_rate": 9.999316524962345e-05,
      "loss": 0.0569,
      "step": 550
    },
    {
      "epoch": 1.702127659574468,
      "grad_norm": 0.5206692814826965,
      "learning_rate": 9.999015805811965e-05,
      "loss": 0.0574,
      "step": 560
    },
    {
      "epoch": 1.7325227963525835,
      "grad_norm": 0.7465855479240417,
      "learning_rate": 9.998660418225645e-05,
      "loss": 0.0665,
      "step": 570
    },
    {
      "epoch": 1.7629179331306992,
      "grad_norm": 0.5522341728210449,
      "learning_rate": 9.998250366089848e-05,
      "loss": 0.072,
      "step": 580
    },
    {
      "epoch": 1.7933130699088147,
      "grad_norm": 0.6498285531997681,
      "learning_rate": 9.997785653888835e-05,
      "loss": 0.0471,
      "step": 590
    },
    {
      "epoch": 1.8237082066869301,
      "grad_norm": 0.8599438071250916,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.056,
      "step": 600
    },
    {
      "epoch": 1.8541033434650456,
      "grad_norm": 2.1041059494018555,
      "learning_rate": 9.996692270216947e-05,
      "loss": 0.0732,
      "step": 610
    },
    {
      "epoch": 1.884498480243161,
      "grad_norm": 1.016334891319275,
      "learning_rate": 9.996063610703137e-05,
      "loss": 0.0657,
      "step": 620
    },
    {
      "epoch": 1.9148936170212765,
      "grad_norm": 0.6369310021400452,
      "learning_rate": 9.995380315038119e-05,
      "loss": 0.0499,
      "step": 630
    },
    {
      "epoch": 1.9452887537993921,
      "grad_norm": 0.6747097969055176,
      "learning_rate": 9.994642390694308e-05,
      "loss": 0.0552,
      "step": 640
    },
    {
      "epoch": 1.9756838905775076,
      "grad_norm": 0.8461899161338806,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.051,
      "step": 650
    },
    {
      "epoch": 2.0060790273556233,
      "grad_norm": 1.1829544305801392,
      "learning_rate": 9.993002688846913e-05,
      "loss": 0.0548,
      "step": 660
    },
    {
      "epoch": 2.0364741641337387,
      "grad_norm": 0.5973241329193115,
      "learning_rate": 9.992100929274846e-05,
      "loss": 0.0505,
      "step": 670
    },
    {
      "epoch": 2.066869300911854,
      "grad_norm": 0.4595549404621124,
      "learning_rate": 9.991144576886823e-05,
      "loss": 0.064,
      "step": 680
    },
    {
      "epoch": 2.0972644376899696,
      "grad_norm": 0.5466217994689941,
      "learning_rate": 9.990133642141359e-05,
      "loss": 0.0475,
      "step": 690
    },
    {
      "epoch": 2.127659574468085,
      "grad_norm": 0.4098721146583557,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.0459,
      "step": 700
    },
    {
      "epoch": 2.1580547112462005,
      "grad_norm": 0.5519854426383972,
      "learning_rate": 9.987948070396571e-05,
      "loss": 0.0463,
      "step": 710
    },
    {
      "epoch": 2.188449848024316,
      "grad_norm": 0.5170401930809021,
      "learning_rate": 9.986773457298311e-05,
      "loss": 0.0484,
      "step": 720
    },
    {
      "epoch": 2.2188449848024314,
      "grad_norm": 0.3473241925239563,
      "learning_rate": 9.985544309644475e-05,
      "loss": 0.0546,
      "step": 730
    },
    {
      "epoch": 2.2492401215805473,
      "grad_norm": 0.38744667172431946,
      "learning_rate": 9.984260640876821e-05,
      "loss": 0.0428,
      "step": 740
    },
    {
      "epoch": 2.2796352583586628,
      "grad_norm": 0.7359247803688049,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.0592,
      "step": 750
    },
    {
      "epoch": 2.310030395136778,
      "grad_norm": 0.5814300179481506,
      "learning_rate": 9.981529796748134e-05,
      "loss": 0.0399,
      "step": 760
    },
    {
      "epoch": 2.3404255319148937,
      "grad_norm": 0.5898028016090393,
      "learning_rate": 9.980082651251175e-05,
      "loss": 0.046,
      "step": 770
    },
    {
      "epoch": 2.370820668693009,
      "grad_norm": 0.4852890968322754,
      "learning_rate": 9.97858104436822e-05,
      "loss": 0.046,
      "step": 780
    },
    {
      "epoch": 2.4012158054711246,
      "grad_norm": 0.4703480899333954,
      "learning_rate": 9.977024992520602e-05,
      "loss": 0.0374,
      "step": 790
    },
    {
      "epoch": 2.43161094224924,
      "grad_norm": 0.43933120369911194,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.0454,
      "step": 800
    },
    {
      "epoch": 2.4620060790273555,
      "grad_norm": 0.639541745185852,
      "learning_rate": 9.973749622593534e-05,
      "loss": 0.0379,
      "step": 810
    },
    {
      "epoch": 2.4924012158054714,
      "grad_norm": 0.6174078583717346,
      "learning_rate": 9.972030340333001e-05,
      "loss": 0.0539,
      "step": 820
    },
    {
      "epoch": 2.522796352583587,
      "grad_norm": 0.49048295617103577,
      "learning_rate": 9.970256684745258e-05,
      "loss": 0.045,
      "step": 830
    },
    {
      "epoch": 2.5531914893617023,
      "grad_norm": 0.3859421908855438,
      "learning_rate": 9.968428675226714e-05,
      "loss": 0.038,
      "step": 840
    },
    {
      "epoch": 2.5835866261398177,
      "grad_norm": 0.5661002993583679,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.0377,
      "step": 850
    },
    {
      "epoch": 2.613981762917933,
      "grad_norm": 0.6421768665313721,
      "learning_rate": 9.964609674954696e-05,
      "loss": 0.0516,
      "step": 860
    },
    {
      "epoch": 2.6443768996960486,
      "grad_norm": 0.36835750937461853,
      "learning_rate": 9.962618725965196e-05,
      "loss": 0.0354,
      "step": 870
    },
    {
      "epoch": 2.674772036474164,
      "grad_norm": 0.34994593262672424,
      "learning_rate": 9.96057350657239e-05,
      "loss": 0.0391,
      "step": 880
    },
    {
      "epoch": 2.7051671732522795,
      "grad_norm": 0.501876175403595,
      "learning_rate": 9.95847403914247e-05,
      "loss": 0.0459,
      "step": 890
    },
    {
      "epoch": 2.735562310030395,
      "grad_norm": 0.4514513611793518,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.0368,
      "step": 900
    },
    {
      "epoch": 2.7659574468085104,
      "grad_norm": 0.4428149461746216,
      "learning_rate": 9.954112452602045e-05,
      "loss": 0.033,
      "step": 910
    },
    {
      "epoch": 2.7963525835866263,
      "grad_norm": 0.7399154305458069,
      "learning_rate": 9.95185038118915e-05,
      "loss": 0.0443,
      "step": 920
    },
    {
      "epoch": 2.8267477203647418,
      "grad_norm": 0.4058426320552826,
      "learning_rate": 9.949534157133844e-05,
      "loss": 0.051,
      "step": 930
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.3738716244697571,
      "learning_rate": 9.94716380576598e-05,
      "loss": 0.0318,
      "step": 940
    },
    {
      "epoch": 2.8875379939209727,
      "grad_norm": 0.5219188332557678,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.0383,
      "step": 950
    },
    {
      "epoch": 2.917933130699088,
      "grad_norm": 0.28857406973838806,
      "learning_rate": 9.942260825371358e-05,
      "loss": 0.0413,
      "step": 960
    },
    {
      "epoch": 2.9483282674772036,
      "grad_norm": 0.4435504674911499,
      "learning_rate": 9.939728249962807e-05,
      "loss": 0.0331,
      "step": 970
    },
    {
      "epoch": 2.978723404255319,
      "grad_norm": 0.6941442489624023,
      "learning_rate": 9.937141654477528e-05,
      "loss": 0.039,
      "step": 980
    },
    {
      "epoch": 3.0091185410334345,
      "grad_norm": 0.5373895764350891,
      "learning_rate": 9.934501067202117e-05,
      "loss": 0.0434,
      "step": 990
    },
    {
      "epoch": 3.0395136778115504,
      "grad_norm": 0.3684053122997284,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.0367,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 31,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.301578581152896e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}