| { | |
| "best_global_step": null, | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 1.0, | |
| "eval_steps": 500, | |
| "global_step": 3930, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.002544529262086514, | |
| "grad_norm": 2.2193164825439453, | |
| "learning_rate": 1.1450381679389313e-06, | |
| "loss": 0.1149, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.005089058524173028, | |
| "grad_norm": 2.2632293701171875, | |
| "learning_rate": 2.4173027989821884e-06, | |
| "loss": 0.1642, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.007633587786259542, | |
| "grad_norm": 3.3879759311676025, | |
| "learning_rate": 3.6895674300254453e-06, | |
| "loss": 0.1337, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.010178117048346057, | |
| "grad_norm": 3.659600257873535, | |
| "learning_rate": 4.961832061068703e-06, | |
| "loss": 0.1454, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.01272264631043257, | |
| "grad_norm": 2.5636653900146484, | |
| "learning_rate": 6.2340966921119596e-06, | |
| "loss": 0.0598, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.015267175572519083, | |
| "grad_norm": 3.9301695823669434, | |
| "learning_rate": 7.506361323155217e-06, | |
| "loss": 0.0593, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.017811704834605598, | |
| "grad_norm": 0.9530353546142578, | |
| "learning_rate": 8.778625954198473e-06, | |
| "loss": 0.0368, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.020356234096692113, | |
| "grad_norm": 2.2630889415740967, | |
| "learning_rate": 1.0050890585241731e-05, | |
| "loss": 0.0398, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.022900763358778626, | |
| "grad_norm": 1.9916532039642334, | |
| "learning_rate": 1.1323155216284988e-05, | |
| "loss": 0.0319, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.02544529262086514, | |
| "grad_norm": 2.0590147972106934, | |
| "learning_rate": 1.2595419847328243e-05, | |
| "loss": 0.0432, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.027989821882951654, | |
| "grad_norm": 2.5208053588867188, | |
| "learning_rate": 1.38676844783715e-05, | |
| "loss": 0.0291, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.030534351145038167, | |
| "grad_norm": 1.356877088546753, | |
| "learning_rate": 1.5139949109414759e-05, | |
| "loss": 0.0202, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.03307888040712468, | |
| "grad_norm": 0.7600014805793762, | |
| "learning_rate": 1.6412213740458016e-05, | |
| "loss": 0.0259, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.035623409669211195, | |
| "grad_norm": 0.018205782398581505, | |
| "learning_rate": 1.7684478371501274e-05, | |
| "loss": 0.0163, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.03816793893129771, | |
| "grad_norm": 0.918929398059845, | |
| "learning_rate": 1.895674300254453e-05, | |
| "loss": 0.0244, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.04071246819338423, | |
| "grad_norm": 1.1537033319473267, | |
| "learning_rate": 2.0229007633587788e-05, | |
| "loss": 0.0184, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.043256997455470736, | |
| "grad_norm": 1.2687195539474487, | |
| "learning_rate": 2.1501272264631043e-05, | |
| "loss": 0.012, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.04580152671755725, | |
| "grad_norm": 0.24528977274894714, | |
| "learning_rate": 2.2773536895674302e-05, | |
| "loss": 0.0197, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.04834605597964377, | |
| "grad_norm": 1.1114501953125, | |
| "learning_rate": 2.404580152671756e-05, | |
| "loss": 0.0119, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.05089058524173028, | |
| "grad_norm": 0.843619704246521, | |
| "learning_rate": 2.5318066157760816e-05, | |
| "loss": 0.017, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.05343511450381679, | |
| "grad_norm": 0.6730576753616333, | |
| "learning_rate": 2.6590330788804074e-05, | |
| "loss": 0.021, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.05597964376590331, | |
| "grad_norm": 0.7756699323654175, | |
| "learning_rate": 2.7862595419847333e-05, | |
| "loss": 0.0164, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.058524173027989825, | |
| "grad_norm": 0.18474726378917694, | |
| "learning_rate": 2.9134860050890588e-05, | |
| "loss": 0.0198, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.061068702290076333, | |
| "grad_norm": 0.012082410044968128, | |
| "learning_rate": 3.0407124681933847e-05, | |
| "loss": 0.0189, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.06361323155216285, | |
| "grad_norm": 0.9460607767105103, | |
| "learning_rate": 3.16793893129771e-05, | |
| "loss": 0.0069, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.06615776081424936, | |
| "grad_norm": 0.15384167432785034, | |
| "learning_rate": 3.295165394402036e-05, | |
| "loss": 0.0192, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.06870229007633588, | |
| "grad_norm": 0.16621553897857666, | |
| "learning_rate": 3.4223918575063616e-05, | |
| "loss": 0.0262, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.07124681933842239, | |
| "grad_norm": 0.08461080491542816, | |
| "learning_rate": 3.549618320610687e-05, | |
| "loss": 0.0127, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.0737913486005089, | |
| "grad_norm": 0.6972628235816956, | |
| "learning_rate": 3.6768447837150126e-05, | |
| "loss": 0.0123, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.07633587786259542, | |
| "grad_norm": 0.3922271132469177, | |
| "learning_rate": 3.8040712468193385e-05, | |
| "loss": 0.009, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.07888040712468193, | |
| "grad_norm": 0.9791713356971741, | |
| "learning_rate": 3.9312977099236644e-05, | |
| "loss": 0.0225, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.08142493638676845, | |
| "grad_norm": 0.32885971665382385, | |
| "learning_rate": 4.05852417302799e-05, | |
| "loss": 0.0143, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.08396946564885496, | |
| "grad_norm": 0.09194409102201462, | |
| "learning_rate": 4.1857506361323154e-05, | |
| "loss": 0.0112, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.08651399491094147, | |
| "grad_norm": 0.03973765671253204, | |
| "learning_rate": 4.312977099236641e-05, | |
| "loss": 0.0127, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.089058524173028, | |
| "grad_norm": 1.0717049837112427, | |
| "learning_rate": 4.440203562340967e-05, | |
| "loss": 0.0101, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.0916030534351145, | |
| "grad_norm": 0.5723052620887756, | |
| "learning_rate": 4.567430025445293e-05, | |
| "loss": 0.0179, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.09414758269720101, | |
| "grad_norm": 0.7891315817832947, | |
| "learning_rate": 4.694656488549619e-05, | |
| "loss": 0.0061, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.09669211195928754, | |
| "grad_norm": 0.7999855875968933, | |
| "learning_rate": 4.821882951653944e-05, | |
| "loss": 0.023, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.09923664122137404, | |
| "grad_norm": 0.07333844900131226, | |
| "learning_rate": 4.94910941475827e-05, | |
| "loss": 0.01, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.10178117048346055, | |
| "grad_norm": 1.2032876014709473, | |
| "learning_rate": 4.999964498957073e-05, | |
| "loss": 0.02, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.10432569974554708, | |
| "grad_norm": 1.1466550827026367, | |
| "learning_rate": 4.999747551790483e-05, | |
| "loss": 0.016, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.10687022900763359, | |
| "grad_norm": 0.5132968425750732, | |
| "learning_rate": 4.999333397353335e-05, | |
| "loss": 0.0201, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.10941475826972011, | |
| "grad_norm": 0.4130668044090271, | |
| "learning_rate": 4.998722068318635e-05, | |
| "loss": 0.0146, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.11195928753180662, | |
| "grad_norm": 0.04550207406282425, | |
| "learning_rate": 4.997913612914664e-05, | |
| "loss": 0.0045, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.11450381679389313, | |
| "grad_norm": 0.8829872608184814, | |
| "learning_rate": 4.996908094921178e-05, | |
| "loss": 0.0061, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.11704834605597965, | |
| "grad_norm": 0.25924187898635864, | |
| "learning_rate": 4.995705593664369e-05, | |
| "loss": 0.0068, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.11959287531806616, | |
| "grad_norm": 0.1608368158340454, | |
| "learning_rate": 4.994306204010613e-05, | |
| "loss": 0.006, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.12213740458015267, | |
| "grad_norm": 0.016572551801800728, | |
| "learning_rate": 4.9927100363589846e-05, | |
| "loss": 0.0097, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.12468193384223919, | |
| "grad_norm": 0.03372234106063843, | |
| "learning_rate": 4.990917216632545e-05, | |
| "loss": 0.0107, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.1272264631043257, | |
| "grad_norm": 0.10057298094034195, | |
| "learning_rate": 4.988927886268411e-05, | |
| "loss": 0.0173, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.1272264631043257, | |
| "eval_loss": 0.03813108429312706, | |
| "eval_runtime": 134.0752, | |
| "eval_samples_per_second": 59.534, | |
| "eval_steps_per_second": 0.47, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.1297709923664122, | |
| "grad_norm": 1.369550108909607, | |
| "learning_rate": 4.986742202206594e-05, | |
| "loss": 0.0195, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.13231552162849872, | |
| "grad_norm": 0.0586123988032341, | |
| "learning_rate": 4.9843603368776224e-05, | |
| "loss": 0.011, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.13486005089058525, | |
| "grad_norm": 0.1948956549167633, | |
| "learning_rate": 4.981782478188933e-05, | |
| "loss": 0.0067, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.13740458015267176, | |
| "grad_norm": 0.5815696120262146, | |
| "learning_rate": 4.979008829510055e-05, | |
| "loss": 0.0096, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.13994910941475827, | |
| "grad_norm": 1.0388095378875732, | |
| "learning_rate": 4.9760396096565565e-05, | |
| "loss": 0.009, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.14249363867684478, | |
| "grad_norm": 0.4838831424713135, | |
| "learning_rate": 4.9728750528727893e-05, | |
| "loss": 0.0103, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.1450381679389313, | |
| "grad_norm": 0.12236972153186798, | |
| "learning_rate": 4.969515408813405e-05, | |
| "loss": 0.011, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.1475826972010178, | |
| "grad_norm": 0.11237329244613647, | |
| "learning_rate": 4.965960942523662e-05, | |
| "loss": 0.0037, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.15012722646310434, | |
| "grad_norm": 0.004609148483723402, | |
| "learning_rate": 4.9622119344185156e-05, | |
| "loss": 0.0099, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.15267175572519084, | |
| "grad_norm": 0.13287989795207977, | |
| "learning_rate": 4.9582686802604914e-05, | |
| "loss": 0.0028, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.15521628498727735, | |
| "grad_norm": 0.35999101400375366, | |
| "learning_rate": 4.954131491136362e-05, | |
| "loss": 0.0111, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.15776081424936386, | |
| "grad_norm": 0.010963108390569687, | |
| "learning_rate": 4.949800693432592e-05, | |
| "loss": 0.0149, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.16030534351145037, | |
| "grad_norm": 0.10792261362075806, | |
| "learning_rate": 4.945276628809603e-05, | |
| "loss": 0.0121, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.1628498727735369, | |
| "grad_norm": 1.0253171920776367, | |
| "learning_rate": 4.940559654174808e-05, | |
| "loss": 0.0123, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.16539440203562342, | |
| "grad_norm": 0.07742812484502792, | |
| "learning_rate": 4.9356501416544624e-05, | |
| "loss": 0.0029, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.16793893129770993, | |
| "grad_norm": 0.4924394488334656, | |
| "learning_rate": 4.9305484785643e-05, | |
| "loss": 0.0109, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.17048346055979643, | |
| "grad_norm": 0.169286847114563, | |
| "learning_rate": 4.925255067378987e-05, | |
| "loss": 0.0077, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.17302798982188294, | |
| "grad_norm": 0.0202664602547884, | |
| "learning_rate": 4.919770325700359e-05, | |
| "loss": 0.0164, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.17557251908396945, | |
| "grad_norm": 0.4131915867328644, | |
| "learning_rate": 4.9140946862244854e-05, | |
| "loss": 0.0196, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.178117048346056, | |
| "grad_norm": 0.5200774073600769, | |
| "learning_rate": 4.908228596707527e-05, | |
| "loss": 0.0125, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.1806615776081425, | |
| "grad_norm": 0.7565369606018066, | |
| "learning_rate": 4.902172519930416e-05, | |
| "loss": 0.0074, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.183206106870229, | |
| "grad_norm": 0.08834939450025558, | |
| "learning_rate": 4.895926933662347e-05, | |
| "loss": 0.0088, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.18575063613231552, | |
| "grad_norm": 0.785431981086731, | |
| "learning_rate": 4.8894923306230825e-05, | |
| "loss": 0.008, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.18829516539440203, | |
| "grad_norm": 0.11372370272874832, | |
| "learning_rate": 4.882869218444083e-05, | |
| "loss": 0.0078, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.19083969465648856, | |
| "grad_norm": 0.3121352791786194, | |
| "learning_rate": 4.87605811962846e-05, | |
| "loss": 0.0117, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.19338422391857507, | |
| "grad_norm": 0.9200536012649536, | |
| "learning_rate": 4.869059571509758e-05, | |
| "loss": 0.0076, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.19592875318066158, | |
| "grad_norm": 0.6021171808242798, | |
| "learning_rate": 4.861874126209558e-05, | |
| "loss": 0.0042, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.1984732824427481, | |
| "grad_norm": 0.08358690142631531, | |
| "learning_rate": 4.8545023505939223e-05, | |
| "loss": 0.0105, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.2010178117048346, | |
| "grad_norm": 0.8210988640785217, | |
| "learning_rate": 4.8469448262286786e-05, | |
| "loss": 0.0073, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.2035623409669211, | |
| "grad_norm": 0.6437301635742188, | |
| "learning_rate": 4.8392021493335324e-05, | |
| "loss": 0.017, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.20610687022900764, | |
| "grad_norm": 0.6329097151756287, | |
| "learning_rate": 4.831274930735036e-05, | |
| "loss": 0.0143, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.20865139949109415, | |
| "grad_norm": 0.12898841500282288, | |
| "learning_rate": 4.8231637958183974e-05, | |
| "loss": 0.0038, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.21119592875318066, | |
| "grad_norm": 0.45694243907928467, | |
| "learning_rate": 4.814869384478142e-05, | |
| "loss": 0.0081, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.21374045801526717, | |
| "grad_norm": 0.8665072917938232, | |
| "learning_rate": 4.8063923510676376e-05, | |
| "loss": 0.0156, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 0.21628498727735368, | |
| "grad_norm": 0.005406165029853582, | |
| "learning_rate": 4.797733364347463e-05, | |
| "loss": 0.0065, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.21882951653944022, | |
| "grad_norm": 0.4675016403198242, | |
| "learning_rate": 4.788893107432652e-05, | |
| "loss": 0.0137, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 0.22137404580152673, | |
| "grad_norm": 0.030773309990763664, | |
| "learning_rate": 4.7798722777388035e-05, | |
| "loss": 0.0019, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 0.22391857506361323, | |
| "grad_norm": 0.03853096067905426, | |
| "learning_rate": 4.7706715869270635e-05, | |
| "loss": 0.0105, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 0.22646310432569974, | |
| "grad_norm": 1.0207020044326782, | |
| "learning_rate": 4.7612917608479764e-05, | |
| "loss": 0.0109, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 0.22900763358778625, | |
| "grad_norm": 0.023684734478592873, | |
| "learning_rate": 4.7517335394842236e-05, | |
| "loss": 0.0129, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.23155216284987276, | |
| "grad_norm": 0.059336401522159576, | |
| "learning_rate": 4.741997676892249e-05, | |
| "loss": 0.0127, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 0.2340966921119593, | |
| "grad_norm": 0.07478685677051544, | |
| "learning_rate": 4.732084941142766e-05, | |
| "loss": 0.0094, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 0.2366412213740458, | |
| "grad_norm": 0.1500403732061386, | |
| "learning_rate": 4.7219961142601675e-05, | |
| "loss": 0.008, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 0.23918575063613232, | |
| "grad_norm": 0.006639978848397732, | |
| "learning_rate": 4.711731992160831e-05, | |
| "loss": 0.0029, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 0.24173027989821882, | |
| "grad_norm": 0.01452037412673235, | |
| "learning_rate": 4.701293384590324e-05, | |
| "loss": 0.0112, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.24427480916030533, | |
| "grad_norm": 0.016306525096297264, | |
| "learning_rate": 4.690681115059528e-05, | |
| "loss": 0.0033, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 0.24681933842239187, | |
| "grad_norm": 0.2131829559803009, | |
| "learning_rate": 4.6798960207796665e-05, | |
| "loss": 0.0061, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 0.24936386768447838, | |
| "grad_norm": 0.01569824293255806, | |
| "learning_rate": 4.66893895259626e-05, | |
| "loss": 0.0078, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 0.25190839694656486, | |
| "grad_norm": 0.012850727885961533, | |
| "learning_rate": 4.6578107749220015e-05, | |
| "loss": 0.0057, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 0.2544529262086514, | |
| "grad_norm": 0.43829357624053955, | |
| "learning_rate": 4.6465123656685575e-05, | |
| "loss": 0.0103, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.2544529262086514, | |
| "eval_loss": 0.03520084545016289, | |
| "eval_runtime": 133.9425, | |
| "eval_samples_per_second": 59.593, | |
| "eval_steps_per_second": 0.47, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.25699745547073793, | |
| "grad_norm": 0.040336623787879944, | |
| "learning_rate": 4.6350446161773164e-05, | |
| "loss": 0.0044, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 0.2595419847328244, | |
| "grad_norm": 0.1553158313035965, | |
| "learning_rate": 4.6234084311490655e-05, | |
| "loss": 0.0056, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 0.26208651399491095, | |
| "grad_norm": 1.226593255996704, | |
| "learning_rate": 4.6116047285726174e-05, | |
| "loss": 0.0078, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 0.26463104325699743, | |
| "grad_norm": 0.41113924980163574, | |
| "learning_rate": 4.5996344396523925e-05, | |
| "loss": 0.012, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 0.26717557251908397, | |
| "grad_norm": 0.020707394927740097, | |
| "learning_rate": 4.5874985087349516e-05, | |
| "loss": 0.0023, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.2697201017811705, | |
| "grad_norm": 0.6316179037094116, | |
| "learning_rate": 4.5751978932344986e-05, | |
| "loss": 0.003, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 0.272264631043257, | |
| "grad_norm": 0.6680510640144348, | |
| "learning_rate": 4.562733563557348e-05, | |
| "loss": 0.0065, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 0.2748091603053435, | |
| "grad_norm": 0.17518192529678345, | |
| "learning_rate": 4.55010650302537e-05, | |
| "loss": 0.0108, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 0.27735368956743, | |
| "grad_norm": 0.6529995203018188, | |
| "learning_rate": 4.537317707798411e-05, | |
| "loss": 0.0077, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 0.27989821882951654, | |
| "grad_norm": 0.07259833812713623, | |
| "learning_rate": 4.524368186795713e-05, | |
| "loss": 0.0116, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.2824427480916031, | |
| "grad_norm": 0.0009080986492335796, | |
| "learning_rate": 4.5112589616163135e-05, | |
| "loss": 0.007, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 0.28498727735368956, | |
| "grad_norm": 1.1138458251953125, | |
| "learning_rate": 4.4979910664584504e-05, | |
| "loss": 0.0173, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 0.2875318066157761, | |
| "grad_norm": 0.0050997077487409115, | |
| "learning_rate": 4.484565548037976e-05, | |
| "loss": 0.0106, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 0.2900763358778626, | |
| "grad_norm": 0.026803424581885338, | |
| "learning_rate": 4.470983465505781e-05, | |
| "loss": 0.0041, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 0.2926208651399491, | |
| "grad_norm": 0.02261420153081417, | |
| "learning_rate": 4.4572458903642354e-05, | |
| "loss": 0.0116, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.2951653944020356, | |
| "grad_norm": 0.22386318445205688, | |
| "learning_rate": 4.4433539063826556e-05, | |
| "loss": 0.0089, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 0.29770992366412213, | |
| "grad_norm": 0.5273404121398926, | |
| "learning_rate": 4.429308609511807e-05, | |
| "loss": 0.0129, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 0.30025445292620867, | |
| "grad_norm": 0.023222139105200768, | |
| "learning_rate": 4.415111107797445e-05, | |
| "loss": 0.004, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 0.30279898218829515, | |
| "grad_norm": 0.15821930766105652, | |
| "learning_rate": 4.400762521292895e-05, | |
| "loss": 0.0109, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 0.3053435114503817, | |
| "grad_norm": 0.18869535624980927, | |
| "learning_rate": 4.3862639819706955e-05, | |
| "loss": 0.0046, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.30788804071246817, | |
| "grad_norm": 0.003842174308374524, | |
| "learning_rate": 4.371616633633294e-05, | |
| "loss": 0.0093, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 0.3104325699745547, | |
| "grad_norm": 0.006177359260618687, | |
| "learning_rate": 4.3568216318228094e-05, | |
| "loss": 0.0108, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 0.31297709923664124, | |
| "grad_norm": 0.1568138301372528, | |
| "learning_rate": 4.3418801437298765e-05, | |
| "loss": 0.0141, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 0.3155216284987277, | |
| "grad_norm": 0.014416090212762356, | |
| "learning_rate": 4.3267933481015553e-05, | |
| "loss": 0.0032, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 0.31806615776081426, | |
| "grad_norm": 0.055812641978263855, | |
| "learning_rate": 4.3115624351483494e-05, | |
| "loss": 0.0084, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.32061068702290074, | |
| "grad_norm": 0.037721991539001465, | |
| "learning_rate": 4.296188606450301e-05, | |
| "loss": 0.0224, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 0.3231552162849873, | |
| "grad_norm": 0.024133220314979553, | |
| "learning_rate": 4.2806730748622024e-05, | |
| "loss": 0.004, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 0.3256997455470738, | |
| "grad_norm": 0.19586949050426483, | |
| "learning_rate": 4.265017064417909e-05, | |
| "loss": 0.0042, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 0.3282442748091603, | |
| "grad_norm": 0.34333810210227966, | |
| "learning_rate": 4.249221810233779e-05, | |
| "loss": 0.0026, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 0.33078880407124683, | |
| "grad_norm": 0.6564996242523193, | |
| "learning_rate": 4.233288558411226e-05, | |
| "loss": 0.0086, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.3333333333333333, | |
| "grad_norm": 0.16231529414653778, | |
| "learning_rate": 4.217218565938423e-05, | |
| "loss": 0.0099, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 0.33587786259541985, | |
| "grad_norm": 0.013459747657179832, | |
| "learning_rate": 4.2010131005911305e-05, | |
| "loss": 0.0099, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 0.3384223918575064, | |
| "grad_norm": 0.364897757768631, | |
| "learning_rate": 4.1846734408326815e-05, | |
| "loss": 0.0039, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 0.34096692111959287, | |
| "grad_norm": 0.9234687089920044, | |
| "learning_rate": 4.168200875713125e-05, | |
| "loss": 0.0097, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 0.3435114503816794, | |
| "grad_norm": 0.07833091914653778, | |
| "learning_rate": 4.15159670476753e-05, | |
| "loss": 0.0098, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.3460559796437659, | |
| "grad_norm": 1.059687614440918, | |
| "learning_rate": 4.134862237913463e-05, | |
| "loss": 0.0154, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 0.3486005089058524, | |
| "grad_norm": 0.1955600082874298, | |
| "learning_rate": 4.117998795347651e-05, | |
| "loss": 0.0092, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 0.3511450381679389, | |
| "grad_norm": 0.6965257525444031, | |
| "learning_rate": 4.101007707441824e-05, | |
| "loss": 0.0081, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 0.35368956743002544, | |
| "grad_norm": 0.8857764601707458, | |
| "learning_rate": 4.083890314637769e-05, | |
| "loss": 0.0116, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 0.356234096692112, | |
| "grad_norm": 0.48666754364967346, | |
| "learning_rate": 4.0666479673415734e-05, | |
| "loss": 0.0053, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.35877862595419846, | |
| "grad_norm": 0.14460720121860504, | |
| "learning_rate": 4.049282025817095e-05, | |
| "loss": 0.0083, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 0.361323155216285, | |
| "grad_norm": 0.5809695720672607, | |
| "learning_rate": 4.031793860078649e-05, | |
| "loss": 0.0115, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 0.3638676844783715, | |
| "grad_norm": 0.04807254672050476, | |
| "learning_rate": 4.0141848497829236e-05, | |
| "loss": 0.0056, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 0.366412213740458, | |
| "grad_norm": 0.02069387026131153, | |
| "learning_rate": 3.996456384120143e-05, | |
| "loss": 0.0133, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 0.36895674300254455, | |
| "grad_norm": 0.008871096186339855, | |
| "learning_rate": 3.9786098617044674e-05, | |
| "loss": 0.0077, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 0.37150127226463103, | |
| "grad_norm": 0.006375044584274292, | |
| "learning_rate": 3.960646690463658e-05, | |
| "loss": 0.0087, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 0.37404580152671757, | |
| "grad_norm": 0.24041035771369934, | |
| "learning_rate": 3.9425682875280054e-05, | |
| "loss": 0.0021, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 0.37659033078880405, | |
| "grad_norm": 0.23533613979816437, | |
| "learning_rate": 3.9243760791185255e-05, | |
| "loss": 0.013, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 0.3791348600508906, | |
| "grad_norm": 0.09379549324512482, | |
| "learning_rate": 3.906071500434452e-05, | |
| "loss": 0.008, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 0.3816793893129771, | |
| "grad_norm": 0.0210269745439291, | |
| "learning_rate": 3.887655995540004e-05, | |
| "loss": 0.0112, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.3816793893129771, | |
| "eval_loss": 0.029847292229533195, | |
| "eval_runtime": 133.9825, | |
| "eval_samples_per_second": 59.575, | |
| "eval_steps_per_second": 0.47, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.3842239185750636, | |
| "grad_norm": 0.5631283521652222, | |
| "learning_rate": 3.8691310172504705e-05, | |
| "loss": 0.0138, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 0.38676844783715014, | |
| "grad_norm": 0.13655175268650055, | |
| "learning_rate": 3.850498027017588e-05, | |
| "loss": 0.002, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 0.3893129770992366, | |
| "grad_norm": 0.175134539604187, | |
| "learning_rate": 3.8317584948142535e-05, | |
| "loss": 0.0063, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 0.39185750636132316, | |
| "grad_norm": 0.0077371494844555855, | |
| "learning_rate": 3.81291389901855e-05, | |
| "loss": 0.0038, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 0.3944020356234097, | |
| "grad_norm": 0.16864041984081268, | |
| "learning_rate": 3.7939657262971225e-05, | |
| "loss": 0.0067, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 0.3969465648854962, | |
| "grad_norm": 0.5973467826843262, | |
| "learning_rate": 3.774915471487886e-05, | |
| "loss": 0.0119, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 0.3994910941475827, | |
| "grad_norm": 0.11558622121810913, | |
| "learning_rate": 3.755764637482106e-05, | |
| "loss": 0.0026, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 0.4020356234096692, | |
| "grad_norm": 0.012447798624634743, | |
| "learning_rate": 3.736514735105824e-05, | |
| "loss": 0.0025, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 0.40458015267175573, | |
| "grad_norm": 1.1532433032989502, | |
| "learning_rate": 3.717167283000673e-05, | |
| "loss": 0.0086, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 0.4071246819338422, | |
| "grad_norm": 0.0178969819098711, | |
| "learning_rate": 3.6977238075040735e-05, | |
| "loss": 0.0043, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.40966921119592875, | |
| "grad_norm": 0.02340787835419178, | |
| "learning_rate": 3.6781858425288086e-05, | |
| "loss": 0.0025, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 0.4122137404580153, | |
| "grad_norm": 0.056904137134552, | |
| "learning_rate": 3.658554929442024e-05, | |
| "loss": 0.0017, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 0.41475826972010177, | |
| "grad_norm": 0.9576080441474915, | |
| "learning_rate": 3.638832616943622e-05, | |
| "loss": 0.0049, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 0.4173027989821883, | |
| "grad_norm": 0.9628064632415771, | |
| "learning_rate": 3.6190204609440824e-05, | |
| "loss": 0.0072, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 0.4198473282442748, | |
| "grad_norm": 0.0015831461641937494, | |
| "learning_rate": 3.5991200244417214e-05, | |
| "loss": 0.0041, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 0.4223918575063613, | |
| "grad_norm": 0.014716788195073605, | |
| "learning_rate": 3.579132877399378e-05, | |
| "loss": 0.0112, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 0.42493638676844786, | |
| "grad_norm": 0.03392916917800903, | |
| "learning_rate": 3.5590605966205616e-05, | |
| "loss": 0.0056, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 0.42748091603053434, | |
| "grad_norm": 0.04351244866847992, | |
| "learning_rate": 3.5389047656250585e-05, | |
| "loss": 0.0078, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 0.4300254452926209, | |
| "grad_norm": 0.07899494469165802, | |
| "learning_rate": 3.5186669745240026e-05, | |
| "loss": 0.0093, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 0.43256997455470736, | |
| "grad_norm": 0.1421324610710144, | |
| "learning_rate": 3.498348819894434e-05, | |
| "loss": 0.0067, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.4351145038167939, | |
| "grad_norm": 0.5013014674186707, | |
| "learning_rate": 3.477951904653341e-05, | |
| "loss": 0.0101, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 0.43765903307888043, | |
| "grad_norm": 0.006869410630315542, | |
| "learning_rate": 3.457477837931205e-05, | |
| "loss": 0.0092, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 0.4402035623409669, | |
| "grad_norm": 0.8049445152282715, | |
| "learning_rate": 3.436928234945056e-05, | |
| "loss": 0.0081, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 0.44274809160305345, | |
| "grad_norm": 0.03588230535387993, | |
| "learning_rate": 3.416304716871045e-05, | |
| "loss": 0.0025, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 0.44529262086513993, | |
| "grad_norm": 0.6233575344085693, | |
| "learning_rate": 3.39560891071655e-05, | |
| "loss": 0.0067, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 0.44783715012722647, | |
| "grad_norm": 0.011277757585048676, | |
| "learning_rate": 3.374842449191818e-05, | |
| "loss": 0.0061, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 0.45038167938931295, | |
| "grad_norm": 0.19664280116558075, | |
| "learning_rate": 3.3540069705811626e-05, | |
| "loss": 0.0079, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 0.4529262086513995, | |
| "grad_norm": 0.45007187128067017, | |
| "learning_rate": 3.3331041186137105e-05, | |
| "loss": 0.0135, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 0.455470737913486, | |
| "grad_norm": 0.06902237981557846, | |
| "learning_rate": 3.3121355423337344e-05, | |
| "loss": 0.0053, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 0.4580152671755725, | |
| "grad_norm": 0.5233955979347229, | |
| "learning_rate": 3.2911028959705575e-05, | |
| "loss": 0.0083, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.46055979643765904, | |
| "grad_norm": 0.13601745665073395, | |
| "learning_rate": 3.2700078388080446e-05, | |
| "loss": 0.0085, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 0.4631043256997455, | |
| "grad_norm": 0.05271271988749504, | |
| "learning_rate": 3.2488520350537044e-05, | |
| "loss": 0.0051, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 0.46564885496183206, | |
| "grad_norm": 0.009570769965648651, | |
| "learning_rate": 3.2276371537073975e-05, | |
| "loss": 0.0071, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 0.4681933842239186, | |
| "grad_norm": 0.026523761451244354, | |
| "learning_rate": 3.206364868429667e-05, | |
| "loss": 0.0064, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 0.4707379134860051, | |
| "grad_norm": 0.054482318460941315, | |
| "learning_rate": 3.1850368574097026e-05, | |
| "loss": 0.0047, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 0.4732824427480916, | |
| "grad_norm": 0.11016249656677246, | |
| "learning_rate": 3.163654803232946e-05, | |
| "loss": 0.0175, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 0.4758269720101781, | |
| "grad_norm": 0.0042827255092561245, | |
| "learning_rate": 3.142220392748351e-05, | |
| "loss": 0.0087, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 0.47837150127226463, | |
| "grad_norm": 0.29444900155067444, | |
| "learning_rate": 3.1207353169353084e-05, | |
| "loss": 0.0028, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 0.48091603053435117, | |
| "grad_norm": 0.8651733994483948, | |
| "learning_rate": 3.099201270770237e-05, | |
| "loss": 0.011, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 0.48346055979643765, | |
| "grad_norm": 0.06286109238862991, | |
| "learning_rate": 3.077619953092873e-05, | |
| "loss": 0.007, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.4860050890585242, | |
| "grad_norm": 0.06814798712730408, | |
| "learning_rate": 3.055993066472243e-05, | |
| "loss": 0.0049, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 0.48854961832061067, | |
| "grad_norm": 0.3042219877243042, | |
| "learning_rate": 3.034322317072344e-05, | |
| "loss": 0.0045, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 0.4910941475826972, | |
| "grad_norm": 1.1074669361114502, | |
| "learning_rate": 3.0126094145175514e-05, | |
| "loss": 0.0103, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 0.49363867684478374, | |
| "grad_norm": 0.29976963996887207, | |
| "learning_rate": 2.9908560717577362e-05, | |
| "loss": 0.003, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 0.4961832061068702, | |
| "grad_norm": 0.08179239183664322, | |
| "learning_rate": 2.969064004933133e-05, | |
| "loss": 0.0079, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 0.49872773536895676, | |
| "grad_norm": 0.10447824001312256, | |
| "learning_rate": 2.9472349332389525e-05, | |
| "loss": 0.0036, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 0.5012722646310432, | |
| "grad_norm": 1.0608930587768555, | |
| "learning_rate": 2.9253705787897523e-05, | |
| "loss": 0.0058, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 0.5038167938931297, | |
| "grad_norm": 0.20421336591243744, | |
| "learning_rate": 2.9034726664835753e-05, | |
| "loss": 0.007, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 0.5063613231552163, | |
| "grad_norm": 0.14725904166698456, | |
| "learning_rate": 2.8815429238658764e-05, | |
| "loss": 0.0038, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 0.5089058524173028, | |
| "grad_norm": 0.06564632803201675, | |
| "learning_rate": 2.8595830809932284e-05, | |
| "loss": 0.0026, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.5089058524173028, | |
| "eval_loss": 0.02710827998816967, | |
| "eval_runtime": 133.9472, | |
| "eval_samples_per_second": 59.591, | |
| "eval_steps_per_second": 0.47, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.5114503816793893, | |
| "grad_norm": 0.01245685201138258, | |
| "learning_rate": 2.8375948702968413e-05, | |
| "loss": 0.0115, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 0.5139949109414759, | |
| "grad_norm": 0.1518605798482895, | |
| "learning_rate": 2.815580026445887e-05, | |
| "loss": 0.0054, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 0.5165394402035624, | |
| "grad_norm": 0.1046416237950325, | |
| "learning_rate": 2.79354028621065e-05, | |
| "loss": 0.0062, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 0.5190839694656488, | |
| "grad_norm": 0.3347122371196747, | |
| "learning_rate": 2.7714773883255146e-05, | |
| "loss": 0.007, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 0.5216284987277354, | |
| "grad_norm": 0.2936704456806183, | |
| "learning_rate": 2.7493930733517874e-05, | |
| "loss": 0.0094, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 0.5241730279898219, | |
| "grad_norm": 0.6291568279266357, | |
| "learning_rate": 2.7272890835403937e-05, | |
| "loss": 0.0061, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 0.5267175572519084, | |
| "grad_norm": 0.37302365899086, | |
| "learning_rate": 2.7051671626944187e-05, | |
| "loss": 0.0109, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 0.5292620865139949, | |
| "grad_norm": 0.5293512940406799, | |
| "learning_rate": 2.6830290560315468e-05, | |
| "loss": 0.0107, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 0.5318066157760815, | |
| "grad_norm": 0.02336425893008709, | |
| "learning_rate": 2.6608765100463728e-05, | |
| "loss": 0.0113, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 0.5343511450381679, | |
| "grad_norm": 0.7965981960296631, | |
| "learning_rate": 2.6387112723726248e-05, | |
| "loss": 0.0117, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.5368956743002544, | |
| "grad_norm": 0.12472939491271973, | |
| "learning_rate": 2.616535091645287e-05, | |
| "loss": 0.0097, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 0.539440203562341, | |
| "grad_norm": 0.06943671405315399, | |
| "learning_rate": 2.5943497173626513e-05, | |
| "loss": 0.0069, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 0.5419847328244275, | |
| "grad_norm": 0.0347890704870224, | |
| "learning_rate": 2.572156899748297e-05, | |
| "loss": 0.0073, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 0.544529262086514, | |
| "grad_norm": 0.007771250791847706, | |
| "learning_rate": 2.5499583896130157e-05, | |
| "loss": 0.0098, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 0.5470737913486005, | |
| "grad_norm": 0.26268109679222107, | |
| "learning_rate": 2.527755938216687e-05, | |
| "loss": 0.0111, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 0.549618320610687, | |
| "grad_norm": 0.4121718406677246, | |
| "learning_rate": 2.505551297130117e-05, | |
| "loss": 0.0053, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 0.5521628498727735, | |
| "grad_norm": 0.09937995672225952, | |
| "learning_rate": 2.483346218096861e-05, | |
| "loss": 0.0058, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 0.55470737913486, | |
| "grad_norm": 0.556506335735321, | |
| "learning_rate": 2.461142452895025e-05, | |
| "loss": 0.0096, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 0.5572519083969466, | |
| "grad_norm": 0.03499068692326546, | |
| "learning_rate": 2.4389417531990634e-05, | |
| "loss": 0.0047, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 0.5597964376590331, | |
| "grad_norm": 0.0036660893820226192, | |
| "learning_rate": 2.4167458704415916e-05, | |
| "loss": 0.0156, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.5623409669211196, | |
| "grad_norm": 0.5959199070930481, | |
| "learning_rate": 2.3945565556752114e-05, | |
| "loss": 0.0039, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 0.5648854961832062, | |
| "grad_norm": 0.4356940686702728, | |
| "learning_rate": 2.372375559434373e-05, | |
| "loss": 0.0081, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 0.5674300254452926, | |
| "grad_norm": 1.212583303451538, | |
| "learning_rate": 2.3502046315972656e-05, | |
| "loss": 0.0126, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 0.5699745547073791, | |
| "grad_norm": 0.2046574354171753, | |
| "learning_rate": 2.3280455212477776e-05, | |
| "loss": 0.0031, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 0.5725190839694656, | |
| "grad_norm": 0.3375607430934906, | |
| "learning_rate": 2.3058999765375042e-05, | |
| "loss": 0.0104, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 0.5750636132315522, | |
| "grad_norm": 0.023851798847317696, | |
| "learning_rate": 2.2837697445478352e-05, | |
| "loss": 0.0064, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 0.5776081424936387, | |
| "grad_norm": 0.1618007868528366, | |
| "learning_rate": 2.261656571152128e-05, | |
| "loss": 0.0065, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 0.5801526717557252, | |
| "grad_norm": 0.27718690037727356, | |
| "learning_rate": 2.2395622008779686e-05, | |
| "loss": 0.0079, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 0.5826972010178118, | |
| "grad_norm": 0.7294254899024963, | |
| "learning_rate": 2.2174883767695522e-05, | |
| "loss": 0.0074, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 0.5852417302798982, | |
| "grad_norm": 0.0032069615554064512, | |
| "learning_rate": 2.195436840250168e-05, | |
| "loss": 0.001, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.5877862595419847, | |
| "grad_norm": 0.5366030335426331, | |
| "learning_rate": 2.1734093309848183e-05, | |
| "loss": 0.0034, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 0.5903307888040712, | |
| "grad_norm": 0.06393870711326599, | |
| "learning_rate": 2.151407586742972e-05, | |
| "loss": 0.0108, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 0.5928753180661578, | |
| "grad_norm": 0.055318817496299744, | |
| "learning_rate": 2.129433343261475e-05, | |
| "loss": 0.0059, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 0.5954198473282443, | |
| "grad_norm": 0.0782274380326271, | |
| "learning_rate": 2.1074883341076136e-05, | |
| "loss": 0.0044, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 0.5979643765903307, | |
| "grad_norm": 0.00423054164275527, | |
| "learning_rate": 2.0855742905423535e-05, | |
| "loss": 0.0079, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.6005089058524173, | |
| "grad_norm": 0.2912484407424927, | |
| "learning_rate": 2.063692941383754e-05, | |
| "loss": 0.0026, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 0.6030534351145038, | |
| "grad_norm": 0.3566083014011383, | |
| "learning_rate": 2.0418460128705892e-05, | |
| "loss": 0.0054, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 0.6055979643765903, | |
| "grad_norm": 0.00747159868478775, | |
| "learning_rate": 2.0200352285261553e-05, | |
| "loss": 0.0066, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 0.6081424936386769, | |
| "grad_norm": 0.3612661063671112, | |
| "learning_rate": 1.9982623090223046e-05, | |
| "loss": 0.0059, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 0.6106870229007634, | |
| "grad_norm": 0.13741767406463623, | |
| "learning_rate": 1.9765289720436987e-05, | |
| "loss": 0.0092, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.6132315521628499, | |
| "grad_norm": 0.18614216148853302, | |
| "learning_rate": 1.9548369321522986e-05, | |
| "loss": 0.0067, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 0.6157760814249363, | |
| "grad_norm": 0.6235272288322449, | |
| "learning_rate": 1.933187900652103e-05, | |
| "loss": 0.0136, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 0.6183206106870229, | |
| "grad_norm": 0.08954493701457977, | |
| "learning_rate": 1.9115835854541396e-05, | |
| "loss": 0.0032, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 0.6208651399491094, | |
| "grad_norm": 0.007935562171041965, | |
| "learning_rate": 1.890025690941731e-05, | |
| "loss": 0.0068, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 0.6234096692111959, | |
| "grad_norm": 0.9097616076469421, | |
| "learning_rate": 1.8685159178360263e-05, | |
| "loss": 0.0046, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.6259541984732825, | |
| "grad_norm": 0.6209919452667236, | |
| "learning_rate": 1.847055963061839e-05, | |
| "loss": 0.0118, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 0.628498727735369, | |
| "grad_norm": 0.4671260416507721, | |
| "learning_rate": 1.825647519613769e-05, | |
| "loss": 0.0034, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 0.6310432569974554, | |
| "grad_norm": 0.33821019530296326, | |
| "learning_rate": 1.8042922764226434e-05, | |
| "loss": 0.0174, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 0.6335877862595419, | |
| "grad_norm": 0.029848894104361534, | |
| "learning_rate": 1.7829919182222752e-05, | |
| "loss": 0.0122, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 0.6361323155216285, | |
| "grad_norm": 0.495715856552124, | |
| "learning_rate": 1.7617481254165487e-05, | |
| "loss": 0.0047, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.6361323155216285, | |
| "eval_loss": 0.027142422273755074, | |
| "eval_runtime": 133.9003, | |
| "eval_samples_per_second": 59.612, | |
| "eval_steps_per_second": 0.47, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.638676844783715, | |
| "grad_norm": 0.1676882654428482, | |
| "learning_rate": 1.7405625739468596e-05, | |
| "loss": 0.0106, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 0.6412213740458015, | |
| "grad_norm": 0.4715454876422882, | |
| "learning_rate": 1.7194369351598905e-05, | |
| "loss": 0.0082, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 0.6437659033078881, | |
| "grad_norm": 0.45407402515411377, | |
| "learning_rate": 1.698372875675762e-05, | |
| "loss": 0.0088, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 0.6463104325699746, | |
| "grad_norm": 0.004092986695468426, | |
| "learning_rate": 1.6773720572565483e-05, | |
| "loss": 0.0072, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 0.648854961832061, | |
| "grad_norm": 0.5857213735580444, | |
| "learning_rate": 1.656436136675181e-05, | |
| "loss": 0.0101, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.6513994910941476, | |
| "grad_norm": 0.4550642669200897, | |
| "learning_rate": 1.635566765584744e-05, | |
| "loss": 0.0255, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 0.6539440203562341, | |
| "grad_norm": 0.39526107907295227, | |
| "learning_rate": 1.6147655903881765e-05, | |
| "loss": 0.0092, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 0.6564885496183206, | |
| "grad_norm": 0.07752832025289536, | |
| "learning_rate": 1.5940342521083792e-05, | |
| "loss": 0.0082, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 0.6590330788804071, | |
| "grad_norm": 0.8561895489692688, | |
| "learning_rate": 1.5733743862587612e-05, | |
| "loss": 0.0112, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 0.6615776081424937, | |
| "grad_norm": 0.04887833073735237, | |
| "learning_rate": 1.5527876227142075e-05, | |
| "loss": 0.0023, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.6641221374045801, | |
| "grad_norm": 0.02125716209411621, | |
| "learning_rate": 1.532275585582499e-05, | |
| "loss": 0.0103, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 0.6666666666666666, | |
| "grad_norm": 0.006089942064136267, | |
| "learning_rate": 1.511839893076184e-05, | |
| "loss": 0.0036, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 0.6692111959287532, | |
| "grad_norm": 0.054803185164928436, | |
| "learning_rate": 1.4914821573849166e-05, | |
| "loss": 0.0029, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 0.6717557251908397, | |
| "grad_norm": 0.8407303094863892, | |
| "learning_rate": 1.4712039845482698e-05, | |
| "loss": 0.0103, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 0.6743002544529262, | |
| "grad_norm": 0.030154723674058914, | |
| "learning_rate": 1.4510069743290333e-05, | |
| "loss": 0.0049, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.6768447837150128, | |
| "grad_norm": 0.708321750164032, | |
| "learning_rate": 1.430892720087008e-05, | |
| "loss": 0.003, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 0.6793893129770993, | |
| "grad_norm": 0.295716255903244, | |
| "learning_rate": 1.4108628086532998e-05, | |
| "loss": 0.009, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 0.6819338422391857, | |
| "grad_norm": 0.8219901919364929, | |
| "learning_rate": 1.3909188202051415e-05, | |
| "loss": 0.0208, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 0.6844783715012722, | |
| "grad_norm": 0.9473980069160461, | |
| "learning_rate": 1.3710623281412221e-05, | |
| "loss": 0.013, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 0.6870229007633588, | |
| "grad_norm": 1.042126178741455, | |
| "learning_rate": 1.3512948989575685e-05, | |
| "loss": 0.008, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.6895674300254453, | |
| "grad_norm": 1.2536725997924805, | |
| "learning_rate": 1.3316180921239562e-05, | |
| "loss": 0.0063, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 0.6921119592875318, | |
| "grad_norm": 0.07743038982152939, | |
| "learning_rate": 1.3120334599608838e-05, | |
| "loss": 0.009, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 0.6946564885496184, | |
| "grad_norm": 0.19203220307826996, | |
| "learning_rate": 1.2925425475171126e-05, | |
| "loss": 0.0093, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 0.6972010178117048, | |
| "grad_norm": 0.0058543807826936245, | |
| "learning_rate": 1.2731468924477696e-05, | |
| "loss": 0.0106, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 0.6997455470737913, | |
| "grad_norm": 0.6394088864326477, | |
| "learning_rate": 1.253848024893049e-05, | |
| "loss": 0.0047, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.7022900763358778, | |
| "grad_norm": 0.13549527525901794, | |
| "learning_rate": 1.2346474673574862e-05, | |
| "loss": 0.0041, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 0.7048346055979644, | |
| "grad_norm": 0.5880483388900757, | |
| "learning_rate": 1.2155467345898602e-05, | |
| "loss": 0.0062, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 0.7073791348600509, | |
| "grad_norm": 0.04639929160475731, | |
| "learning_rate": 1.1965473334636826e-05, | |
| "loss": 0.0008, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 0.7099236641221374, | |
| "grad_norm": 0.008351466618478298, | |
| "learning_rate": 1.1776507628583258e-05, | |
| "loss": 0.0033, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 0.712468193384224, | |
| "grad_norm": 0.2375815063714981, | |
| "learning_rate": 1.1588585135407707e-05, | |
| "loss": 0.0052, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.7150127226463104, | |
| "grad_norm": 0.006800468545407057, | |
| "learning_rate": 1.1401720680479996e-05, | |
| "loss": 0.0045, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 0.7175572519083969, | |
| "grad_norm": 0.6628823280334473, | |
| "learning_rate": 1.1215929005700421e-05, | |
| "loss": 0.0063, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 0.7201017811704835, | |
| "grad_norm": 0.055454812943935394, | |
| "learning_rate": 1.1031224768336674e-05, | |
| "loss": 0.0028, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 0.72264631043257, | |
| "grad_norm": 0.02487095445394516, | |
| "learning_rate": 1.0847622539867606e-05, | |
| "loss": 0.0125, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 0.7251908396946565, | |
| "grad_norm": 0.004573671147227287, | |
| "learning_rate": 1.0665136804833553e-05, | |
| "loss": 0.0053, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.727735368956743, | |
| "grad_norm": 0.4363318383693695, | |
| "learning_rate": 1.0483781959693782e-05, | |
| "loss": 0.0151, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 0.7302798982188295, | |
| "grad_norm": 0.3757225573062897, | |
| "learning_rate": 1.0303572311690607e-05, | |
| "loss": 0.0012, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 0.732824427480916, | |
| "grad_norm": 1.0542665719985962, | |
| "learning_rate": 1.0124522077720793e-05, | |
| "loss": 0.0042, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 0.7353689567430025, | |
| "grad_norm": 0.2832266688346863, | |
| "learning_rate": 9.94664538321386e-06, | |
| "loss": 0.0062, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 0.7379134860050891, | |
| "grad_norm": 0.10864142328500748, | |
| "learning_rate": 9.769956261017823e-06, | |
| "loss": 0.0134, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.7404580152671756, | |
| "grad_norm": 0.01676347479224205, | |
| "learning_rate": 9.594468650292044e-06, | |
| "loss": 0.0135, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 0.7430025445292621, | |
| "grad_norm": 0.05719975382089615, | |
| "learning_rate": 9.420196395407623e-06, | |
| "loss": 0.0061, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 0.7455470737913485, | |
| "grad_norm": 0.009233399294316769, | |
| "learning_rate": 9.247153244855156e-06, | |
| "loss": 0.0017, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 0.7480916030534351, | |
| "grad_norm": 1.099915623664856, | |
| "learning_rate": 9.075352850160118e-06, | |
| "loss": 0.0074, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 0.7506361323155216, | |
| "grad_norm": 0.4556666910648346, | |
| "learning_rate": 8.904808764805914e-06, | |
| "loss": 0.0084, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.7531806615776081, | |
| "grad_norm": 0.9948151707649231, | |
| "learning_rate": 8.735534443164573e-06, | |
| "loss": 0.0076, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 0.7557251908396947, | |
| "grad_norm": 0.5090053081512451, | |
| "learning_rate": 8.567543239435397e-06, | |
| "loss": 0.0048, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 0.7582697201017812, | |
| "grad_norm": 0.12809348106384277, | |
| "learning_rate": 8.40084840659133e-06, | |
| "loss": 0.0062, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 0.7608142493638677, | |
| "grad_norm": 0.4823962152004242, | |
| "learning_rate": 8.235463095333553e-06, | |
| "loss": 0.0036, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 0.7633587786259542, | |
| "grad_norm": 0.551089882850647, | |
| "learning_rate": 8.071400353053894e-06, | |
| "loss": 0.0078, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.7633587786259542, | |
| "eval_loss": 0.027370482683181763, | |
| "eval_runtime": 134.0666, | |
| "eval_samples_per_second": 59.538, | |
| "eval_steps_per_second": 0.47, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.7659033078880407, | |
| "grad_norm": 0.02001293934881687, | |
| "learning_rate": 7.908673122805585e-06, | |
| "loss": 0.0051, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 0.7684478371501272, | |
| "grad_norm": 0.036044761538505554, | |
| "learning_rate": 7.747294242282136e-06, | |
| "loss": 0.0075, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 0.7709923664122137, | |
| "grad_norm": 1.4469804763793945, | |
| "learning_rate": 7.587276442804553e-06, | |
| "loss": 0.0085, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 0.7735368956743003, | |
| "grad_norm": 0.42628103494644165, | |
| "learning_rate": 7.428632348317005e-06, | |
| "loss": 0.0048, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 0.7760814249363868, | |
| "grad_norm": 0.05394347384572029, | |
| "learning_rate": 7.271374474390835e-06, | |
| "loss": 0.0017, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.7786259541984732, | |
| "grad_norm": 0.857109546661377, | |
| "learning_rate": 7.1155152272372685e-06, | |
| "loss": 0.0099, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 0.7811704834605598, | |
| "grad_norm": 0.05668023228645325, | |
| "learning_rate": 6.961066902728594e-06, | |
| "loss": 0.0155, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 0.7837150127226463, | |
| "grad_norm": 1.0613700151443481, | |
| "learning_rate": 6.808041685428232e-06, | |
| "loss": 0.0046, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 0.7862595419847328, | |
| "grad_norm": 0.012211822904646397, | |
| "learning_rate": 6.656451647629391e-06, | |
| "loss": 0.0077, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 0.7888040712468194, | |
| "grad_norm": 0.09284523874521255, | |
| "learning_rate": 6.506308748402748e-06, | |
| "loss": 0.008, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.7913486005089059, | |
| "grad_norm": 0.08356593549251556, | |
| "learning_rate": 6.3576248326529084e-06, | |
| "loss": 0.006, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 0.7938931297709924, | |
| "grad_norm": 0.15305979549884796, | |
| "learning_rate": 6.210411630184035e-06, | |
| "loss": 0.0058, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 0.7964376590330788, | |
| "grad_norm": 0.04744777828454971, | |
| "learning_rate": 6.06468075477441e-06, | |
| "loss": 0.0114, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 0.7989821882951654, | |
| "grad_norm": 0.02002541720867157, | |
| "learning_rate": 5.920443703260259e-06, | |
| "loss": 0.0049, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 0.8015267175572519, | |
| "grad_norm": 0.024022314697504044, | |
| "learning_rate": 5.777711854628712e-06, | |
| "loss": 0.0071, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.8040712468193384, | |
| "grad_norm": 0.5654802322387695, | |
| "learning_rate": 5.636496469120129e-06, | |
| "loss": 0.0167, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 0.806615776081425, | |
| "grad_norm": 0.041485343128442764, | |
| "learning_rate": 5.496808687339791e-06, | |
| "loss": 0.0088, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 0.8091603053435115, | |
| "grad_norm": 0.41280263662338257, | |
| "learning_rate": 5.358659529378954e-06, | |
| "loss": 0.0082, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 0.811704834605598, | |
| "grad_norm": 0.0841585099697113, | |
| "learning_rate": 5.22205989394553e-06, | |
| "loss": 0.0138, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 0.8142493638676844, | |
| "grad_norm": 0.8708165884017944, | |
| "learning_rate": 5.087020557504199e-06, | |
| "loss": 0.0257, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.816793893129771, | |
| "grad_norm": 0.04931897670030594, | |
| "learning_rate": 4.9535521734263315e-06, | |
| "loss": 0.0018, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 0.8193384223918575, | |
| "grad_norm": 0.06793482601642609, | |
| "learning_rate": 4.821665271149464e-06, | |
| "loss": 0.0021, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 0.821882951653944, | |
| "grad_norm": 0.4492109417915344, | |
| "learning_rate": 4.691370255346672e-06, | |
| "loss": 0.0144, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 0.8244274809160306, | |
| "grad_norm": 0.17483478784561157, | |
| "learning_rate": 4.5626774051056975e-06, | |
| "loss": 0.0057, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 0.8269720101781171, | |
| "grad_norm": 1.6676959991455078, | |
| "learning_rate": 4.435596873118042e-06, | |
| "loss": 0.007, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.8295165394402035, | |
| "grad_norm": 0.33237066864967346, | |
| "learning_rate": 4.3101386848780265e-06, | |
| "loss": 0.0024, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 0.8320610687022901, | |
| "grad_norm": 0.503776490688324, | |
| "learning_rate": 4.186312737891826e-06, | |
| "loss": 0.0057, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 0.8346055979643766, | |
| "grad_norm": 0.731706440448761, | |
| "learning_rate": 4.064128800896719e-06, | |
| "loss": 0.0139, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 0.8371501272264631, | |
| "grad_norm": 0.2808413505554199, | |
| "learning_rate": 3.943596513090323e-06, | |
| "loss": 0.0049, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 0.8396946564885496, | |
| "grad_norm": 0.3193255662918091, | |
| "learning_rate": 3.824725383370256e-06, | |
| "loss": 0.0097, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.8422391857506362, | |
| "grad_norm": 0.9438762068748474, | |
| "learning_rate": 3.707524789583891e-06, | |
| "loss": 0.0033, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 0.8447837150127226, | |
| "grad_norm": 0.0667446032166481, | |
| "learning_rate": 3.5920039777885885e-06, | |
| "loss": 0.0071, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 0.8473282442748091, | |
| "grad_norm": 0.15694209933280945, | |
| "learning_rate": 3.4781720615221973e-06, | |
| "loss": 0.0041, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 0.8498727735368957, | |
| "grad_norm": 0.11617179214954376, | |
| "learning_rate": 3.366038021084167e-06, | |
| "loss": 0.0106, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 0.8524173027989822, | |
| "grad_norm": 0.456826776266098, | |
| "learning_rate": 3.255610702827e-06, | |
| "loss": 0.0049, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.8549618320610687, | |
| "grad_norm": 0.3071352243423462, | |
| "learning_rate": 3.146898818458424e-06, | |
| "loss": 0.0121, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 0.8575063613231552, | |
| "grad_norm": 0.24180732667446136, | |
| "learning_rate": 3.0399109443540685e-06, | |
| "loss": 0.0085, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 0.8600508905852418, | |
| "grad_norm": 0.2852170467376709, | |
| "learning_rate": 2.93465552088088e-06, | |
| "loss": 0.0078, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 0.8625954198473282, | |
| "grad_norm": 0.02955169416964054, | |
| "learning_rate": 2.8311408517312816e-06, | |
| "loss": 0.0042, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 0.8651399491094147, | |
| "grad_norm": 0.08671633154153824, | |
| "learning_rate": 2.729375103268045e-06, | |
| "loss": 0.005, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.8676844783715013, | |
| "grad_norm": 0.37408682703971863, | |
| "learning_rate": 2.629366303880079e-06, | |
| "loss": 0.0107, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 0.8702290076335878, | |
| "grad_norm": 0.02283228002488613, | |
| "learning_rate": 2.5311223433490128e-06, | |
| "loss": 0.0054, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 0.8727735368956743, | |
| "grad_norm": 0.14945147931575775, | |
| "learning_rate": 2.434650972226832e-06, | |
| "loss": 0.0034, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 0.8753180661577609, | |
| "grad_norm": 0.004307675641030073, | |
| "learning_rate": 2.339959801224362e-06, | |
| "loss": 0.002, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 0.8778625954198473, | |
| "grad_norm": 0.017721090465784073, | |
| "learning_rate": 2.247056300610906e-06, | |
| "loss": 0.0095, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.8804071246819338, | |
| "grad_norm": 0.0006961692124605179, | |
| "learning_rate": 2.1559477996248747e-06, | |
| "loss": 0.0056, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 0.8829516539440203, | |
| "grad_norm": 0.030027322471141815, | |
| "learning_rate": 2.0666414858955847e-06, | |
| "loss": 0.0019, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 0.8854961832061069, | |
| "grad_norm": 0.3239416182041168, | |
| "learning_rate": 1.9791444048762433e-06, | |
| "loss": 0.0052, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 0.8880407124681934, | |
| "grad_norm": 0.5234742760658264, | |
| "learning_rate": 1.8934634592880806e-06, | |
| "loss": 0.0099, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 0.8905852417302799, | |
| "grad_norm": 0.05097776651382446, | |
| "learning_rate": 1.8096054085758457e-06, | |
| "loss": 0.0153, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.8905852417302799, | |
| "eval_loss": 0.026601266115903854, | |
| "eval_runtime": 133.9309, | |
| "eval_samples_per_second": 59.598, | |
| "eval_steps_per_second": 0.47, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.8931297709923665, | |
| "grad_norm": 0.0069605689495801926, | |
| "learning_rate": 1.727576868374478e-06, | |
| "loss": 0.0023, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 0.8956743002544529, | |
| "grad_norm": 0.22233577072620392, | |
| "learning_rate": 1.6473843099872682e-06, | |
| "loss": 0.0021, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 0.8982188295165394, | |
| "grad_norm": 0.2330171763896942, | |
| "learning_rate": 1.569034059875274e-06, | |
| "loss": 0.0035, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 0.9007633587786259, | |
| "grad_norm": 0.7454178333282471, | |
| "learning_rate": 1.4925322991582557e-06, | |
| "loss": 0.0115, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 0.9033078880407125, | |
| "grad_norm": 0.01470651663839817, | |
| "learning_rate": 1.417885063127014e-06, | |
| "loss": 0.0048, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.905852417302799, | |
| "grad_norm": 0.1470329314470291, | |
| "learning_rate": 1.3450982407672897e-06, | |
| "loss": 0.0052, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 0.9083969465648855, | |
| "grad_norm": 0.007075577508658171, | |
| "learning_rate": 1.274177574295149e-06, | |
| "loss": 0.0065, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 0.910941475826972, | |
| "grad_norm": 0.005552398506551981, | |
| "learning_rate": 1.205128658704005e-06, | |
| "loss": 0.0039, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 0.9134860050890585, | |
| "grad_norm": 0.27127352356910706, | |
| "learning_rate": 1.13795694132319e-06, | |
| "loss": 0.0081, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 0.916030534351145, | |
| "grad_norm": 0.24828727543354034, | |
| "learning_rate": 1.0726677213882319e-06, | |
| "loss": 0.0038, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.9185750636132316, | |
| "grad_norm": 0.004463238641619682, | |
| "learning_rate": 1.0092661496227979e-06, | |
| "loss": 0.0006, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 0.9211195928753181, | |
| "grad_norm": 0.019411705434322357, | |
| "learning_rate": 9.477572278323271e-07, | |
| "loss": 0.009, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 0.9236641221374046, | |
| "grad_norm": 0.04583721607923508, | |
| "learning_rate": 8.881458085094635e-07, | |
| "loss": 0.0097, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 0.926208651399491, | |
| "grad_norm": 0.017350351437926292, | |
| "learning_rate": 8.304365944511977e-07, | |
| "loss": 0.0095, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 0.9287531806615776, | |
| "grad_norm": 0.011083148419857025, | |
| "learning_rate": 7.746341383879058e-07, | |
| "loss": 0.0059, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.9312977099236641, | |
| "grad_norm": 0.0054423692636191845, | |
| "learning_rate": 7.207428426241475e-07, | |
| "loss": 0.0033, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 0.9338422391857506, | |
| "grad_norm": 0.007262678351253271, | |
| "learning_rate": 6.687669586913775e-07, | |
| "loss": 0.0015, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 0.9363867684478372, | |
| "grad_norm": 0.03224071487784386, | |
| "learning_rate": 6.187105870125387e-07, | |
| "loss": 0.0018, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 0.9389312977099237, | |
| "grad_norm": 0.5585166215896606, | |
| "learning_rate": 5.705776765785626e-07, | |
| "loss": 0.0047, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 0.9414758269720102, | |
| "grad_norm": 0.08225244283676147, | |
| "learning_rate": 5.2437202463686e-07, | |
| "loss": 0.0102, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.9440203562340967, | |
| "grad_norm": 0.015504542738199234, | |
| "learning_rate": 4.800972763917111e-07, | |
| "loss": 0.0033, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 0.9465648854961832, | |
| "grad_norm": 0.11132729053497314, | |
| "learning_rate": 4.377569247167368e-07, | |
| "loss": 0.006, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 0.9491094147582697, | |
| "grad_norm": 0.0025119101628661156, | |
| "learning_rate": 3.973543098792998e-07, | |
| "loss": 0.0064, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 0.9516539440203562, | |
| "grad_norm": 0.4965267479419708, | |
| "learning_rate": 3.588926192770292e-07, | |
| "loss": 0.0059, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 0.9541984732824428, | |
| "grad_norm": 0.27043449878692627, | |
| "learning_rate": 3.223748871863247e-07, | |
| "loss": 0.0024, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.9567430025445293, | |
| "grad_norm": 0.023476067930459976, | |
| "learning_rate": 2.8780399452301707e-07, | |
| "loss": 0.0058, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 0.9592875318066157, | |
| "grad_norm": 0.005937150679528713, | |
| "learning_rate": 2.55182668615056e-07, | |
| "loss": 0.0089, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 0.9618320610687023, | |
| "grad_norm": 0.21308811008930206, | |
| "learning_rate": 2.2451348298737352e-07, | |
| "loss": 0.0079, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 0.9643765903307888, | |
| "grad_norm": 0.09421635419130325, | |
| "learning_rate": 1.9579885715884928e-07, | |
| "loss": 0.0025, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 0.9669211195928753, | |
| "grad_norm": 0.022222932428121567, | |
| "learning_rate": 1.6904105645142444e-07, | |
| "loss": 0.0039, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.9694656488549618, | |
| "grad_norm": 0.25252196192741394, | |
| "learning_rate": 1.4424219181140286e-07, | |
| "loss": 0.0096, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 0.9720101781170484, | |
| "grad_norm": 0.037245169281959534, | |
| "learning_rate": 1.2140421964289827e-07, | |
| "loss": 0.0044, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 0.9745547073791349, | |
| "grad_norm": 0.25672975182533264, | |
| "learning_rate": 1.0052894165351045e-07, | |
| "loss": 0.0183, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 0.9770992366412213, | |
| "grad_norm": 0.18478481471538544, | |
| "learning_rate": 8.161800471217785e-08, | |
| "loss": 0.0147, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 0.9796437659033079, | |
| "grad_norm": 0.08742686361074448, | |
| "learning_rate": 6.467290071925647e-08, | |
| "loss": 0.0079, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.9821882951653944, | |
| "grad_norm": 0.004091011360287666, | |
| "learning_rate": 4.969496648881411e-08, | |
| "loss": 0.0071, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 0.9847328244274809, | |
| "grad_norm": 0.7053390741348267, | |
| "learning_rate": 3.668538364318963e-08, | |
| "loss": 0.0021, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 0.9872773536895675, | |
| "grad_norm": 0.12487643957138062, | |
| "learning_rate": 2.5645178519742576e-08, | |
| "loss": 0.0058, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 0.989821882951654, | |
| "grad_norm": 0.018872104585170746, | |
| "learning_rate": 1.6575222089917907e-08, | |
| "loss": 0.0048, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 0.9923664122137404, | |
| "grad_norm": 0.0031618671491742134, | |
| "learning_rate": 9.476229890506561e-09, | |
| "loss": 0.0034, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.9949109414758269, | |
| "grad_norm": 0.2635253071784973, | |
| "learning_rate": 4.348761967210013e-09, | |
| "loss": 0.004, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 0.9974554707379135, | |
| "grad_norm": 0.11264585703611374, | |
| "learning_rate": 1.1932228304645243e-09, | |
| "loss": 0.0014, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 6.796592060709372e-05, | |
| "learning_rate": 9.86142350556385e-12, | |
| "loss": 0.0042, | |
| "step": 3930 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3930, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.273606668827065e+18, | |
| "train_batch_size": 128, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
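
The records above follow the structure of a Hugging Face Transformers `Trainer` state log: `log_history` interleaves training entries (with `loss`, `grad_norm`, `learning_rate`, `step`) and evaluation entries (with `eval_loss` and throughput fields). As a minimal sketch of how one might inspect this file, the snippet below loads it and plots the train and eval loss curves; the filename `trainer_state.json` is an assumption (the standard name the Trainer uses when saving a checkpoint), not something stated in the log itself.

```python
# Minimal sketch: parse a Trainer state log and plot loss curves.
# Assumption: the JSON above is saved as "trainer_state.json".
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_log],
         [e["loss"] for e in train_log], label="train loss")
plt.plot([e["step"] for e in eval_log],
         [e["eval_loss"] for e in eval_log], marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()
```

For this run that would show the training loss settling near 0.005 over the final epoch, with eval loss around 0.027 at steps 3000 and 3500.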