{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7633587786259542,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002544529262086514,
      "grad_norm": 7.356313705444336,
      "learning_rate": 9.160305343511451e-07,
      "loss": 0.1163,
      "step": 10
    },
    {
      "epoch": 0.005089058524173028,
      "grad_norm": 7.439971446990967,
      "learning_rate": 1.933842239185751e-06,
      "loss": 0.1588,
      "step": 20
    },
    {
      "epoch": 0.007633587786259542,
      "grad_norm": 11.112627983093262,
      "learning_rate": 2.951653944020356e-06,
      "loss": 0.1163,
      "step": 30
    },
    {
      "epoch": 0.010178117048346057,
      "grad_norm": 11.65157413482666,
      "learning_rate": 3.969465648854962e-06,
      "loss": 0.1064,
      "step": 40
    },
    {
      "epoch": 0.01272264631043257,
      "grad_norm": 7.872356414794922,
      "learning_rate": 4.987277353689568e-06,
      "loss": 0.0418,
      "step": 50
    },
    {
      "epoch": 0.015267175572519083,
      "grad_norm": 12.023307800292969,
      "learning_rate": 6.005089058524174e-06,
      "loss": 0.0432,
      "step": 60
    },
    {
      "epoch": 0.017811704834605598,
      "grad_norm": 1.0275840759277344,
      "learning_rate": 7.022900763358779e-06,
      "loss": 0.0281,
      "step": 70
    },
    {
      "epoch": 0.020356234096692113,
      "grad_norm": 7.895046710968018,
      "learning_rate": 8.040712468193384e-06,
      "loss": 0.0345,
      "step": 80
    },
    {
      "epoch": 0.022900763358778626,
      "grad_norm": 7.354756832122803,
      "learning_rate": 9.058524173027991e-06,
      "loss": 0.0271,
      "step": 90
    },
    {
      "epoch": 0.02544529262086514,
      "grad_norm": 5.681747913360596,
      "learning_rate": 1.0076335877862595e-05,
      "loss": 0.0345,
      "step": 100
    },
    {
      "epoch": 0.027989821882951654,
      "grad_norm": 4.338479518890381,
      "learning_rate": 1.1094147582697202e-05,
      "loss": 0.0203,
      "step": 110
    },
    {
      "epoch": 0.030534351145038167,
      "grad_norm": 1.7058123350143433,
      "learning_rate": 1.2111959287531807e-05,
      "loss": 0.0167,
      "step": 120
    },
    {
      "epoch": 0.03307888040712468,
      "grad_norm": 4.322059154510498,
      "learning_rate": 1.3129770992366414e-05,
      "loss": 0.0265,
      "step": 130
    },
    {
      "epoch": 0.035623409669211195,
      "grad_norm": 0.042335961014032364,
      "learning_rate": 1.4147582697201019e-05,
      "loss": 0.0163,
      "step": 140
    },
    {
      "epoch": 0.03816793893129771,
      "grad_norm": 7.194125175476074,
      "learning_rate": 1.5165394402035624e-05,
      "loss": 0.0266,
      "step": 150
    },
    {
      "epoch": 0.04071246819338423,
      "grad_norm": 1.6283208131790161,
      "learning_rate": 1.618320610687023e-05,
      "loss": 0.0149,
      "step": 160
    },
    {
      "epoch": 0.043256997455470736,
      "grad_norm": 6.294187068939209,
      "learning_rate": 1.7201017811704836e-05,
      "loss": 0.0098,
      "step": 170
    },
    {
      "epoch": 0.04580152671755725,
      "grad_norm": 0.2349224090576172,
      "learning_rate": 1.8218829516539443e-05,
      "loss": 0.0223,
      "step": 180
    },
    {
      "epoch": 0.04834605597964377,
      "grad_norm": 4.587934494018555,
      "learning_rate": 1.923664122137405e-05,
      "loss": 0.0112,
      "step": 190
    },
    {
      "epoch": 0.05089058524173028,
      "grad_norm": 3.2867186069488525,
      "learning_rate": 2.0254452926208653e-05,
      "loss": 0.0162,
      "step": 200
    },
    {
      "epoch": 0.05343511450381679,
      "grad_norm": 1.680902123451233,
      "learning_rate": 2.127226463104326e-05,
      "loss": 0.0202,
      "step": 210
    },
    {
      "epoch": 0.05597964376590331,
      "grad_norm": 4.776252746582031,
      "learning_rate": 2.2290076335877867e-05,
      "loss": 0.016,
      "step": 220
    },
    {
      "epoch": 0.058524173027989825,
      "grad_norm": 0.5116833448410034,
      "learning_rate": 2.330788804071247e-05,
      "loss": 0.0163,
      "step": 230
    },
    {
      "epoch": 0.061068702290076333,
      "grad_norm": 0.01541766244918108,
      "learning_rate": 2.4325699745547078e-05,
      "loss": 0.0177,
      "step": 240
    },
    {
      "epoch": 0.06361323155216285,
      "grad_norm": 3.283529043197632,
      "learning_rate": 2.5343511450381678e-05,
      "loss": 0.0068,
      "step": 250
    },
    {
      "epoch": 0.06615776081424936,
      "grad_norm": 0.07980302721261978,
      "learning_rate": 2.6361323155216285e-05,
      "loss": 0.0143,
      "step": 260
    },
    {
      "epoch": 0.06870229007633588,
      "grad_norm": 1.6337662935256958,
      "learning_rate": 2.737913486005089e-05,
      "loss": 0.0338,
      "step": 270
    },
    {
      "epoch": 0.07124681933842239,
      "grad_norm": 0.6722275614738464,
      "learning_rate": 2.8396946564885498e-05,
      "loss": 0.0119,
      "step": 280
    },
    {
      "epoch": 0.0737913486005089,
      "grad_norm": 2.636549234390259,
      "learning_rate": 2.9414758269720102e-05,
      "loss": 0.0106,
      "step": 290
    },
    {
      "epoch": 0.07633587786259542,
      "grad_norm": 0.9932194352149963,
      "learning_rate": 3.043256997455471e-05,
      "loss": 0.0079,
      "step": 300
    },
    {
      "epoch": 0.07888040712468193,
      "grad_norm": 2.856503963470459,
      "learning_rate": 3.145038167938931e-05,
      "loss": 0.0209,
      "step": 310
    },
    {
      "epoch": 0.08142493638676845,
      "grad_norm": 1.5642755031585693,
      "learning_rate": 3.246819338422392e-05,
      "loss": 0.0179,
      "step": 320
    },
    {
      "epoch": 0.08396946564885496,
      "grad_norm": 0.16029702126979828,
      "learning_rate": 3.3486005089058526e-05,
      "loss": 0.0149,
      "step": 330
    },
    {
      "epoch": 0.08651399491094147,
      "grad_norm": 0.08286827802658081,
      "learning_rate": 3.450381679389313e-05,
      "loss": 0.0128,
      "step": 340
    },
    {
      "epoch": 0.089058524173028,
      "grad_norm": 3.1540822982788086,
      "learning_rate": 3.552162849872774e-05,
      "loss": 0.0132,
      "step": 350
    },
    {
      "epoch": 0.0916030534351145,
      "grad_norm": 3.3594603538513184,
      "learning_rate": 3.653944020356235e-05,
      "loss": 0.0168,
      "step": 360
    },
    {
      "epoch": 0.09414758269720101,
      "grad_norm": 4.138404369354248,
      "learning_rate": 3.755725190839695e-05,
      "loss": 0.0086,
      "step": 370
    },
    {
      "epoch": 0.09669211195928754,
      "grad_norm": 1.4163811206817627,
      "learning_rate": 3.8575063613231554e-05,
      "loss": 0.0157,
      "step": 380
    },
    {
      "epoch": 0.09923664122137404,
      "grad_norm": 0.06929302215576172,
      "learning_rate": 3.959287531806616e-05,
      "loss": 0.0124,
      "step": 390
    },
    {
      "epoch": 0.10178117048346055,
      "grad_norm": 3.256929397583008,
      "learning_rate": 3.993214588634436e-05,
      "loss": 0.0213,
      "step": 400
    },
    {
      "epoch": 0.10432569974554708,
      "grad_norm": 3.4773950576782227,
      "learning_rate": 3.9819055696918295e-05,
      "loss": 0.0196,
      "step": 410
    },
    {
      "epoch": 0.10687022900763359,
      "grad_norm": 1.9974408149719238,
      "learning_rate": 3.970596550749223e-05,
      "loss": 0.0181,
      "step": 420
    },
    {
      "epoch": 0.10941475826972011,
      "grad_norm": 0.8266643285751343,
      "learning_rate": 3.959287531806616e-05,
      "loss": 0.0182,
      "step": 430
    },
    {
      "epoch": 0.11195928753180662,
      "grad_norm": 0.04631345346570015,
      "learning_rate": 3.947978512864009e-05,
      "loss": 0.0048,
      "step": 440
    },
    {
      "epoch": 0.11450381679389313,
      "grad_norm": 3.425898790359497,
      "learning_rate": 3.9366694939214026e-05,
      "loss": 0.0092,
      "step": 450
    },
    {
      "epoch": 0.11704834605597965,
      "grad_norm": 0.24482232332229614,
      "learning_rate": 3.925360474978796e-05,
      "loss": 0.0075,
      "step": 460
    },
    {
      "epoch": 0.11959287531806616,
      "grad_norm": 1.1092677116394043,
      "learning_rate": 3.914051456036189e-05,
      "loss": 0.0047,
      "step": 470
    },
    {
      "epoch": 0.12213740458015267,
      "grad_norm": 0.07202545553445816,
      "learning_rate": 3.902742437093582e-05,
      "loss": 0.0107,
      "step": 480
    },
    {
      "epoch": 0.12468193384223919,
      "grad_norm": 0.07417860627174377,
      "learning_rate": 3.8914334181509756e-05,
      "loss": 0.0097,
      "step": 490
    },
    {
      "epoch": 0.1272264631043257,
      "grad_norm": 0.4800795912742615,
      "learning_rate": 3.880124399208369e-05,
      "loss": 0.0147,
      "step": 500
    },
    {
      "epoch": 0.1272264631043257,
      "eval_loss": 0.040407657623291016,
      "eval_runtime": 132.3251,
      "eval_samples_per_second": 60.321,
      "eval_steps_per_second": 0.476,
      "step": 500
    },
    {
      "epoch": 0.1297709923664122,
      "grad_norm": 5.036166667938232,
      "learning_rate": 3.868815380265762e-05,
      "loss": 0.0235,
      "step": 510
    },
    {
      "epoch": 0.13231552162849872,
      "grad_norm": 1.3904993534088135,
      "learning_rate": 3.8575063613231554e-05,
      "loss": 0.0144,
      "step": 520
    },
    {
      "epoch": 0.13486005089058525,
      "grad_norm": 1.0833613872528076,
      "learning_rate": 3.8461973423805486e-05,
      "loss": 0.0057,
      "step": 530
    },
    {
      "epoch": 0.13740458015267176,
      "grad_norm": 1.6842162609100342,
      "learning_rate": 3.834888323437942e-05,
      "loss": 0.0076,
      "step": 540
    },
    {
      "epoch": 0.13994910941475827,
      "grad_norm": 0.6625421643257141,
      "learning_rate": 3.823579304495336e-05,
      "loss": 0.0098,
      "step": 550
    },
    {
      "epoch": 0.14249363867684478,
      "grad_norm": 4.125272750854492,
      "learning_rate": 3.8122702855527284e-05,
      "loss": 0.0116,
      "step": 560
    },
    {
      "epoch": 0.1450381679389313,
      "grad_norm": 0.7487501502037048,
      "learning_rate": 3.8009612666101216e-05,
      "loss": 0.011,
      "step": 570
    },
    {
      "epoch": 0.1475826972010178,
      "grad_norm": 1.5776028633117676,
      "learning_rate": 3.789652247667515e-05,
      "loss": 0.0023,
      "step": 580
    },
    {
      "epoch": 0.15012722646310434,
      "grad_norm": 0.11522725969552994,
      "learning_rate": 3.778343228724908e-05,
      "loss": 0.0146,
      "step": 590
    },
    {
      "epoch": 0.15267175572519084,
      "grad_norm": 0.4145560562610626,
      "learning_rate": 3.767034209782302e-05,
      "loss": 0.0025,
      "step": 600
    },
    {
      "epoch": 0.15521628498727735,
      "grad_norm": 2.3070929050445557,
      "learning_rate": 3.755725190839695e-05,
      "loss": 0.013,
      "step": 610
    },
    {
      "epoch": 0.15776081424936386,
      "grad_norm": 0.022943010553717613,
      "learning_rate": 3.744416171897088e-05,
      "loss": 0.0148,
      "step": 620
    },
    {
      "epoch": 0.16030534351145037,
      "grad_norm": 0.046120110899209976,
      "learning_rate": 3.733107152954481e-05,
      "loss": 0.0107,
      "step": 630
    },
    {
      "epoch": 0.1628498727735369,
      "grad_norm": 4.343276500701904,
      "learning_rate": 3.721798134011875e-05,
      "loss": 0.0064,
      "step": 640
    },
    {
      "epoch": 0.16539440203562342,
      "grad_norm": 0.3732512295246124,
      "learning_rate": 3.7104891150692684e-05,
      "loss": 0.0033,
      "step": 650
    },
    {
      "epoch": 0.16793893129770993,
      "grad_norm": 1.889374852180481,
      "learning_rate": 3.699180096126661e-05,
      "loss": 0.009,
      "step": 660
    },
    {
      "epoch": 0.17048346055979643,
      "grad_norm": 0.13871723413467407,
      "learning_rate": 3.687871077184054e-05,
      "loss": 0.0071,
      "step": 670
    },
    {
      "epoch": 0.17302798982188294,
      "grad_norm": 0.022979654371738434,
      "learning_rate": 3.676562058241448e-05,
      "loss": 0.014,
      "step": 680
    },
    {
      "epoch": 0.17557251908396945,
      "grad_norm": 0.7708499431610107,
      "learning_rate": 3.6652530392988414e-05,
      "loss": 0.0233,
      "step": 690
    },
    {
      "epoch": 0.178117048346056,
      "grad_norm": 1.6740437746047974,
      "learning_rate": 3.653944020356235e-05,
      "loss": 0.0106,
      "step": 700
    },
    {
      "epoch": 0.1806615776081425,
      "grad_norm": 1.0917600393295288,
      "learning_rate": 3.642635001413627e-05,
      "loss": 0.0057,
      "step": 710
    },
    {
      "epoch": 0.183206106870229,
      "grad_norm": 0.12821227312088013,
      "learning_rate": 3.6313259824710205e-05,
      "loss": 0.0119,
      "step": 720
    },
    {
      "epoch": 0.18575063613231552,
      "grad_norm": 2.3844153881073,
      "learning_rate": 3.6200169635284144e-05,
      "loss": 0.0072,
      "step": 730
    },
    {
      "epoch": 0.18829516539440203,
      "grad_norm": 1.0997446775436401,
      "learning_rate": 3.608707944585808e-05,
      "loss": 0.0075,
      "step": 740
    },
    {
      "epoch": 0.19083969465648856,
      "grad_norm": 0.12582369148731232,
      "learning_rate": 3.597398925643201e-05,
      "loss": 0.0084,
      "step": 750
    },
    {
      "epoch": 0.19338422391857507,
      "grad_norm": 4.1404643058776855,
      "learning_rate": 3.586089906700594e-05,
      "loss": 0.013,
      "step": 760
    },
    {
      "epoch": 0.19592875318066158,
      "grad_norm": 0.9068903923034668,
      "learning_rate": 3.5747808877579875e-05,
      "loss": 0.0086,
      "step": 770
    },
    {
      "epoch": 0.1984732824427481,
      "grad_norm": 0.25046494603157043,
      "learning_rate": 3.563471868815381e-05,
      "loss": 0.0077,
      "step": 780
    },
    {
      "epoch": 0.2010178117048346,
      "grad_norm": 8.346513748168945,
      "learning_rate": 3.552162849872774e-05,
      "loss": 0.0077,
      "step": 790
    },
    {
      "epoch": 0.2035623409669211,
      "grad_norm": 2.8214240074157715,
      "learning_rate": 3.540853830930167e-05,
      "loss": 0.0198,
      "step": 800
    },
    {
      "epoch": 0.20610687022900764,
      "grad_norm": 0.48185858130455017,
      "learning_rate": 3.5295448119875605e-05,
      "loss": 0.0157,
      "step": 810
    },
    {
      "epoch": 0.20865139949109415,
      "grad_norm": 0.8089591264724731,
      "learning_rate": 3.518235793044954e-05,
      "loss": 0.004,
      "step": 820
    },
    {
      "epoch": 0.21119592875318066,
      "grad_norm": 1.6558741331100464,
      "learning_rate": 3.506926774102347e-05,
      "loss": 0.0102,
      "step": 830
    },
    {
      "epoch": 0.21374045801526717,
      "grad_norm": 2.9806978702545166,
      "learning_rate": 3.49561775515974e-05,
      "loss": 0.0174,
      "step": 840
    },
    {
      "epoch": 0.21628498727735368,
      "grad_norm": 0.01149509847164154,
      "learning_rate": 3.4843087362171335e-05,
      "loss": 0.0046,
      "step": 850
    },
    {
      "epoch": 0.21882951653944022,
      "grad_norm": 0.5450740456581116,
      "learning_rate": 3.472999717274527e-05,
      "loss": 0.0264,
      "step": 860
    },
    {
      "epoch": 0.22137404580152673,
      "grad_norm": 0.3999159336090088,
      "learning_rate": 3.46169069833192e-05,
      "loss": 0.0063,
      "step": 870
    },
    {
      "epoch": 0.22391857506361323,
      "grad_norm": 0.027704713866114616,
      "learning_rate": 3.450381679389313e-05,
      "loss": 0.0103,
      "step": 880
    },
    {
      "epoch": 0.22646310432569974,
      "grad_norm": 3.349081516265869,
      "learning_rate": 3.4390726604467065e-05,
      "loss": 0.012,
      "step": 890
    },
    {
      "epoch": 0.22900763358778625,
      "grad_norm": 0.19467805325984955,
      "learning_rate": 3.4277636415041e-05,
      "loss": 0.0104,
      "step": 900
    },
    {
      "epoch": 0.23155216284987276,
      "grad_norm": 4.603861331939697,
      "learning_rate": 3.416454622561493e-05,
      "loss": 0.0098,
      "step": 910
    },
    {
      "epoch": 0.2340966921119593,
      "grad_norm": 0.03825552016496658,
      "learning_rate": 3.405145603618886e-05,
      "loss": 0.0109,
      "step": 920
    },
    {
      "epoch": 0.2366412213740458,
      "grad_norm": 0.467573881149292,
      "learning_rate": 3.3938365846762796e-05,
      "loss": 0.007,
      "step": 930
    },
    {
      "epoch": 0.23918575063613232,
      "grad_norm": 0.002599045867100358,
      "learning_rate": 3.382527565733673e-05,
      "loss": 0.0018,
      "step": 940
    },
    {
      "epoch": 0.24173027989821882,
      "grad_norm": 0.18834111094474792,
      "learning_rate": 3.371218546791066e-05,
      "loss": 0.0094,
      "step": 950
    },
    {
      "epoch": 0.24427480916030533,
      "grad_norm": 0.24286945164203644,
      "learning_rate": 3.3599095278484593e-05,
      "loss": 0.0021,
      "step": 960
    },
    {
      "epoch": 0.24681933842239187,
      "grad_norm": 0.138252392411232,
      "learning_rate": 3.3486005089058526e-05,
      "loss": 0.0074,
      "step": 970
    },
    {
      "epoch": 0.24936386768447838,
      "grad_norm": 0.01082629058510065,
      "learning_rate": 3.337291489963246e-05,
      "loss": 0.0085,
      "step": 980
    },
    {
      "epoch": 0.25190839694656486,
      "grad_norm": 0.0677606388926506,
      "learning_rate": 3.325982471020639e-05,
      "loss": 0.0059,
      "step": 990
    },
    {
      "epoch": 0.2544529262086514,
      "grad_norm": 1.374834656715393,
      "learning_rate": 3.3146734520780324e-05,
      "loss": 0.006,
      "step": 1000
    },
    {
      "epoch": 0.2544529262086514,
      "eval_loss": 0.034901443868875504,
      "eval_runtime": 132.3371,
      "eval_samples_per_second": 60.316,
      "eval_steps_per_second": 0.476,
      "step": 1000
    },
    {
      "epoch": 0.25699745547073793,
      "grad_norm": 0.2298440933227539,
      "learning_rate": 3.3033644331354256e-05,
      "loss": 0.0058,
      "step": 1010
    },
    {
      "epoch": 0.2595419847328244,
      "grad_norm": 0.6669068932533264,
      "learning_rate": 3.292055414192819e-05,
      "loss": 0.0051,
      "step": 1020
    },
    {
      "epoch": 0.26208651399491095,
      "grad_norm": 3.5202884674072266,
      "learning_rate": 3.280746395250212e-05,
      "loss": 0.0078,
      "step": 1030
    },
    {
      "epoch": 0.26463104325699743,
      "grad_norm": 1.5344403982162476,
      "learning_rate": 3.2694373763076054e-05,
      "loss": 0.0119,
      "step": 1040
    },
    {
      "epoch": 0.26717557251908397,
      "grad_norm": 0.06722147762775421,
      "learning_rate": 3.2581283573649987e-05,
      "loss": 0.0036,
      "step": 1050
    },
    {
      "epoch": 0.2697201017811705,
      "grad_norm": 0.3634966015815735,
      "learning_rate": 3.246819338422392e-05,
      "loss": 0.0055,
      "step": 1060
    },
    {
      "epoch": 0.272264631043257,
      "grad_norm": 1.2074553966522217,
      "learning_rate": 3.235510319479785e-05,
      "loss": 0.0091,
      "step": 1070
    },
    {
      "epoch": 0.2748091603053435,
      "grad_norm": 0.20816545188426971,
      "learning_rate": 3.224201300537179e-05,
      "loss": 0.011,
      "step": 1080
    },
    {
      "epoch": 0.27735368956743,
      "grad_norm": 1.0346763134002686,
      "learning_rate": 3.212892281594572e-05,
      "loss": 0.0067,
      "step": 1090
    },
    {
      "epoch": 0.27989821882951654,
      "grad_norm": 0.12196449190378189,
      "learning_rate": 3.201583262651965e-05,
      "loss": 0.0112,
      "step": 1100
    },
    {
      "epoch": 0.2824427480916031,
      "grad_norm": 0.010421439073979855,
      "learning_rate": 3.190274243709358e-05,
      "loss": 0.0078,
      "step": 1110
    },
    {
      "epoch": 0.28498727735368956,
      "grad_norm": 3.224294662475586,
      "learning_rate": 3.178965224766752e-05,
      "loss": 0.0213,
      "step": 1120
    },
    {
      "epoch": 0.2875318066157761,
      "grad_norm": 0.03761279582977295,
      "learning_rate": 3.1676562058241454e-05,
      "loss": 0.008,
      "step": 1130
    },
    {
      "epoch": 0.2900763358778626,
      "grad_norm": 0.1224556639790535,
      "learning_rate": 3.156347186881538e-05,
      "loss": 0.0044,
      "step": 1140
    },
    {
      "epoch": 0.2926208651399491,
      "grad_norm": 0.1516578644514084,
      "learning_rate": 3.145038167938931e-05,
      "loss": 0.01,
      "step": 1150
    },
    {
      "epoch": 0.2951653944020356,
      "grad_norm": 0.4016042649745941,
      "learning_rate": 3.1337291489963245e-05,
      "loss": 0.0056,
      "step": 1160
    },
    {
      "epoch": 0.29770992366412213,
      "grad_norm": 3.278085708618164,
      "learning_rate": 3.1224201300537184e-05,
      "loss": 0.013,
      "step": 1170
    },
    {
      "epoch": 0.30025445292620867,
      "grad_norm": 0.023581985384225845,
      "learning_rate": 3.111111111111112e-05,
      "loss": 0.0042,
      "step": 1180
    },
    {
      "epoch": 0.30279898218829515,
      "grad_norm": 0.17346112430095673,
      "learning_rate": 3.099802092168504e-05,
      "loss": 0.0117,
      "step": 1190
    },
    {
      "epoch": 0.3053435114503817,
      "grad_norm": 0.6197765469551086,
      "learning_rate": 3.0884930732258975e-05,
      "loss": 0.0068,
      "step": 1200
    },
    {
      "epoch": 0.30788804071246817,
      "grad_norm": 0.013691466301679611,
      "learning_rate": 3.0771840542832914e-05,
      "loss": 0.0075,
      "step": 1210
    },
    {
      "epoch": 0.3104325699745547,
      "grad_norm": 0.006097413599491119,
      "learning_rate": 3.065875035340685e-05,
      "loss": 0.0088,
      "step": 1220
    },
    {
      "epoch": 0.31297709923664124,
      "grad_norm": 0.6474159359931946,
      "learning_rate": 3.054566016398078e-05,
      "loss": 0.0137,
      "step": 1230
    },
    {
      "epoch": 0.3155216284987277,
      "grad_norm": 0.12056040018796921,
      "learning_rate": 3.043256997455471e-05,
      "loss": 0.0029,
      "step": 1240
    },
    {
      "epoch": 0.31806615776081426,
      "grad_norm": 0.007047746796160936,
      "learning_rate": 3.031947978512864e-05,
      "loss": 0.0091,
      "step": 1250
    },
    {
      "epoch": 0.32061068702290074,
      "grad_norm": 0.02806812897324562,
      "learning_rate": 3.0206389595702577e-05,
      "loss": 0.0197,
      "step": 1260
    },
    {
      "epoch": 0.3231552162849873,
      "grad_norm": 0.07042030245065689,
      "learning_rate": 3.009329940627651e-05,
      "loss": 0.0068,
      "step": 1270
    },
    {
      "epoch": 0.3256997455470738,
      "grad_norm": 0.11463948339223862,
      "learning_rate": 2.998020921685044e-05,
      "loss": 0.0042,
      "step": 1280
    },
    {
      "epoch": 0.3282442748091603,
      "grad_norm": 0.9267715811729431,
      "learning_rate": 2.986711902742437e-05,
      "loss": 0.0023,
      "step": 1290
    },
    {
      "epoch": 0.33078880407124683,
      "grad_norm": 0.03568391874432564,
      "learning_rate": 2.9754028837998307e-05,
      "loss": 0.006,
      "step": 1300
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 1.4884401559829712,
      "learning_rate": 2.964093864857224e-05,
      "loss": 0.0118,
      "step": 1310
    },
    {
      "epoch": 0.33587786259541985,
      "grad_norm": 0.021503394469618797,
      "learning_rate": 2.9527848459146173e-05,
      "loss": 0.0086,
      "step": 1320
    },
    {
      "epoch": 0.3384223918575064,
      "grad_norm": 1.2209817171096802,
      "learning_rate": 2.9414758269720102e-05,
      "loss": 0.0035,
      "step": 1330
    },
    {
      "epoch": 0.34096692111959287,
      "grad_norm": 2.962817668914795,
      "learning_rate": 2.9301668080294038e-05,
      "loss": 0.0132,
      "step": 1340
    },
    {
      "epoch": 0.3435114503816794,
      "grad_norm": 0.5963621139526367,
      "learning_rate": 2.918857789086797e-05,
      "loss": 0.0123,
      "step": 1350
    },
    {
      "epoch": 0.3460559796437659,
      "grad_norm": 4.348054885864258,
      "learning_rate": 2.9075487701441903e-05,
      "loss": 0.0173,
      "step": 1360
    },
    {
      "epoch": 0.3486005089058524,
      "grad_norm": 0.03299657627940178,
      "learning_rate": 2.8962397512015835e-05,
      "loss": 0.0053,
      "step": 1370
    },
    {
      "epoch": 0.3511450381679389,
      "grad_norm": 1.095913290977478,
      "learning_rate": 2.8849307322589765e-05,
      "loss": 0.0098,
      "step": 1380
    },
    {
      "epoch": 0.35368956743002544,
      "grad_norm": 1.9477157592773438,
      "learning_rate": 2.87362171331637e-05,
      "loss": 0.0156,
      "step": 1390
    },
    {
      "epoch": 0.356234096692112,
      "grad_norm": 0.4195559024810791,
      "learning_rate": 2.8623126943737633e-05,
      "loss": 0.0054,
      "step": 1400
    },
    {
      "epoch": 0.35877862595419846,
      "grad_norm": 0.5277218222618103,
      "learning_rate": 2.8510036754311566e-05,
      "loss": 0.0099,
      "step": 1410
    },
    {
      "epoch": 0.361323155216285,
      "grad_norm": 1.7285236120224,
      "learning_rate": 2.8396946564885498e-05,
      "loss": 0.0093,
      "step": 1420
    },
    {
      "epoch": 0.3638676844783715,
      "grad_norm": 0.046280182898044586,
      "learning_rate": 2.8283856375459434e-05,
      "loss": 0.0051,
      "step": 1430
    },
    {
      "epoch": 0.366412213740458,
      "grad_norm": 0.3064826726913452,
      "learning_rate": 2.8170766186033363e-05,
      "loss": 0.0116,
      "step": 1440
    },
    {
      "epoch": 0.36895674300254455,
      "grad_norm": 0.011129036545753479,
      "learning_rate": 2.8057675996607296e-05,
      "loss": 0.0097,
      "step": 1450
    },
    {
      "epoch": 0.37150127226463103,
      "grad_norm": 0.027697764337062836,
      "learning_rate": 2.794458580718123e-05,
      "loss": 0.0112,
      "step": 1460
    },
    {
      "epoch": 0.37404580152671757,
      "grad_norm": 0.6371593475341797,
      "learning_rate": 2.783149561775516e-05,
      "loss": 0.0017,
      "step": 1470
    },
    {
      "epoch": 0.37659033078880405,
      "grad_norm": 0.15575116872787476,
      "learning_rate": 2.7718405428329097e-05,
      "loss": 0.0108,
      "step": 1480
    },
    {
      "epoch": 0.3791348600508906,
      "grad_norm": 0.43781110644340515,
      "learning_rate": 2.7605315238903026e-05,
      "loss": 0.008,
      "step": 1490
    },
    {
      "epoch": 0.3816793893129771,
      "grad_norm": 0.06556320935487747,
      "learning_rate": 2.749222504947696e-05,
      "loss": 0.014,
      "step": 1500
    },
    {
      "epoch": 0.3816793893129771,
      "eval_loss": 0.03299512341618538,
      "eval_runtime": 132.3085,
      "eval_samples_per_second": 60.329,
      "eval_steps_per_second": 0.476,
      "step": 1500
    },
    {
      "epoch": 0.3842239185750636,
      "grad_norm": 2.4097580909729004,
      "learning_rate": 2.737913486005089e-05,
      "loss": 0.0168,
      "step": 1510
    },
    {
      "epoch": 0.38676844783715014,
      "grad_norm": 1.6767481565475464,
      "learning_rate": 2.7266044670624827e-05,
      "loss": 0.0042,
      "step": 1520
    },
    {
      "epoch": 0.3893129770992366,
      "grad_norm": 0.7469854950904846,
      "learning_rate": 2.715295448119876e-05,
      "loss": 0.0063,
      "step": 1530
    },
    {
      "epoch": 0.39185750636132316,
      "grad_norm": 0.013593906536698341,
      "learning_rate": 2.703986429177269e-05,
      "loss": 0.003,
      "step": 1540
    },
    {
      "epoch": 0.3944020356234097,
      "grad_norm": 2.1557767391204834,
      "learning_rate": 2.692677410234662e-05,
      "loss": 0.0058,
      "step": 1550
    },
    {
      "epoch": 0.3969465648854962,
      "grad_norm": 0.4056849479675293,
      "learning_rate": 2.6813683912920558e-05,
      "loss": 0.0116,
      "step": 1560
    },
    {
      "epoch": 0.3994910941475827,
      "grad_norm": 0.5583807826042175,
      "learning_rate": 2.670059372349449e-05,
      "loss": 0.0029,
      "step": 1570
    },
    {
      "epoch": 0.4020356234096692,
      "grad_norm": 0.01837926357984543,
      "learning_rate": 2.6587503534068423e-05,
      "loss": 0.0085,
      "step": 1580
    },
    {
      "epoch": 0.40458015267175573,
      "grad_norm": 3.254915475845337,
      "learning_rate": 2.6474413344642352e-05,
      "loss": 0.0103,
      "step": 1590
    },
    {
      "epoch": 0.4071246819338422,
      "grad_norm": 0.11980800330638885,
      "learning_rate": 2.6361323155216285e-05,
      "loss": 0.0057,
      "step": 1600
    },
    {
      "epoch": 0.40966921119592875,
      "grad_norm": 0.0031275292858481407,
      "learning_rate": 2.624823296579022e-05,
      "loss": 0.0021,
      "step": 1610
    },
    {
      "epoch": 0.4122137404580153,
      "grad_norm": 0.08046354353427887,
      "learning_rate": 2.6135142776364153e-05,
      "loss": 0.0012,
      "step": 1620
    },
    {
      "epoch": 0.41475826972010177,
      "grad_norm": 1.5370029211044312,
      "learning_rate": 2.6022052586938086e-05,
      "loss": 0.0031,
      "step": 1630
    },
    {
      "epoch": 0.4173027989821883,
      "grad_norm": 1.8733289241790771,
      "learning_rate": 2.5908962397512015e-05,
      "loss": 0.008,
      "step": 1640
    },
    {
      "epoch": 0.4198473282442748,
      "grad_norm": 0.003946431912481785,
      "learning_rate": 2.5795872208085954e-05,
      "loss": 0.0043,
      "step": 1650
    },
    {
      "epoch": 0.4223918575063613,
      "grad_norm": 0.09077942371368408,
      "learning_rate": 2.5682782018659883e-05,
      "loss": 0.0101,
      "step": 1660
    },
    {
      "epoch": 0.42493638676844786,
      "grad_norm": 0.5937609076499939,
      "learning_rate": 2.5569691829233816e-05,
      "loss": 0.0026,
      "step": 1670
    },
    {
      "epoch": 0.42748091603053434,
      "grad_norm": 0.23827993869781494,
      "learning_rate": 2.545660163980775e-05,
      "loss": 0.0093,
      "step": 1680
    },
    {
      "epoch": 0.4300254452926209,
      "grad_norm": 0.28849929571151733,
      "learning_rate": 2.5343511450381678e-05,
      "loss": 0.0086,
      "step": 1690
    },
    {
      "epoch": 0.43256997455470736,
      "grad_norm": 0.36202535033226013,
      "learning_rate": 2.5230421260955617e-05,
      "loss": 0.006,
      "step": 1700
    },
    {
      "epoch": 0.4351145038167939,
      "grad_norm": 2.5391650199890137,
      "learning_rate": 2.5117331071529546e-05,
      "loss": 0.0069,
      "step": 1710
    },
    {
      "epoch": 0.43765903307888043,
      "grad_norm": 0.0037785761523991823,
      "learning_rate": 2.500424088210348e-05,
      "loss": 0.0071,
      "step": 1720
    },
    {
      "epoch": 0.4402035623409669,
      "grad_norm": 3.466691017150879,
      "learning_rate": 2.489115069267741e-05,
      "loss": 0.0136,
      "step": 1730
    },
    {
      "epoch": 0.44274809160305345,
      "grad_norm": 0.08069751411676407,
      "learning_rate": 2.4778060503251347e-05,
      "loss": 0.0042,
      "step": 1740
    },
    {
      "epoch": 0.44529262086513993,
      "grad_norm": 1.002782940864563,
      "learning_rate": 2.466497031382528e-05,
      "loss": 0.0051,
      "step": 1750
    },
    {
      "epoch": 0.44783715012722647,
      "grad_norm": 0.026038512587547302,
      "learning_rate": 2.455188012439921e-05,
      "loss": 0.0082,
      "step": 1760
    },
    {
      "epoch": 0.45038167938931295,
      "grad_norm": 1.3340743780136108,
      "learning_rate": 2.443878993497314e-05,
      "loss": 0.0074,
      "step": 1770
    },
    {
      "epoch": 0.4529262086513995,
      "grad_norm": 2.351045608520508,
      "learning_rate": 2.4325699745547078e-05,
      "loss": 0.0146,
      "step": 1780
    },
    {
      "epoch": 0.455470737913486,
      "grad_norm": 0.40506449341773987,
      "learning_rate": 2.421260955612101e-05,
      "loss": 0.0068,
      "step": 1790
    },
    {
      "epoch": 0.4580152671755725,
      "grad_norm": 1.7703135013580322,
      "learning_rate": 2.4099519366694943e-05,
      "loss": 0.006,
      "step": 1800
    },
    {
      "epoch": 0.46055979643765904,
      "grad_norm": 0.23942232131958008,
      "learning_rate": 2.3986429177268872e-05,
      "loss": 0.0076,
      "step": 1810
    },
    {
      "epoch": 0.4631043256997455,
      "grad_norm": 0.4366392493247986,
      "learning_rate": 2.3873338987842804e-05,
      "loss": 0.0049,
      "step": 1820
    },
    {
      "epoch": 0.46564885496183206,
      "grad_norm": 0.13517136871814728,
      "learning_rate": 2.376024879841674e-05,
      "loss": 0.0083,
      "step": 1830
    },
    {
      "epoch": 0.4681933842239186,
      "grad_norm": 0.01784752495586872,
      "learning_rate": 2.3647158608990673e-05,
      "loss": 0.005,
      "step": 1840
    },
    {
      "epoch": 0.4707379134860051,
      "grad_norm": 0.09187888354063034,
      "learning_rate": 2.3534068419564605e-05,
      "loss": 0.0076,
      "step": 1850
    },
    {
      "epoch": 0.4732824427480916,
      "grad_norm": 1.0796499252319336,
      "learning_rate": 2.3420978230138535e-05,
      "loss": 0.0118,
      "step": 1860
    },
    {
      "epoch": 0.4758269720101781,
      "grad_norm": 0.013372441753745079,
      "learning_rate": 2.330788804071247e-05,
      "loss": 0.0114,
      "step": 1870
    },
    {
      "epoch": 0.47837150127226463,
      "grad_norm": 1.0882402658462524,
      "learning_rate": 2.3194797851286403e-05,
      "loss": 0.0037,
      "step": 1880
    },
    {
      "epoch": 0.48091603053435117,
      "grad_norm": 3.339613199234009,
      "learning_rate": 2.3081707661860336e-05,
      "loss": 0.0112,
      "step": 1890
    },
    {
      "epoch": 0.48346055979643765,
      "grad_norm": 0.45970460772514343,
      "learning_rate": 2.296861747243427e-05,
      "loss": 0.0084,
      "step": 1900
    },
    {
      "epoch": 0.4860050890585242,
      "grad_norm": 0.5688272714614868,
      "learning_rate": 2.2855527283008204e-05,
      "loss": 0.0052,
      "step": 1910
    },
    {
      "epoch": 0.48854961832061067,
      "grad_norm": 0.9630580544471741,
      "learning_rate": 2.2742437093582133e-05,
      "loss": 0.0015,
      "step": 1920
    },
    {
      "epoch": 0.4910941475826972,
      "grad_norm": 3.6641829013824463,
      "learning_rate": 2.2629346904156066e-05,
      "loss": 0.0102,
      "step": 1930
    },
    {
      "epoch": 0.49363867684478374,
      "grad_norm": 2.901209831237793,
      "learning_rate": 2.251625671473e-05,
      "loss": 0.0091,
      "step": 1940
    },
    {
      "epoch": 0.4961832061068702,
      "grad_norm": 0.008068634197115898,
      "learning_rate": 2.240316652530393e-05,
      "loss": 0.0095,
      "step": 1950
    },
    {
      "epoch": 0.49872773536895676,
      "grad_norm": 0.2659095525741577,
      "learning_rate": 2.2290076335877867e-05,
      "loss": 0.0053,
      "step": 1960
    },
    {
      "epoch": 0.5012722646310432,
      "grad_norm": 2.0646238327026367,
      "learning_rate": 2.2176986146451796e-05,
      "loss": 0.0065,
      "step": 1970
    },
    {
      "epoch": 0.5038167938931297,
      "grad_norm": 0.686174213886261,
      "learning_rate": 2.206389595702573e-05,
      "loss": 0.0053,
      "step": 1980
    },
    {
      "epoch": 0.5063613231552163,
      "grad_norm": 0.06601180136203766,
      "learning_rate": 2.195080576759966e-05,
      "loss": 0.0029,
      "step": 1990
    },
    {
      "epoch": 0.5089058524173028,
      "grad_norm": 0.7958572506904602,
      "learning_rate": 2.1837715578173597e-05,
      "loss": 0.0025,
      "step": 2000
    },
    {
      "epoch": 0.5089058524173028,
      "eval_loss": 0.030569521710276604,
      "eval_runtime": 132.2651,
      "eval_samples_per_second": 60.348,
      "eval_steps_per_second": 0.476,
      "step": 2000
    },
    {
      "epoch": 0.5114503816793893,
      "grad_norm": 0.03702898323535919,
      "learning_rate": 2.172462538874753e-05,
      "loss": 0.0175,
      "step": 2010
    },
    {
      "epoch": 0.5139949109414759,
      "grad_norm": 0.0648573562502861,
      "learning_rate": 2.161153519932146e-05,
      "loss": 0.0072,
      "step": 2020
    },
    {
      "epoch": 0.5165394402035624,
      "grad_norm": 0.19452892243862152,
      "learning_rate": 2.1498445009895392e-05,
      "loss": 0.0062,
      "step": 2030
    },
    {
      "epoch": 0.5190839694656488,
      "grad_norm": 2.057232141494751,
      "learning_rate": 2.1385354820469324e-05,
      "loss": 0.0087,
      "step": 2040
    },
    {
      "epoch": 0.5216284987277354,
      "grad_norm": 0.8806775808334351,
      "learning_rate": 2.127226463104326e-05,
      "loss": 0.0108,
      "step": 2050
    },
    {
      "epoch": 0.5241730279898219,
      "grad_norm": 2.1051242351531982,
      "learning_rate": 2.1159174441617193e-05,
      "loss": 0.0063,
      "step": 2060
    },
    {
      "epoch": 0.5267175572519084,
      "grad_norm": 0.4909399151802063,
      "learning_rate": 2.1046084252191122e-05,
      "loss": 0.0137,
      "step": 2070
    },
    {
      "epoch": 0.5292620865139949,
      "grad_norm": 2.7490108013153076,
      "learning_rate": 2.0932994062765055e-05,
      "loss": 0.0161,
      "step": 2080
    },
    {
      "epoch": 0.5318066157760815,
      "grad_norm": 0.005269869230687618,
      "learning_rate": 2.081990387333899e-05,
      "loss": 0.0108,
      "step": 2090
    },
    {
      "epoch": 0.5343511450381679,
      "grad_norm": 1.9901833534240723,
      "learning_rate": 2.0706813683912923e-05,
      "loss": 0.0126,
      "step": 2100
    },
    {
      "epoch": 0.5368956743002544,
      "grad_norm": 0.10248175263404846,
      "learning_rate": 2.0593723494486856e-05,
      "loss": 0.0112,
      "step": 2110
    },
    {
      "epoch": 0.539440203562341,
      "grad_norm": 0.6750550270080566,
      "learning_rate": 2.0480633305060785e-05,
      "loss": 0.0083,
      "step": 2120
    },
    {
      "epoch": 0.5419847328244275,
      "grad_norm": 0.05727030336856842,
      "learning_rate": 2.0367543115634724e-05,
      "loss": 0.0057,
      "step": 2130
    },
    {
      "epoch": 0.544529262086514,
      "grad_norm": 0.0749388262629509,
      "learning_rate": 2.0254452926208653e-05,
      "loss": 0.0091,
      "step": 2140
    },
    {
      "epoch": 0.5470737913486005,
      "grad_norm": 0.5812210440635681,
      "learning_rate": 2.0141362736782586e-05,
      "loss": 0.0122,
      "step": 2150
    },
    {
      "epoch": 0.549618320610687,
      "grad_norm": 0.984972357749939,
      "learning_rate": 2.002827254735652e-05,
      "loss": 0.0054,
      "step": 2160
    },
    {
      "epoch": 0.5521628498727735,
      "grad_norm": 1.9045511484146118,
      "learning_rate": 1.991518235793045e-05,
      "loss": 0.0068,
      "step": 2170
    },
    {
      "epoch": 0.55470737913486,
      "grad_norm": 1.6718263626098633,
      "learning_rate": 1.9802092168504384e-05,
      "loss": 0.0097,
      "step": 2180
    },
    {
      "epoch": 0.5572519083969466,
      "grad_norm": 0.09667091816663742,
      "learning_rate": 1.9689001979078316e-05,
      "loss": 0.0066,
      "step": 2190
    },
    {
      "epoch": 0.5597964376590331,
      "grad_norm": 0.021367337554693222,
      "learning_rate": 1.957591178965225e-05,
      "loss": 0.0124,
      "step": 2200
    },
    {
      "epoch": 0.5623409669211196,
      "grad_norm": 2.480168342590332,
      "learning_rate": 1.946282160022618e-05,
      "loss": 0.0038,
      "step": 2210
    },
    {
      "epoch": 0.5648854961832062,
      "grad_norm": 1.3038026094436646,
      "learning_rate": 1.9349731410800114e-05,
      "loss": 0.0099,
      "step": 2220
    },
    {
      "epoch": 0.5674300254452926,
      "grad_norm": 3.30692982673645,
      "learning_rate": 1.923664122137405e-05,
      "loss": 0.0168,
      "step": 2230
    },
    {
      "epoch": 0.5699745547073791,
      "grad_norm": 0.8603412508964539,
      "learning_rate": 1.912355103194798e-05,
      "loss": 0.0039,
      "step": 2240
    },
    {
      "epoch": 0.5725190839694656,
      "grad_norm": 0.08034808933734894,
      "learning_rate": 1.901046084252191e-05,
      "loss": 0.0093,
      "step": 2250
    },
    {
      "epoch": 0.5750636132315522,
      "grad_norm": 0.02945304848253727,
      "learning_rate": 1.8897370653095844e-05,
      "loss": 0.0045,
      "step": 2260
    },
    {
      "epoch": 0.5776081424936387,
      "grad_norm": 0.4240521788597107,
      "learning_rate": 1.8784280463669777e-05,
      "loss": 0.0052,
      "step": 2270
    },
    {
      "epoch": 0.5801526717557252,
      "grad_norm": 0.6077271103858948,
      "learning_rate": 1.8671190274243713e-05,
      "loss": 0.0103,
      "step": 2280
    },
    {
      "epoch": 0.5826972010178118,
      "grad_norm": 2.4900014400482178,
      "learning_rate": 1.8558100084817642e-05,
      "loss": 0.0055,
      "step": 2290
    },
    {
      "epoch": 0.5852417302798982,
      "grad_norm": 0.006987131666392088,
      "learning_rate": 1.8445009895391578e-05,
      "loss": 0.0004,
      "step": 2300
    },
    {
      "epoch": 0.5877862595419847,
      "grad_norm": 0.2958974540233612,
      "learning_rate": 1.8331919705965507e-05,
      "loss": 0.002,
      "step": 2310
    },
    {
      "epoch": 0.5903307888040712,
      "grad_norm": 0.26562365889549255,
      "learning_rate": 1.8218829516539443e-05,
      "loss": 0.0109,
      "step": 2320
    },
    {
      "epoch": 0.5928753180661578,
      "grad_norm": 0.03657606989145279,
      "learning_rate": 1.8105739327113376e-05,
      "loss": 0.004,
      "step": 2330
    },
    {
      "epoch": 0.5954198473282443,
      "grad_norm": 0.2807680070400238,
      "learning_rate": 1.7992649137687308e-05,
      "loss": 0.0043,
      "step": 2340
    },
    {
      "epoch": 0.5979643765903307,
      "grad_norm": 0.0042809853330254555,
      "learning_rate": 1.787955894826124e-05,
      "loss": 0.0101,
      "step": 2350
    },
    {
      "epoch": 0.6005089058524173,
      "grad_norm": 2.7109897136688232,
      "learning_rate": 1.776646875883517e-05,
      "loss": 0.0047,
      "step": 2360
    },
    {
      "epoch": 0.6030534351145038,
      "grad_norm": 0.8760998249053955,
      "learning_rate": 1.7653378569409106e-05,
      "loss": 0.0077,
      "step": 2370
    },
    {
      "epoch": 0.6055979643765903,
      "grad_norm": 0.06611794233322144,
      "learning_rate": 1.754028837998304e-05,
      "loss": 0.0071,
      "step": 2380
    },
    {
      "epoch": 0.6081424936386769,
      "grad_norm": 1.071660041809082,
      "learning_rate": 1.742719819055697e-05,
      "loss": 0.0092,
      "step": 2390
    },
    {
      "epoch": 0.6106870229007634,
      "grad_norm": 0.6373600959777832,
      "learning_rate": 1.7314108001130904e-05,
      "loss": 0.0099,
      "step": 2400
    },
    {
      "epoch": 0.6132315521628499,
      "grad_norm": 0.3777238428592682,
      "learning_rate": 1.7201017811704836e-05,
      "loss": 0.005,
      "step": 2410
    },
    {
      "epoch": 0.6157760814249363,
      "grad_norm": 1.357934832572937,
      "learning_rate": 1.708792762227877e-05,
      "loss": 0.0143,
      "step": 2420
    },
    {
      "epoch": 0.6183206106870229,
      "grad_norm": 0.09793155640363693,
      "learning_rate": 1.69748374328527e-05,
      "loss": 0.0031,
      "step": 2430
    },
    {
      "epoch": 0.6208651399491094,
      "grad_norm": 0.029055587947368622,
      "learning_rate": 1.6861747243426634e-05,
      "loss": 0.0079,
      "step": 2440
    },
    {
      "epoch": 0.6234096692111959,
      "grad_norm": 2.053697109222412,
      "learning_rate": 1.6748657054000566e-05,
      "loss": 0.0096,
      "step": 2450
    },
    {
      "epoch": 0.6259541984732825,
      "grad_norm": 2.4707324504852295,
      "learning_rate": 1.66355668645745e-05,
      "loss": 0.0146,
      "step": 2460
    },
    {
      "epoch": 0.628498727735369,
      "grad_norm": 1.1234222650527954,
      "learning_rate": 1.652247667514843e-05,
      "loss": 0.004,
      "step": 2470
    },
    {
      "epoch": 0.6310432569974554,
      "grad_norm": 1.3815442323684692,
      "learning_rate": 1.6409386485722364e-05,
      "loss": 0.018,
      "step": 2480
    },
    {
      "epoch": 0.6335877862595419,
      "grad_norm": 0.029578784480690956,
      "learning_rate": 1.6296296296296297e-05,
      "loss": 0.0087,
      "step": 2490
    },
    {
      "epoch": 0.6361323155216285,
      "grad_norm": 1.1439350843429565,
      "learning_rate": 1.618320610687023e-05,
      "loss": 0.003,
      "step": 2500
    },
    {
      "epoch": 0.6361323155216285,
      "eval_loss": 0.02840873785316944,
      "eval_runtime": 132.2725,
      "eval_samples_per_second": 60.345,
      "eval_steps_per_second": 0.476,
      "step": 2500
    },
    {
      "epoch": 0.638676844783715,
      "grad_norm": 0.31489187479019165,
      "learning_rate": 1.6070115917444162e-05,
      "loss": 0.0113,
      "step": 2510
    },
    {
      "epoch": 0.6412213740458015,
      "grad_norm": 2.010575294494629,
      "learning_rate": 1.5957025728018098e-05,
      "loss": 0.0063,
      "step": 2520
    },
    {
      "epoch": 0.6437659033078881,
      "grad_norm": 0.5254458785057068,
      "learning_rate": 1.5843935538592027e-05,
      "loss": 0.0063,
      "step": 2530
    },
    {
      "epoch": 0.6463104325699746,
      "grad_norm": 0.024824144318699837,
      "learning_rate": 1.5730845349165963e-05,
      "loss": 0.0047,
      "step": 2540
    },
    {
      "epoch": 0.648854961832061,
      "grad_norm": 2.5982894897460938,
      "learning_rate": 1.5617755159739892e-05,
      "loss": 0.0091,
      "step": 2550
    },
    {
      "epoch": 0.6513994910941476,
      "grad_norm": 0.7221731543540955,
      "learning_rate": 1.5504664970313828e-05,
      "loss": 0.0277,
      "step": 2560
    },
    {
      "epoch": 0.6539440203562341,
      "grad_norm": 1.3815771341323853,
      "learning_rate": 1.539157478088776e-05,
      "loss": 0.009,
      "step": 2570
    },
    {
      "epoch": 0.6564885496183206,
      "grad_norm": 0.06710302084684372,
      "learning_rate": 1.5278484591461693e-05,
      "loss": 0.0087,
      "step": 2580
    },
    {
      "epoch": 0.6590330788804071,
      "grad_norm": 3.2860422134399414,
      "learning_rate": 1.5165394402035624e-05,
      "loss": 0.0125,
      "step": 2590
    },
    {
      "epoch": 0.6615776081424937,
      "grad_norm": 0.16810142993927002,
      "learning_rate": 1.5052304212609557e-05,
      "loss": 0.0026,
      "step": 2600
    },
    {
      "epoch": 0.6641221374045801,
      "grad_norm": 0.04981774836778641,
      "learning_rate": 1.493921402318349e-05,
      "loss": 0.0115,
      "step": 2610
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.015164737589657307,
      "learning_rate": 1.4826123833757422e-05,
      "loss": 0.0038,
      "step": 2620
    },
    {
      "epoch": 0.6692111959287532,
      "grad_norm": 0.07419941574335098,
      "learning_rate": 1.4713033644331356e-05,
      "loss": 0.0026,
      "step": 2630
    },
    {
      "epoch": 0.6717557251908397,
      "grad_norm": 3.2604362964630127,
      "learning_rate": 1.4599943454905287e-05,
      "loss": 0.0059,
      "step": 2640
    },
    {
      "epoch": 0.6743002544529262,
      "grad_norm": 0.026491310447454453,
      "learning_rate": 1.4486853265479221e-05,
      "loss": 0.0039,
      "step": 2650
    },
    {
      "epoch": 0.6768447837150128,
      "grad_norm": 1.043686866760254,
      "learning_rate": 1.4373763076053154e-05,
      "loss": 0.003,
      "step": 2660
    },
    {
      "epoch": 0.6793893129770993,
      "grad_norm": 0.6336632966995239,
      "learning_rate": 1.4260672886627088e-05,
      "loss": 0.0096,
      "step": 2670
    },
    {
      "epoch": 0.6819338422391857,
      "grad_norm": 2.5435397624969482,
      "learning_rate": 1.4147582697201019e-05,
      "loss": 0.0246,
      "step": 2680
    },
    {
      "epoch": 0.6844783715012722,
      "grad_norm": 3.237632989883423,
      "learning_rate": 1.4034492507774953e-05,
      "loss": 0.0167,
      "step": 2690
    },
    {
      "epoch": 0.6870229007633588,
      "grad_norm": 2.2935593128204346,
      "learning_rate": 1.3921402318348884e-05,
      "loss": 0.0049,
      "step": 2700
    },
    {
      "epoch": 0.6895674300254453,
      "grad_norm": 3.847285509109497,
      "learning_rate": 1.3808312128922817e-05,
      "loss": 0.0093,
      "step": 2710
    },
    {
      "epoch": 0.6921119592875318,
      "grad_norm": 0.15955288708209991,
      "learning_rate": 1.369522193949675e-05,
      "loss": 0.0068,
      "step": 2720
    },
    {
      "epoch": 0.6946564885496184,
      "grad_norm": 1.2124927043914795,
      "learning_rate": 1.3582131750070682e-05,
      "loss": 0.0098,
      "step": 2730
    },
    {
      "epoch": 0.6972010178117048,
      "grad_norm": 0.009976382367312908,
      "learning_rate": 1.3469041560644616e-05,
      "loss": 0.0064,
      "step": 2740
    },
    {
      "epoch": 0.6997455470737913,
      "grad_norm": 0.8196397423744202,
      "learning_rate": 1.3355951371218547e-05,
      "loss": 0.0054,
      "step": 2750
    },
    {
      "epoch": 0.7022900763358778,
      "grad_norm": 0.3890758454799652,
      "learning_rate": 1.3242861181792481e-05,
      "loss": 0.0063,
      "step": 2760
    },
    {
      "epoch": 0.7048346055979644,
      "grad_norm": 0.06490058451890945,
      "learning_rate": 1.3129770992366414e-05,
      "loss": 0.0054,
      "step": 2770
    },
    {
      "epoch": 0.7073791348600509,
      "grad_norm": 0.036454420536756516,
      "learning_rate": 1.3016680802940346e-05,
      "loss": 0.0015,
      "step": 2780
    },
    {
      "epoch": 0.7099236641221374,
      "grad_norm": 0.004566237796097994,
      "learning_rate": 1.2903590613514279e-05,
      "loss": 0.002,
      "step": 2790
    },
    {
      "epoch": 0.712468193384224,
      "grad_norm": 0.4990832507610321,
      "learning_rate": 1.2790500424088213e-05,
      "loss": 0.0063,
      "step": 2800
    },
    {
      "epoch": 0.7150127226463104,
      "grad_norm": 0.06024640053510666,
      "learning_rate": 1.2677410234662144e-05,
      "loss": 0.0037,
      "step": 2810
    },
    {
      "epoch": 0.7175572519083969,
      "grad_norm": 2.1582958698272705,
      "learning_rate": 1.2564320045236076e-05,
      "loss": 0.0063,
      "step": 2820
    },
    {
      "epoch": 0.7201017811704835,
      "grad_norm": 0.0880260169506073,
      "learning_rate": 1.2451229855810009e-05,
      "loss": 0.0037,
      "step": 2830
    },
    {
      "epoch": 0.72264631043257,
      "grad_norm": 0.01811220683157444,
      "learning_rate": 1.2338139666383942e-05,
      "loss": 0.013,
      "step": 2840
    },
    {
      "epoch": 0.7251908396946565,
      "grad_norm": 0.0051167807541787624,
      "learning_rate": 1.2225049476957876e-05,
      "loss": 0.0043,
      "step": 2850
    },
    {
      "epoch": 0.727735368956743,
      "grad_norm": 1.3101856708526611,
      "learning_rate": 1.2111959287531807e-05,
      "loss": 0.0159,
      "step": 2860
    },
    {
      "epoch": 0.7302798982188295,
      "grad_norm": 1.7038617134094238,
      "learning_rate": 1.1998869098105741e-05,
      "loss": 0.0011,
      "step": 2870
    },
    {
      "epoch": 0.732824427480916,
      "grad_norm": 2.967705249786377,
      "learning_rate": 1.1885778908679672e-05,
      "loss": 0.0055,
      "step": 2880
    },
    {
      "epoch": 0.7353689567430025,
      "grad_norm": 1.4774291515350342,
      "learning_rate": 1.1772688719253606e-05,
      "loss": 0.0075,
      "step": 2890
    },
    {
      "epoch": 0.7379134860050891,
      "grad_norm": 2.806942939758301,
      "learning_rate": 1.1659598529827539e-05,
      "loss": 0.0172,
      "step": 2900
    },
    {
      "epoch": 0.7404580152671756,
      "grad_norm": 0.09960631281137466,
      "learning_rate": 1.1546508340401473e-05,
      "loss": 0.0118,
      "step": 2910
    },
    {
      "epoch": 0.7430025445292621,
      "grad_norm": 0.1535715013742447,
      "learning_rate": 1.1433418150975404e-05,
      "loss": 0.0029,
      "step": 2920
    },
    {
      "epoch": 0.7455470737913485,
      "grad_norm": 0.035965435206890106,
      "learning_rate": 1.1320327961549336e-05,
      "loss": 0.001,
      "step": 2930
    },
    {
      "epoch": 0.7480916030534351,
      "grad_norm": 2.874985933303833,
      "learning_rate": 1.1207237772123269e-05,
      "loss": 0.0061,
      "step": 2940
    },
    {
      "epoch": 0.7506361323155216,
      "grad_norm": 1.503135323524475,
      "learning_rate": 1.1094147582697202e-05,
      "loss": 0.0095,
      "step": 2950
    },
    {
      "epoch": 0.7531806615776081,
      "grad_norm": 1.2479780912399292,
      "learning_rate": 1.0981057393271136e-05,
      "loss": 0.0051,
      "step": 2960
    },
    {
      "epoch": 0.7557251908396947,
      "grad_norm": 1.7447328567504883,
      "learning_rate": 1.0867967203845067e-05,
      "loss": 0.0083,
      "step": 2970
    },
    {
      "epoch": 0.7582697201017812,
      "grad_norm": 0.24014875292778015,
      "learning_rate": 1.0754877014419001e-05,
      "loss": 0.0077,
      "step": 2980
    },
    {
      "epoch": 0.7608142493638677,
      "grad_norm": 0.24356932938098907,
      "learning_rate": 1.0641786824992932e-05,
      "loss": 0.003,
      "step": 2990
    },
    {
      "epoch": 0.7633587786259542,
      "grad_norm": 2.1898441314697266,
      "learning_rate": 1.0528696635566866e-05,
      "loss": 0.0092,
      "step": 3000
    },
    {
      "epoch": 0.7633587786259542,
      "eval_loss": 0.027731578797101974,
      "eval_runtime": 132.3021,
      "eval_samples_per_second": 60.332,
      "eval_steps_per_second": 0.476,
      "step": 3000
    }
  ],
  "logging_steps": 10,
  "max_steps": 3930,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7125140675443098e+18,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}