{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9991537376586743,
  "eval_steps": 500,
  "global_step": 1329,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022566995768688293,
      "grad_norm": 3.9500676023225703,
      "learning_rate": 3.7593984962406015e-07,
      "loss": 0.8582,
      "step": 10
    },
    {
      "epoch": 0.045133991537376586,
      "grad_norm": 1.395677808116374,
      "learning_rate": 7.518796992481203e-07,
      "loss": 0.8024,
      "step": 20
    },
    {
      "epoch": 0.06770098730606489,
      "grad_norm": 0.9062193282306004,
      "learning_rate": 1.1278195488721805e-06,
      "loss": 0.7357,
      "step": 30
    },
    {
      "epoch": 0.09026798307475317,
      "grad_norm": 0.8282785482347225,
      "learning_rate": 1.5037593984962406e-06,
      "loss": 0.6898,
      "step": 40
    },
    {
      "epoch": 0.11283497884344147,
      "grad_norm": 0.701981062657255,
      "learning_rate": 1.8796992481203007e-06,
      "loss": 0.6707,
      "step": 50
    },
    {
      "epoch": 0.13540197461212977,
      "grad_norm": 0.640628105313228,
      "learning_rate": 2.255639097744361e-06,
      "loss": 0.6616,
      "step": 60
    },
    {
      "epoch": 0.15796897038081806,
      "grad_norm": 0.6794950494897954,
      "learning_rate": 2.631578947368421e-06,
      "loss": 0.6518,
      "step": 70
    },
    {
      "epoch": 0.18053596614950634,
      "grad_norm": 0.7767981323221586,
      "learning_rate": 3.007518796992481e-06,
      "loss": 0.6496,
      "step": 80
    },
    {
      "epoch": 0.20310296191819463,
      "grad_norm": 0.7882061395786479,
      "learning_rate": 3.3834586466165413e-06,
      "loss": 0.6451,
      "step": 90
    },
    {
      "epoch": 0.22566995768688294,
      "grad_norm": 0.9439189657933283,
      "learning_rate": 3.7593984962406014e-06,
      "loss": 0.6417,
      "step": 100
    },
    {
      "epoch": 0.24823695345557123,
      "grad_norm": 0.7346506648254489,
      "learning_rate": 4.135338345864662e-06,
      "loss": 0.6314,
      "step": 110
    },
    {
      "epoch": 0.27080394922425954,
      "grad_norm": 0.6595327999279074,
      "learning_rate": 4.511278195488722e-06,
      "loss": 0.6293,
      "step": 120
    },
    {
      "epoch": 0.2933709449929478,
      "grad_norm": 0.8336967878084969,
      "learning_rate": 4.887218045112782e-06,
      "loss": 0.6301,
      "step": 130
    },
    {
      "epoch": 0.3159379407616361,
      "grad_norm": 1.0637113778881198,
      "learning_rate": 4.970735785953177e-06,
      "loss": 0.6245,
      "step": 140
    },
    {
      "epoch": 0.3385049365303244,
      "grad_norm": 0.6759376136072178,
      "learning_rate": 4.928929765886288e-06,
      "loss": 0.6245,
      "step": 150
    },
    {
      "epoch": 0.3610719322990127,
      "grad_norm": 1.0169288303598698,
      "learning_rate": 4.887123745819398e-06,
      "loss": 0.6261,
      "step": 160
    },
    {
      "epoch": 0.383638928067701,
      "grad_norm": 0.7994151168318469,
      "learning_rate": 4.845317725752509e-06,
      "loss": 0.621,
      "step": 170
    },
    {
      "epoch": 0.40620592383638926,
      "grad_norm": 0.7288471424905668,
      "learning_rate": 4.803511705685619e-06,
      "loss": 0.6169,
      "step": 180
    },
    {
      "epoch": 0.4287729196050776,
      "grad_norm": 0.6569051029133128,
      "learning_rate": 4.7617056856187295e-06,
      "loss": 0.6107,
      "step": 190
    },
    {
      "epoch": 0.4513399153737659,
      "grad_norm": 0.6306476938762344,
      "learning_rate": 4.71989966555184e-06,
      "loss": 0.6166,
      "step": 200
    },
    {
      "epoch": 0.47390691114245415,
      "grad_norm": 0.6011927260340564,
      "learning_rate": 4.67809364548495e-06,
      "loss": 0.6128,
      "step": 210
    },
    {
      "epoch": 0.49647390691114246,
      "grad_norm": 0.6568818099792321,
      "learning_rate": 4.63628762541806e-06,
      "loss": 0.6041,
      "step": 220
    },
    {
      "epoch": 0.5190409026798307,
      "grad_norm": 0.6201063357053574,
      "learning_rate": 4.594481605351171e-06,
      "loss": 0.6126,
      "step": 230
    },
    {
      "epoch": 0.5416078984485191,
      "grad_norm": 0.5804525175003069,
      "learning_rate": 4.5526755852842815e-06,
      "loss": 0.611,
      "step": 240
    },
    {
      "epoch": 0.5641748942172073,
      "grad_norm": 0.642468562835698,
      "learning_rate": 4.510869565217392e-06,
      "loss": 0.6049,
      "step": 250
    },
    {
      "epoch": 0.5867418899858956,
      "grad_norm": 0.6897016737786636,
      "learning_rate": 4.469063545150502e-06,
      "loss": 0.6073,
      "step": 260
    },
    {
      "epoch": 0.609308885754584,
      "grad_norm": 0.6770865024228586,
      "learning_rate": 4.427257525083612e-06,
      "loss": 0.6025,
      "step": 270
    },
    {
      "epoch": 0.6318758815232722,
      "grad_norm": 0.6074629511796228,
      "learning_rate": 4.3854515050167225e-06,
      "loss": 0.609,
      "step": 280
    },
    {
      "epoch": 0.6544428772919605,
      "grad_norm": 0.6220418776986034,
      "learning_rate": 4.3436454849498336e-06,
      "loss": 0.6021,
      "step": 290
    },
    {
      "epoch": 0.6770098730606487,
      "grad_norm": 0.7074012275628311,
      "learning_rate": 4.301839464882944e-06,
      "loss": 0.6081,
      "step": 300
    },
    {
      "epoch": 0.6995768688293371,
      "grad_norm": 0.5964332959901174,
      "learning_rate": 4.260033444816054e-06,
      "loss": 0.6011,
      "step": 310
    },
    {
      "epoch": 0.7221438645980254,
      "grad_norm": 0.6259056006758187,
      "learning_rate": 4.218227424749164e-06,
      "loss": 0.6042,
      "step": 320
    },
    {
      "epoch": 0.7447108603667136,
      "grad_norm": 0.5493942038048834,
      "learning_rate": 4.1764214046822745e-06,
      "loss": 0.5995,
      "step": 330
    },
    {
      "epoch": 0.767277856135402,
      "grad_norm": 0.6181685023195539,
      "learning_rate": 4.134615384615385e-06,
      "loss": 0.5987,
      "step": 340
    },
    {
      "epoch": 0.7898448519040903,
      "grad_norm": 0.588036854641419,
      "learning_rate": 4.092809364548495e-06,
      "loss": 0.6,
      "step": 350
    },
    {
      "epoch": 0.8124118476727785,
      "grad_norm": 0.6828867431467442,
      "learning_rate": 4.051003344481605e-06,
      "loss": 0.5989,
      "step": 360
    },
    {
      "epoch": 0.8349788434414669,
      "grad_norm": 0.6024684614349975,
      "learning_rate": 4.0091973244147155e-06,
      "loss": 0.6059,
      "step": 370
    },
    {
      "epoch": 0.8575458392101551,
      "grad_norm": 0.5987044058560508,
      "learning_rate": 3.967391304347827e-06,
      "loss": 0.6023,
      "step": 380
    },
    {
      "epoch": 0.8801128349788434,
      "grad_norm": 0.6031854798342579,
      "learning_rate": 3.925585284280937e-06,
      "loss": 0.5963,
      "step": 390
    },
    {
      "epoch": 0.9026798307475318,
      "grad_norm": 0.5784643272502593,
      "learning_rate": 3.883779264214047e-06,
      "loss": 0.5984,
      "step": 400
    },
    {
      "epoch": 0.92524682651622,
      "grad_norm": 0.5828958217828615,
      "learning_rate": 3.841973244147157e-06,
      "loss": 0.5963,
      "step": 410
    },
    {
      "epoch": 0.9478138222849083,
      "grad_norm": 0.6221793656032795,
      "learning_rate": 3.800167224080268e-06,
      "loss": 0.5968,
      "step": 420
    },
    {
      "epoch": 0.9703808180535967,
      "grad_norm": 0.5425155387264896,
      "learning_rate": 3.7583612040133782e-06,
      "loss": 0.5907,
      "step": 430
    },
    {
      "epoch": 0.9929478138222849,
      "grad_norm": 0.5548540193834617,
      "learning_rate": 3.7165551839464885e-06,
      "loss": 0.5953,
      "step": 440
    },
    {
      "epoch": 0.9997179125528914,
      "eval_loss": 0.5933970212936401,
      "eval_runtime": 687.5019,
      "eval_samples_per_second": 17.369,
      "eval_steps_per_second": 0.544,
      "step": 443
    },
    {
      "epoch": 1.0155148095909732,
      "grad_norm": 0.663986999357991,
      "learning_rate": 3.674749163879599e-06,
      "loss": 0.6102,
      "step": 450
    },
    {
      "epoch": 1.0380818053596614,
      "grad_norm": 0.6073973469870063,
      "learning_rate": 3.6329431438127094e-06,
      "loss": 0.5402,
      "step": 460
    },
    {
      "epoch": 1.0606488011283497,
      "grad_norm": 0.5871743515341908,
      "learning_rate": 3.5911371237458196e-06,
      "loss": 0.5553,
      "step": 470
    },
    {
      "epoch": 1.0832157968970382,
      "grad_norm": 0.5998465628010546,
      "learning_rate": 3.5493311036789303e-06,
      "loss": 0.5428,
      "step": 480
    },
    {
      "epoch": 1.1057827926657264,
      "grad_norm": 0.631113957346967,
      "learning_rate": 3.5075250836120405e-06,
      "loss": 0.553,
      "step": 490
    },
    {
      "epoch": 1.1283497884344147,
      "grad_norm": 0.5595994504229799,
      "learning_rate": 3.465719063545151e-06,
      "loss": 0.5511,
      "step": 500
    },
    {
      "epoch": 1.150916784203103,
      "grad_norm": 0.5718415684682732,
      "learning_rate": 3.4239130434782614e-06,
      "loss": 0.5519,
      "step": 510
    },
    {
      "epoch": 1.1734837799717912,
      "grad_norm": 0.5565246715923281,
      "learning_rate": 3.3821070234113717e-06,
      "loss": 0.5441,
      "step": 520
    },
    {
      "epoch": 1.1960507757404795,
      "grad_norm": 0.5740404698821812,
      "learning_rate": 3.3403010033444823e-06,
      "loss": 0.5498,
      "step": 530
    },
    {
      "epoch": 1.2186177715091677,
      "grad_norm": 0.6406151182955427,
      "learning_rate": 3.298494983277592e-06,
      "loss": 0.5472,
      "step": 540
    },
    {
      "epoch": 1.2411847672778562,
      "grad_norm": 1.0247459934962126,
      "learning_rate": 3.2566889632107024e-06,
      "loss": 0.5519,
      "step": 550
    },
    {
      "epoch": 1.2637517630465445,
      "grad_norm": 0.6137317588735225,
      "learning_rate": 3.2148829431438126e-06,
      "loss": 0.5537,
      "step": 560
    },
    {
      "epoch": 1.2863187588152327,
      "grad_norm": 0.5803805149684825,
      "learning_rate": 3.1730769230769233e-06,
      "loss": 0.5507,
      "step": 570
    },
    {
      "epoch": 1.308885754583921,
      "grad_norm": 0.614582832632458,
      "learning_rate": 3.1312709030100335e-06,
      "loss": 0.5505,
      "step": 580
    },
    {
      "epoch": 1.3314527503526092,
      "grad_norm": 0.5795519009889963,
      "learning_rate": 3.0894648829431438e-06,
      "loss": 0.5501,
      "step": 590
    },
    {
      "epoch": 1.3540197461212977,
      "grad_norm": 0.556651253096743,
      "learning_rate": 3.0476588628762544e-06,
      "loss": 0.5557,
      "step": 600
    },
    {
      "epoch": 1.376586741889986,
      "grad_norm": 0.5720074610909325,
      "learning_rate": 3.0058528428093647e-06,
      "loss": 0.5473,
      "step": 610
    },
    {
      "epoch": 1.3991537376586742,
      "grad_norm": 0.5813836681865406,
      "learning_rate": 2.964046822742475e-06,
      "loss": 0.5517,
      "step": 620
    },
    {
      "epoch": 1.4217207334273625,
      "grad_norm": 0.5523421119994065,
      "learning_rate": 2.9222408026755856e-06,
      "loss": 0.5495,
      "step": 630
    },
    {
      "epoch": 1.4442877291960508,
      "grad_norm": 0.5617619307728544,
      "learning_rate": 2.880434782608696e-06,
      "loss": 0.5488,
      "step": 640
    },
    {
      "epoch": 1.466854724964739,
      "grad_norm": 0.6071916695080162,
      "learning_rate": 2.8386287625418065e-06,
      "loss": 0.5496,
      "step": 650
    },
    {
      "epoch": 1.4894217207334273,
      "grad_norm": 0.6034608381516856,
      "learning_rate": 2.7968227424749167e-06,
      "loss": 0.5532,
      "step": 660
    },
    {
      "epoch": 1.5119887165021155,
      "grad_norm": 0.5806505121518643,
      "learning_rate": 2.755016722408027e-06,
      "loss": 0.5478,
      "step": 670
    },
    {
      "epoch": 1.5345557122708038,
      "grad_norm": 0.5823380773786808,
      "learning_rate": 2.7132107023411376e-06,
      "loss": 0.542,
      "step": 680
    },
    {
      "epoch": 1.5571227080394923,
      "grad_norm": 0.591931176130096,
      "learning_rate": 2.671404682274248e-06,
      "loss": 0.5436,
      "step": 690
    },
    {
      "epoch": 1.5796897038081805,
      "grad_norm": 0.575757448424994,
      "learning_rate": 2.629598662207358e-06,
      "loss": 0.5479,
      "step": 700
    },
    {
      "epoch": 1.6022566995768688,
      "grad_norm": 0.5440261614938245,
      "learning_rate": 2.5877926421404688e-06,
      "loss": 0.5505,
      "step": 710
    },
    {
      "epoch": 1.6248236953455573,
      "grad_norm": 0.5866164019825096,
      "learning_rate": 2.545986622073579e-06,
      "loss": 0.5583,
      "step": 720
    },
    {
      "epoch": 1.6473906911142455,
      "grad_norm": 0.574480308049467,
      "learning_rate": 2.5041806020066893e-06,
      "loss": 0.5414,
      "step": 730
    },
    {
      "epoch": 1.6699576868829338,
      "grad_norm": 0.5987421971532259,
      "learning_rate": 2.4623745819397995e-06,
      "loss": 0.5567,
      "step": 740
    },
    {
      "epoch": 1.692524682651622,
      "grad_norm": 0.5606374529736117,
      "learning_rate": 2.4205685618729097e-06,
      "loss": 0.5481,
      "step": 750
    },
    {
      "epoch": 1.7150916784203103,
      "grad_norm": 0.5589529879882318,
      "learning_rate": 2.3787625418060204e-06,
      "loss": 0.547,
      "step": 760
    },
    {
      "epoch": 1.7376586741889986,
      "grad_norm": 0.537042270204007,
      "learning_rate": 2.3369565217391307e-06,
      "loss": 0.5472,
      "step": 770
    },
    {
      "epoch": 1.7602256699576868,
      "grad_norm": 0.5692518598971062,
      "learning_rate": 2.2951505016722413e-06,
      "loss": 0.5434,
      "step": 780
    },
    {
      "epoch": 1.782792665726375,
      "grad_norm": 0.548945107183346,
      "learning_rate": 2.253344481605351e-06,
      "loss": 0.5464,
      "step": 790
    },
    {
      "epoch": 1.8053596614950633,
      "grad_norm": 0.5612980211542685,
      "learning_rate": 2.211538461538462e-06,
      "loss": 0.5412,
      "step": 800
    },
    {
      "epoch": 1.8279266572637518,
      "grad_norm": 0.5352386025237578,
      "learning_rate": 2.169732441471572e-06,
      "loss": 0.5519,
      "step": 810
    },
    {
      "epoch": 1.85049365303244,
      "grad_norm": 0.5369069746509202,
      "learning_rate": 2.1279264214046823e-06,
      "loss": 0.5388,
      "step": 820
    },
    {
      "epoch": 1.8730606488011283,
      "grad_norm": 0.5357053149375428,
      "learning_rate": 2.086120401337793e-06,
      "loss": 0.5507,
      "step": 830
    },
    {
      "epoch": 1.8956276445698168,
      "grad_norm": 0.722556948186832,
      "learning_rate": 2.044314381270903e-06,
      "loss": 0.5508,
      "step": 840
    },
    {
      "epoch": 1.918194640338505,
      "grad_norm": 0.5648614764610744,
      "learning_rate": 2.0025083612040134e-06,
      "loss": 0.5383,
      "step": 850
    },
    {
      "epoch": 1.9407616361071933,
      "grad_norm": 0.5449086056577778,
      "learning_rate": 1.960702341137124e-06,
      "loss": 0.5434,
      "step": 860
    },
    {
      "epoch": 1.9633286318758816,
      "grad_norm": 0.5439248073291871,
      "learning_rate": 1.9188963210702343e-06,
      "loss": 0.5501,
      "step": 870
    },
    {
      "epoch": 1.9858956276445698,
      "grad_norm": 0.5668278705849349,
      "learning_rate": 1.8770903010033448e-06,
      "loss": 0.5476,
      "step": 880
    },
    {
      "epoch": 1.9994358251057829,
      "eval_loss": 0.5841257572174072,
      "eval_runtime": 685.8839,
      "eval_samples_per_second": 17.41,
      "eval_steps_per_second": 0.545,
      "step": 886
    },
    {
      "epoch": 2.008462623413258,
      "grad_norm": 0.8419949483787798,
      "learning_rate": 1.8352842809364548e-06,
      "loss": 0.5793,
      "step": 890
    },
    {
      "epoch": 2.0310296191819464,
      "grad_norm": 0.5856594136748535,
      "learning_rate": 1.7934782608695653e-06,
      "loss": 0.5034,
      "step": 900
    },
    {
      "epoch": 2.0535966149506346,
      "grad_norm": 0.5827928398358422,
      "learning_rate": 1.7516722408026757e-06,
      "loss": 0.5061,
      "step": 910
    },
    {
      "epoch": 2.076163610719323,
      "grad_norm": 0.5599155876387997,
      "learning_rate": 1.709866220735786e-06,
      "loss": 0.4983,
      "step": 920
    },
    {
      "epoch": 2.098730606488011,
      "grad_norm": 0.5470260712158903,
      "learning_rate": 1.6680602006688964e-06,
      "loss": 0.5052,
      "step": 930
    },
    {
      "epoch": 2.1212976022566994,
      "grad_norm": 0.5758776710250274,
      "learning_rate": 1.6262541806020069e-06,
      "loss": 0.5089,
      "step": 940
    },
    {
      "epoch": 2.143864598025388,
      "grad_norm": 0.5755610731770497,
      "learning_rate": 1.5844481605351173e-06,
      "loss": 0.5099,
      "step": 950
    },
    {
      "epoch": 2.1664315937940763,
      "grad_norm": 0.5688878225446357,
      "learning_rate": 1.5426421404682276e-06,
      "loss": 0.5059,
      "step": 960
    },
    {
      "epoch": 2.1889985895627646,
      "grad_norm": 0.5484653799292355,
      "learning_rate": 1.500836120401338e-06,
      "loss": 0.5027,
      "step": 970
    },
    {
      "epoch": 2.211565585331453,
      "grad_norm": 0.5855753779073777,
      "learning_rate": 1.4590301003344485e-06,
      "loss": 0.497,
      "step": 980
    },
    {
      "epoch": 2.234132581100141,
      "grad_norm": 0.5846394849624436,
      "learning_rate": 1.4172240802675585e-06,
      "loss": 0.4967,
      "step": 990
    },
    {
      "epoch": 2.2566995768688294,
      "grad_norm": 0.6020064410941347,
      "learning_rate": 1.375418060200669e-06,
      "loss": 0.5061,
      "step": 1000
    },
    {
      "epoch": 2.2792665726375176,
      "grad_norm": 0.5674319171403842,
      "learning_rate": 1.3336120401337794e-06,
      "loss": 0.5119,
      "step": 1010
    },
    {
      "epoch": 2.301833568406206,
      "grad_norm": 0.5608300219896133,
      "learning_rate": 1.2918060200668896e-06,
      "loss": 0.5057,
      "step": 1020
    },
    {
      "epoch": 2.324400564174894,
      "grad_norm": 0.5902005590764662,
      "learning_rate": 1.25e-06,
      "loss": 0.5096,
      "step": 1030
    },
    {
      "epoch": 2.3469675599435824,
      "grad_norm": 0.546284085451993,
      "learning_rate": 1.2081939799331106e-06,
      "loss": 0.5069,
      "step": 1040
    },
    {
      "epoch": 2.3695345557122707,
      "grad_norm": 0.5498516301636015,
      "learning_rate": 1.1663879598662208e-06,
      "loss": 0.5041,
      "step": 1050
    },
    {
      "epoch": 2.392101551480959,
      "grad_norm": 0.5351775325473189,
      "learning_rate": 1.124581939799331e-06,
      "loss": 0.5032,
      "step": 1060
    },
    {
      "epoch": 2.414668547249647,
      "grad_norm": 0.5483556296524353,
      "learning_rate": 1.0827759197324415e-06,
      "loss": 0.5035,
      "step": 1070
    },
    {
      "epoch": 2.4372355430183354,
      "grad_norm": 0.5646775870143284,
      "learning_rate": 1.040969899665552e-06,
      "loss": 0.5069,
      "step": 1080
    },
    {
      "epoch": 2.459802538787024,
      "grad_norm": 0.5302435040525055,
      "learning_rate": 9.991638795986624e-07,
      "loss": 0.505,
      "step": 1090
    },
    {
      "epoch": 2.4823695345557124,
      "grad_norm": 0.5774882345635233,
      "learning_rate": 9.573578595317726e-07,
      "loss": 0.5074,
      "step": 1100
    },
    {
      "epoch": 2.5049365303244007,
      "grad_norm": 0.5389744005535605,
      "learning_rate": 9.15551839464883e-07,
      "loss": 0.5107,
      "step": 1110
    },
    {
      "epoch": 2.527503526093089,
      "grad_norm": 0.5359062536764768,
      "learning_rate": 8.737458193979933e-07,
      "loss": 0.5001,
      "step": 1120
    },
    {
      "epoch": 2.550070521861777,
      "grad_norm": 0.5329301854453081,
      "learning_rate": 8.319397993311038e-07,
      "loss": 0.5053,
      "step": 1130
    },
    {
      "epoch": 2.5726375176304654,
      "grad_norm": 0.5469111484464729,
      "learning_rate": 7.901337792642141e-07,
      "loss": 0.5034,
      "step": 1140
    },
    {
      "epoch": 2.5952045133991537,
      "grad_norm": 0.5376793002418263,
      "learning_rate": 7.483277591973246e-07,
      "loss": 0.5038,
      "step": 1150
    },
    {
      "epoch": 2.617771509167842,
      "grad_norm": 0.5321398147289212,
      "learning_rate": 7.065217391304348e-07,
      "loss": 0.5072,
      "step": 1160
    },
    {
      "epoch": 2.64033850493653,
      "grad_norm": 0.5325680399356436,
      "learning_rate": 6.647157190635452e-07,
      "loss": 0.4955,
      "step": 1170
    },
    {
      "epoch": 2.6629055007052185,
      "grad_norm": 0.6374259282175002,
      "learning_rate": 6.229096989966555e-07,
      "loss": 0.5066,
      "step": 1180
    },
    {
      "epoch": 2.685472496473907,
      "grad_norm": 0.5462731372886629,
      "learning_rate": 5.81103678929766e-07,
      "loss": 0.5071,
      "step": 1190
    },
    {
      "epoch": 2.7080394922425954,
      "grad_norm": 0.5385696882283996,
      "learning_rate": 5.392976588628763e-07,
      "loss": 0.5054,
      "step": 1200
    },
    {
      "epoch": 2.7306064880112837,
      "grad_norm": 0.5456766485564613,
      "learning_rate": 4.974916387959867e-07,
      "loss": 0.5061,
      "step": 1210
    },
    {
      "epoch": 2.753173483779972,
      "grad_norm": 0.5339029579122799,
      "learning_rate": 4.55685618729097e-07,
      "loss": 0.5024,
      "step": 1220
    },
    {
      "epoch": 2.77574047954866,
      "grad_norm": 0.5252848686524678,
      "learning_rate": 4.138795986622074e-07,
      "loss": 0.5092,
      "step": 1230
    },
    {
      "epoch": 2.7983074753173485,
      "grad_norm": 0.5250218608560205,
      "learning_rate": 3.7207357859531776e-07,
      "loss": 0.5074,
      "step": 1240
    },
    {
      "epoch": 2.8208744710860367,
      "grad_norm": 0.5267485139063955,
      "learning_rate": 3.302675585284281e-07,
      "loss": 0.507,
      "step": 1250
    },
    {
      "epoch": 2.843441466854725,
      "grad_norm": 0.5433190534773383,
      "learning_rate": 2.884615384615385e-07,
      "loss": 0.5085,
      "step": 1260
    },
    {
      "epoch": 2.8660084626234132,
      "grad_norm": 0.5174335750907513,
      "learning_rate": 2.4665551839464886e-07,
      "loss": 0.5057,
      "step": 1270
    },
    {
      "epoch": 2.8885754583921015,
      "grad_norm": 0.5152650038198892,
      "learning_rate": 2.048494983277592e-07,
      "loss": 0.503,
      "step": 1280
    },
    {
      "epoch": 2.9111424541607898,
      "grad_norm": 0.5238413421157163,
      "learning_rate": 1.6304347826086958e-07,
      "loss": 0.507,
      "step": 1290
    },
    {
      "epoch": 2.933709449929478,
      "grad_norm": 0.5532403905639468,
      "learning_rate": 1.2123745819397995e-07,
      "loss": 0.502,
      "step": 1300
    },
    {
      "epoch": 2.9562764456981663,
      "grad_norm": 0.5439654700776371,
      "learning_rate": 7.943143812709031e-08,
      "loss": 0.506,
      "step": 1310
    },
    {
      "epoch": 2.9788434414668545,
      "grad_norm": 0.5300109389569346,
      "learning_rate": 3.762541806020067e-08,
      "loss": 0.5015,
      "step": 1320
    },
    {
      "epoch": 2.9991537376586743,
      "eval_loss": 0.5893023610115051,
      "eval_runtime": 684.3576,
      "eval_samples_per_second": 17.448,
      "eval_steps_per_second": 0.546,
      "step": 1329
    },
    {
      "epoch": 2.9991537376586743,
      "step": 1329,
      "total_flos": 5064195066298368.0,
      "train_loss": 0.5616077652006243,
      "train_runtime": 120588.8327,
      "train_samples_per_second": 5.644,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 1329,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5064195066298368.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}