{
"best_global_step": 2000,
"best_metric": 0.8460908532142639,
"best_model_checkpoint": "task2file/sft_qwen_14B_v2/checkpoints/checkpoint-2000",
"epoch": 0.8438818565400844,
"eval_steps": 100,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008438818565400844,
"grad_norm": 0.5386583805084229,
"learning_rate": 1.7574692442882248e-07,
"loss": 1.6941628456115723,
"step": 2
},
{
"epoch": 0.0016877637130801688,
"grad_norm": 0.5477277636528015,
"learning_rate": 5.272407732864675e-07,
"loss": 1.7132279872894287,
"step": 4
},
{
"epoch": 0.002531645569620253,
"grad_norm": 0.5390765070915222,
"learning_rate": 8.787346221441126e-07,
"loss": 1.641180396080017,
"step": 6
},
{
"epoch": 0.0033755274261603376,
"grad_norm": 0.5023683905601501,
"learning_rate": 1.2302284710017575e-06,
"loss": 1.5616240501403809,
"step": 8
},
{
"epoch": 0.004219409282700422,
"grad_norm": 0.4899154603481293,
"learning_rate": 1.5817223198594026e-06,
"loss": 1.572033405303955,
"step": 10
},
{
"epoch": 0.005063291139240506,
"grad_norm": 0.5239788293838501,
"learning_rate": 1.9332161687170474e-06,
"loss": 1.6242921352386475,
"step": 12
},
{
"epoch": 0.00590717299578059,
"grad_norm": 0.5172926783561707,
"learning_rate": 2.2847100175746925e-06,
"loss": 1.6800041198730469,
"step": 14
},
{
"epoch": 0.006751054852320675,
"grad_norm": 0.5539224743843079,
"learning_rate": 2.6362038664323376e-06,
"loss": 1.6450834274291992,
"step": 16
},
{
"epoch": 0.007594936708860759,
"grad_norm": 0.5255337953567505,
"learning_rate": 2.9876977152899827e-06,
"loss": 1.6673263311386108,
"step": 18
},
{
"epoch": 0.008438818565400843,
"grad_norm": 0.5074548721313477,
"learning_rate": 3.3391915641476277e-06,
"loss": 1.531802773475647,
"step": 20
},
{
"epoch": 0.009282700421940928,
"grad_norm": 0.4160279333591461,
"learning_rate": 3.6906854130052724e-06,
"loss": 1.599354863166809,
"step": 22
},
{
"epoch": 0.010126582278481013,
"grad_norm": 0.5716474652290344,
"learning_rate": 4.0421792618629174e-06,
"loss": 1.6700962781906128,
"step": 24
},
{
"epoch": 0.010970464135021098,
"grad_norm": 0.5148899555206299,
"learning_rate": 4.3936731107205625e-06,
"loss": 1.66217839717865,
"step": 26
},
{
"epoch": 0.01181434599156118,
"grad_norm": 0.575722336769104,
"learning_rate": 4.7451669595782076e-06,
"loss": 1.6692266464233398,
"step": 28
},
{
"epoch": 0.012658227848101266,
"grad_norm": 0.5345953106880188,
"learning_rate": 5.096660808435853e-06,
"loss": 1.5518689155578613,
"step": 30
},
{
"epoch": 0.01350210970464135,
"grad_norm": 0.4462043344974518,
"learning_rate": 5.448154657293498e-06,
"loss": 1.5930007696151733,
"step": 32
},
{
"epoch": 0.014345991561181435,
"grad_norm": 0.5119605660438538,
"learning_rate": 5.799648506151143e-06,
"loss": 1.6069684028625488,
"step": 34
},
{
"epoch": 0.015189873417721518,
"grad_norm": 0.5328608751296997,
"learning_rate": 6.151142355008788e-06,
"loss": 1.5838109254837036,
"step": 36
},
{
"epoch": 0.016033755274261603,
"grad_norm": 0.5065920352935791,
"learning_rate": 6.502636203866433e-06,
"loss": 1.608130931854248,
"step": 38
},
{
"epoch": 0.016877637130801686,
"grad_norm": 0.4479359984397888,
"learning_rate": 6.854130052724078e-06,
"loss": 1.5942182540893555,
"step": 40
},
{
"epoch": 0.017721518987341773,
"grad_norm": 0.42844903469085693,
"learning_rate": 7.205623901581722e-06,
"loss": 1.6441553831100464,
"step": 42
},
{
"epoch": 0.018565400843881856,
"grad_norm": 0.476630836725235,
"learning_rate": 7.557117750439367e-06,
"loss": 1.6068111658096313,
"step": 44
},
{
"epoch": 0.019409282700421943,
"grad_norm": 0.4532654881477356,
"learning_rate": 7.908611599297012e-06,
"loss": 1.6618021726608276,
"step": 46
},
{
"epoch": 0.020253164556962026,
"grad_norm": 0.3701118230819702,
"learning_rate": 8.260105448154657e-06,
"loss": 1.4730033874511719,
"step": 48
},
{
"epoch": 0.02109704641350211,
"grad_norm": 0.38471561670303345,
"learning_rate": 8.611599297012302e-06,
"loss": 1.4828267097473145,
"step": 50
},
{
"epoch": 0.021940928270042195,
"grad_norm": 0.3602336347103119,
"learning_rate": 8.963093145869948e-06,
"loss": 1.3877452611923218,
"step": 52
},
{
"epoch": 0.02278481012658228,
"grad_norm": 0.40318572521209717,
"learning_rate": 9.314586994727593e-06,
"loss": 1.49052894115448,
"step": 54
},
{
"epoch": 0.02362869198312236,
"grad_norm": 0.3223826587200165,
"learning_rate": 9.666080843585238e-06,
"loss": 1.4912524223327637,
"step": 56
},
{
"epoch": 0.024472573839662448,
"grad_norm": 0.3873065114021301,
"learning_rate": 1.0017574692442883e-05,
"loss": 1.526674509048462,
"step": 58
},
{
"epoch": 0.02531645569620253,
"grad_norm": 0.410159707069397,
"learning_rate": 1.0369068541300528e-05,
"loss": 1.4480271339416504,
"step": 60
},
{
"epoch": 0.026160337552742614,
"grad_norm": 0.3632003962993622,
"learning_rate": 1.0720562390158173e-05,
"loss": 1.4222990274429321,
"step": 62
},
{
"epoch": 0.0270042194092827,
"grad_norm": 0.33118435740470886,
"learning_rate": 1.1072056239015818e-05,
"loss": 1.387171745300293,
"step": 64
},
{
"epoch": 0.027848101265822784,
"grad_norm": 0.3301764726638794,
"learning_rate": 1.1423550087873463e-05,
"loss": 1.3523777723312378,
"step": 66
},
{
"epoch": 0.02869198312236287,
"grad_norm": 0.34342435002326965,
"learning_rate": 1.1775043936731108e-05,
"loss": 1.4515162706375122,
"step": 68
},
{
"epoch": 0.029535864978902954,
"grad_norm": 0.3243122100830078,
"learning_rate": 1.2126537785588753e-05,
"loss": 1.3509243726730347,
"step": 70
},
{
"epoch": 0.030379746835443037,
"grad_norm": 0.3450150787830353,
"learning_rate": 1.2478031634446398e-05,
"loss": 1.4936245679855347,
"step": 72
},
{
"epoch": 0.031223628691983123,
"grad_norm": 0.38912028074264526,
"learning_rate": 1.2829525483304042e-05,
"loss": 1.3419109582901,
"step": 74
},
{
"epoch": 0.032067510548523206,
"grad_norm": 0.3019310235977173,
"learning_rate": 1.3181019332161687e-05,
"loss": 1.4284154176712036,
"step": 76
},
{
"epoch": 0.03291139240506329,
"grad_norm": 0.37803682684898376,
"learning_rate": 1.3532513181019332e-05,
"loss": 1.4256561994552612,
"step": 78
},
{
"epoch": 0.03375527426160337,
"grad_norm": 0.34191736578941345,
"learning_rate": 1.3884007029876977e-05,
"loss": 1.3256909847259521,
"step": 80
},
{
"epoch": 0.03459915611814346,
"grad_norm": 0.35242700576782227,
"learning_rate": 1.4235500878734624e-05,
"loss": 1.2710685729980469,
"step": 82
},
{
"epoch": 0.035443037974683546,
"grad_norm": 0.38094228506088257,
"learning_rate": 1.4586994727592269e-05,
"loss": 1.253411889076233,
"step": 84
},
{
"epoch": 0.036286919831223625,
"grad_norm": 0.36837366223335266,
"learning_rate": 1.4938488576449914e-05,
"loss": 1.3064342737197876,
"step": 86
},
{
"epoch": 0.03713080168776371,
"grad_norm": 0.3443569242954254,
"learning_rate": 1.5289982425307557e-05,
"loss": 1.293562412261963,
"step": 88
},
{
"epoch": 0.0379746835443038,
"grad_norm": 0.3799338936805725,
"learning_rate": 1.5641476274165202e-05,
"loss": 1.3382648229599,
"step": 90
},
{
"epoch": 0.038818565400843885,
"grad_norm": 0.40501922369003296,
"learning_rate": 1.599297012302285e-05,
"loss": 1.3925724029541016,
"step": 92
},
{
"epoch": 0.039662447257383965,
"grad_norm": 0.4419630467891693,
"learning_rate": 1.6344463971880492e-05,
"loss": 1.357171893119812,
"step": 94
},
{
"epoch": 0.04050632911392405,
"grad_norm": 0.3619817793369293,
"learning_rate": 1.6695957820738137e-05,
"loss": 1.3029985427856445,
"step": 96
},
{
"epoch": 0.04135021097046414,
"grad_norm": 0.4851357340812683,
"learning_rate": 1.7047451669595782e-05,
"loss": 1.3498191833496094,
"step": 98
},
{
"epoch": 0.04219409282700422,
"grad_norm": 0.418658584356308,
"learning_rate": 1.7398945518453427e-05,
"loss": 1.185287356376648,
"step": 100
},
{
"epoch": 0.04219409282700422,
"eval_loss": 1.2979938983917236,
"eval_runtime": 682.1979,
"eval_samples_per_second": 3.089,
"eval_steps_per_second": 3.089,
"step": 100
},
{
"epoch": 0.043037974683544304,
"grad_norm": 0.4464418888092041,
"learning_rate": 1.7750439367311073e-05,
"loss": 1.2217272520065308,
"step": 102
},
{
"epoch": 0.04388185654008439,
"grad_norm": 0.4706237316131592,
"learning_rate": 1.8101933216168718e-05,
"loss": 1.2052050828933716,
"step": 104
},
{
"epoch": 0.04472573839662447,
"grad_norm": 0.46394404768943787,
"learning_rate": 1.8453427065026363e-05,
"loss": 1.221343994140625,
"step": 106
},
{
"epoch": 0.04556962025316456,
"grad_norm": 0.4726889431476593,
"learning_rate": 1.8804920913884008e-05,
"loss": 1.2387475967407227,
"step": 108
},
{
"epoch": 0.046413502109704644,
"grad_norm": 0.42130985856056213,
"learning_rate": 1.9156414762741653e-05,
"loss": 1.2851309776306152,
"step": 110
},
{
"epoch": 0.04725738396624472,
"grad_norm": 0.4504576623439789,
"learning_rate": 1.9507908611599298e-05,
"loss": 1.2753145694732666,
"step": 112
},
{
"epoch": 0.04810126582278481,
"grad_norm": 0.396085262298584,
"learning_rate": 1.9859402460456943e-05,
"loss": 1.2427717447280884,
"step": 114
},
{
"epoch": 0.048945147679324896,
"grad_norm": 0.5106491446495056,
"learning_rate": 2.0210896309314588e-05,
"loss": 1.2943825721740723,
"step": 116
},
{
"epoch": 0.049789029535864976,
"grad_norm": 0.42351317405700684,
"learning_rate": 2.0562390158172233e-05,
"loss": 1.263301134109497,
"step": 118
},
{
"epoch": 0.05063291139240506,
"grad_norm": 0.4403539299964905,
"learning_rate": 2.0913884007029878e-05,
"loss": 1.2647849321365356,
"step": 120
},
{
"epoch": 0.05147679324894515,
"grad_norm": 0.5260752439498901,
"learning_rate": 2.1265377855887523e-05,
"loss": 1.2351393699645996,
"step": 122
},
{
"epoch": 0.05232067510548523,
"grad_norm": 0.44978851079940796,
"learning_rate": 2.1616871704745168e-05,
"loss": 1.0384471416473389,
"step": 124
},
{
"epoch": 0.053164556962025315,
"grad_norm": 0.47732362151145935,
"learning_rate": 2.1968365553602813e-05,
"loss": 1.1518068313598633,
"step": 126
},
{
"epoch": 0.0540084388185654,
"grad_norm": 0.5473551750183105,
"learning_rate": 2.231985940246046e-05,
"loss": 1.2264912128448486,
"step": 128
},
{
"epoch": 0.05485232067510549,
"grad_norm": 0.4473855197429657,
"learning_rate": 2.2671353251318103e-05,
"loss": 1.1615246534347534,
"step": 130
},
{
"epoch": 0.05569620253164557,
"grad_norm": 0.5980377197265625,
"learning_rate": 2.302284710017575e-05,
"loss": 1.1334880590438843,
"step": 132
},
{
"epoch": 0.056540084388185655,
"grad_norm": 0.5987792015075684,
"learning_rate": 2.3374340949033394e-05,
"loss": 1.1546804904937744,
"step": 134
},
{
"epoch": 0.05738396624472574,
"grad_norm": 0.45355498790740967,
"learning_rate": 2.372583479789104e-05,
"loss": 1.194953441619873,
"step": 136
},
{
"epoch": 0.05822784810126582,
"grad_norm": 0.5373698472976685,
"learning_rate": 2.4077328646748684e-05,
"loss": 1.1067466735839844,
"step": 138
},
{
"epoch": 0.05907172995780591,
"grad_norm": 0.48734328150749207,
"learning_rate": 2.442882249560633e-05,
"loss": 1.188468098640442,
"step": 140
},
{
"epoch": 0.059915611814345994,
"grad_norm": 0.4692173898220062,
"learning_rate": 2.478031634446397e-05,
"loss": 1.1624362468719482,
"step": 142
},
{
"epoch": 0.060759493670886074,
"grad_norm": 0.532554030418396,
"learning_rate": 2.513181019332162e-05,
"loss": 1.0978907346725464,
"step": 144
},
{
"epoch": 0.06160337552742616,
"grad_norm": 0.5853802561759949,
"learning_rate": 2.5483304042179264e-05,
"loss": 1.2030781507492065,
"step": 146
},
{
"epoch": 0.06244725738396625,
"grad_norm": 0.5061611533164978,
"learning_rate": 2.583479789103691e-05,
"loss": 1.082366943359375,
"step": 148
},
{
"epoch": 0.06329113924050633,
"grad_norm": 0.49426141381263733,
"learning_rate": 2.6186291739894554e-05,
"loss": 1.10564386844635,
"step": 150
},
{
"epoch": 0.06413502109704641,
"grad_norm": 0.5846618413925171,
"learning_rate": 2.6537785588752196e-05,
"loss": 1.1992807388305664,
"step": 152
},
{
"epoch": 0.06497890295358649,
"grad_norm": 0.5517552495002747,
"learning_rate": 2.6889279437609844e-05,
"loss": 1.1757566928863525,
"step": 154
},
{
"epoch": 0.06582278481012659,
"grad_norm": 0.5667305588722229,
"learning_rate": 2.7240773286467486e-05,
"loss": 1.0548783540725708,
"step": 156
},
{
"epoch": 0.06666666666666667,
"grad_norm": 0.6760414242744446,
"learning_rate": 2.7592267135325134e-05,
"loss": 1.184364914894104,
"step": 158
},
{
"epoch": 0.06751054852320675,
"grad_norm": 0.5261430740356445,
"learning_rate": 2.7943760984182776e-05,
"loss": 1.1945042610168457,
"step": 160
},
{
"epoch": 0.06835443037974684,
"grad_norm": 0.6155015230178833,
"learning_rate": 2.8295254833040425e-05,
"loss": 1.2021973133087158,
"step": 162
},
{
"epoch": 0.06919831223628692,
"grad_norm": 0.6131619215011597,
"learning_rate": 2.8646748681898066e-05,
"loss": 1.144123911857605,
"step": 164
},
{
"epoch": 0.070042194092827,
"grad_norm": 0.5749185681343079,
"learning_rate": 2.8998242530755715e-05,
"loss": 1.1329256296157837,
"step": 166
},
{
"epoch": 0.07088607594936709,
"grad_norm": 0.5243118405342102,
"learning_rate": 2.9349736379613356e-05,
"loss": 1.0892387628555298,
"step": 168
},
{
"epoch": 0.07172995780590717,
"grad_norm": 0.7190104722976685,
"learning_rate": 2.9701230228471005e-05,
"loss": 1.163260817527771,
"step": 170
},
{
"epoch": 0.07257383966244725,
"grad_norm": 0.5486982464790344,
"learning_rate": 3.0052724077328647e-05,
"loss": 1.0880777835845947,
"step": 172
},
{
"epoch": 0.07341772151898734,
"grad_norm": 0.5020889043807983,
"learning_rate": 3.0404217926186295e-05,
"loss": 1.0433368682861328,
"step": 174
},
{
"epoch": 0.07426160337552742,
"grad_norm": 0.47329774498939514,
"learning_rate": 3.075571177504394e-05,
"loss": 1.0528991222381592,
"step": 176
},
{
"epoch": 0.0751054852320675,
"grad_norm": 0.6635547876358032,
"learning_rate": 3.110720562390158e-05,
"loss": 1.1627811193466187,
"step": 178
},
{
"epoch": 0.0759493670886076,
"grad_norm": 0.5624618530273438,
"learning_rate": 3.145869947275923e-05,
"loss": 1.084869384765625,
"step": 180
},
{
"epoch": 0.07679324894514768,
"grad_norm": 0.6029536724090576,
"learning_rate": 3.181019332161687e-05,
"loss": 1.2227671146392822,
"step": 182
},
{
"epoch": 0.07763713080168777,
"grad_norm": 0.930959939956665,
"learning_rate": 3.216168717047452e-05,
"loss": 1.0955452919006348,
"step": 184
},
{
"epoch": 0.07848101265822785,
"grad_norm": 0.5326952338218689,
"learning_rate": 3.251318101933216e-05,
"loss": 1.0640798807144165,
"step": 186
},
{
"epoch": 0.07932489451476793,
"grad_norm": 0.5484727621078491,
"learning_rate": 3.286467486818981e-05,
"loss": 1.0700589418411255,
"step": 188
},
{
"epoch": 0.08016877637130802,
"grad_norm": 0.605273425579071,
"learning_rate": 3.3216168717047456e-05,
"loss": 1.1593081951141357,
"step": 190
},
{
"epoch": 0.0810126582278481,
"grad_norm": 0.5704394578933716,
"learning_rate": 3.35676625659051e-05,
"loss": 1.1617076396942139,
"step": 192
},
{
"epoch": 0.08185654008438818,
"grad_norm": 0.5929452180862427,
"learning_rate": 3.3919156414762746e-05,
"loss": 1.1346839666366577,
"step": 194
},
{
"epoch": 0.08270042194092828,
"grad_norm": 0.5624077916145325,
"learning_rate": 3.427065026362039e-05,
"loss": 1.0934710502624512,
"step": 196
},
{
"epoch": 0.08354430379746836,
"grad_norm": 0.6717425584793091,
"learning_rate": 3.4622144112478036e-05,
"loss": 1.1810534000396729,
"step": 198
},
{
"epoch": 0.08438818565400844,
"grad_norm": 0.5120199918746948,
"learning_rate": 3.4973637961335674e-05,
"loss": 1.1525514125823975,
"step": 200
},
{
"epoch": 0.08438818565400844,
"eval_loss": 1.142486810684204,
"eval_runtime": 668.2356,
"eval_samples_per_second": 3.153,
"eval_steps_per_second": 3.153,
"step": 200
},
{
"epoch": 0.08523206751054853,
"grad_norm": 0.5144487023353577,
"learning_rate": 3.5325131810193326e-05,
"loss": 1.0243735313415527,
"step": 202
},
{
"epoch": 0.08607594936708861,
"grad_norm": 0.6325069069862366,
"learning_rate": 3.5676625659050964e-05,
"loss": 1.118743896484375,
"step": 204
},
{
"epoch": 0.08691983122362869,
"grad_norm": 0.5501633882522583,
"learning_rate": 3.6028119507908616e-05,
"loss": 1.0380504131317139,
"step": 206
},
{
"epoch": 0.08776371308016878,
"grad_norm": 0.6133899688720703,
"learning_rate": 3.6379613356766254e-05,
"loss": 0.9837555885314941,
"step": 208
},
{
"epoch": 0.08860759493670886,
"grad_norm": 0.5799810886383057,
"learning_rate": 3.6731107205623906e-05,
"loss": 1.090720295906067,
"step": 210
},
{
"epoch": 0.08945147679324894,
"grad_norm": 0.6039511561393738,
"learning_rate": 3.7082601054481544e-05,
"loss": 1.120232343673706,
"step": 212
},
{
"epoch": 0.09029535864978903,
"grad_norm": 0.5983024835586548,
"learning_rate": 3.7434094903339196e-05,
"loss": 1.096949815750122,
"step": 214
},
{
"epoch": 0.09113924050632911,
"grad_norm": 0.5641079545021057,
"learning_rate": 3.7785588752196835e-05,
"loss": 1.1226298809051514,
"step": 216
},
{
"epoch": 0.0919831223628692,
"grad_norm": 0.655717134475708,
"learning_rate": 3.8137082601054486e-05,
"loss": 1.1260643005371094,
"step": 218
},
{
"epoch": 0.09282700421940929,
"grad_norm": 0.6111898422241211,
"learning_rate": 3.848857644991213e-05,
"loss": 1.0777709484100342,
"step": 220
},
{
"epoch": 0.09367088607594937,
"grad_norm": 0.6821302771568298,
"learning_rate": 3.884007029876977e-05,
"loss": 1.10588800907135,
"step": 222
},
{
"epoch": 0.09451476793248945,
"grad_norm": 0.693175733089447,
"learning_rate": 3.919156414762742e-05,
"loss": 1.1498671770095825,
"step": 224
},
{
"epoch": 0.09535864978902954,
"grad_norm": 0.5288166403770447,
"learning_rate": 3.954305799648506e-05,
"loss": 1.0587562322616577,
"step": 226
},
{
"epoch": 0.09620253164556962,
"grad_norm": 0.6882867813110352,
"learning_rate": 3.989455184534271e-05,
"loss": 1.1107512712478638,
"step": 228
},
{
"epoch": 0.0970464135021097,
"grad_norm": 0.5834154486656189,
"learning_rate": 4.024604569420035e-05,
"loss": 1.020510196685791,
"step": 230
},
{
"epoch": 0.09789029535864979,
"grad_norm": 0.7157064080238342,
"learning_rate": 4.0597539543058e-05,
"loss": 1.0642449855804443,
"step": 232
},
{
"epoch": 0.09873417721518987,
"grad_norm": 0.6530708074569702,
"learning_rate": 4.094903339191564e-05,
"loss": 1.0359872579574585,
"step": 234
},
{
"epoch": 0.09957805907172995,
"grad_norm": 0.6329686045646667,
"learning_rate": 4.130052724077329e-05,
"loss": 1.050504446029663,
"step": 236
},
{
"epoch": 0.10042194092827005,
"grad_norm": 0.6597026586532593,
"learning_rate": 4.165202108963093e-05,
"loss": 1.2621175050735474,
"step": 238
},
{
"epoch": 0.10126582278481013,
"grad_norm": 0.6195225119590759,
"learning_rate": 4.200351493848858e-05,
"loss": 1.1218310594558716,
"step": 240
},
{
"epoch": 0.1021097046413502,
"grad_norm": 0.6764137744903564,
"learning_rate": 4.235500878734622e-05,
"loss": 1.1250728368759155,
"step": 242
},
{
"epoch": 0.1029535864978903,
"grad_norm": 0.552363395690918,
"learning_rate": 4.270650263620387e-05,
"loss": 1.028212308883667,
"step": 244
},
{
"epoch": 0.10379746835443038,
"grad_norm": 0.5620495676994324,
"learning_rate": 4.305799648506151e-05,
"loss": 1.0425450801849365,
"step": 246
},
{
"epoch": 0.10464135021097046,
"grad_norm": 0.6860032081604004,
"learning_rate": 4.3409490333919156e-05,
"loss": 1.144278883934021,
"step": 248
},
{
"epoch": 0.10548523206751055,
"grad_norm": 0.6033259034156799,
"learning_rate": 4.37609841827768e-05,
"loss": 1.1223982572555542,
"step": 250
},
{
"epoch": 0.10632911392405063,
"grad_norm": 0.6292146444320679,
"learning_rate": 4.4112478031634446e-05,
"loss": 1.1609960794448853,
"step": 252
},
{
"epoch": 0.10717299578059072,
"grad_norm": 0.7982883453369141,
"learning_rate": 4.44639718804921e-05,
"loss": 1.063547968864441,
"step": 254
},
{
"epoch": 0.1080168776371308,
"grad_norm": 0.7719110250473022,
"learning_rate": 4.4815465729349736e-05,
"loss": 1.0719804763793945,
"step": 256
},
{
"epoch": 0.10886075949367088,
"grad_norm": 0.6101011633872986,
"learning_rate": 4.516695957820739e-05,
"loss": 1.0778400897979736,
"step": 258
},
{
"epoch": 0.10970464135021098,
"grad_norm": 0.7300994396209717,
"learning_rate": 4.5518453427065026e-05,
"loss": 1.2129558324813843,
"step": 260
},
{
"epoch": 0.11054852320675106,
"grad_norm": 0.8348747491836548,
"learning_rate": 4.586994727592268e-05,
"loss": 1.221714735031128,
"step": 262
},
{
"epoch": 0.11139240506329114,
"grad_norm": 0.5445612072944641,
"learning_rate": 4.6221441124780316e-05,
"loss": 1.0187978744506836,
"step": 264
},
{
"epoch": 0.11223628691983123,
"grad_norm": 0.6230319738388062,
"learning_rate": 4.657293497363797e-05,
"loss": 1.096561312675476,
"step": 266
},
{
"epoch": 0.11308016877637131,
"grad_norm": 0.6231237649917603,
"learning_rate": 4.6924428822495606e-05,
"loss": 1.089842438697815,
"step": 268
},
{
"epoch": 0.11392405063291139,
"grad_norm": 0.7178627252578735,
"learning_rate": 4.727592267135325e-05,
"loss": 1.0696645975112915,
"step": 270
},
{
"epoch": 0.11476793248945148,
"grad_norm": 0.6895854473114014,
"learning_rate": 4.7627416520210896e-05,
"loss": 1.0511361360549927,
"step": 272
},
{
"epoch": 0.11561181434599156,
"grad_norm": 0.6046878695487976,
"learning_rate": 4.797891036906854e-05,
"loss": 1.1373958587646484,
"step": 274
},
{
"epoch": 0.11645569620253164,
"grad_norm": 0.6524552702903748,
"learning_rate": 4.833040421792619e-05,
"loss": 1.0734186172485352,
"step": 276
},
{
"epoch": 0.11729957805907174,
"grad_norm": 0.6331019997596741,
"learning_rate": 4.868189806678383e-05,
"loss": 1.123913049697876,
"step": 278
},
{
"epoch": 0.11814345991561181,
"grad_norm": 0.5919018983840942,
"learning_rate": 4.903339191564148e-05,
"loss": 1.0635710954666138,
"step": 280
},
{
"epoch": 0.1189873417721519,
"grad_norm": 0.6067633032798767,
"learning_rate": 4.938488576449912e-05,
"loss": 1.0429247617721558,
"step": 282
},
{
"epoch": 0.11983122362869199,
"grad_norm": 0.6583750247955322,
"learning_rate": 4.9736379613356774e-05,
"loss": 1.1397464275360107,
"step": 284
},
{
"epoch": 0.12067510548523207,
"grad_norm": 0.6200069785118103,
"learning_rate": 5.008787346221442e-05,
"loss": 1.0590803623199463,
"step": 286
},
{
"epoch": 0.12151898734177215,
"grad_norm": 0.6798665523529053,
"learning_rate": 5.0439367311072064e-05,
"loss": 1.1318789720535278,
"step": 288
},
{
"epoch": 0.12236286919831224,
"grad_norm": 0.7508794069290161,
"learning_rate": 5.07908611599297e-05,
"loss": 1.0934956073760986,
"step": 290
},
{
"epoch": 0.12320675105485232,
"grad_norm": 0.6901452541351318,
"learning_rate": 5.114235500878735e-05,
"loss": 1.163407802581787,
"step": 292
},
{
"epoch": 0.1240506329113924,
"grad_norm": 0.6423285603523254,
"learning_rate": 5.1493848857644985e-05,
"loss": 1.09059476852417,
"step": 294
},
{
"epoch": 0.1248945147679325,
"grad_norm": 0.6839275360107422,
"learning_rate": 5.1845342706502644e-05,
"loss": 1.0690211057662964,
"step": 296
},
{
"epoch": 0.1257383966244726,
"grad_norm": 0.6350128054618835,
"learning_rate": 5.219683655536028e-05,
"loss": 0.982322096824646,
"step": 298
},
{
"epoch": 0.12658227848101267,
"grad_norm": 0.7136530876159668,
"learning_rate": 5.254833040421793e-05,
"loss": 1.1132930517196655,
"step": 300
},
{
"epoch": 0.12658227848101267,
"eval_loss": 1.0952109098434448,
"eval_runtime": 677.0652,
"eval_samples_per_second": 3.112,
"eval_steps_per_second": 3.112,
"step": 300
},
{
"epoch": 0.12742616033755275,
"grad_norm": 0.7339721322059631,
"learning_rate": 5.289982425307557e-05,
"loss": 0.973595917224884,
"step": 302
},
{
"epoch": 0.12827004219409283,
"grad_norm": 0.5941481590270996,
"learning_rate": 5.3251318101933224e-05,
"loss": 0.9819849729537964,
"step": 304
},
{
"epoch": 0.1291139240506329,
"grad_norm": 0.7153938412666321,
"learning_rate": 5.360281195079086e-05,
"loss": 1.0315470695495605,
"step": 306
},
{
"epoch": 0.12995780590717299,
"grad_norm": 0.5167180299758911,
"learning_rate": 5.395430579964851e-05,
"loss": 0.9492001533508301,
"step": 308
},
{
"epoch": 0.1308016877637131,
"grad_norm": 0.6055944561958313,
"learning_rate": 5.430579964850615e-05,
"loss": 1.0156209468841553,
"step": 310
},
{
"epoch": 0.13164556962025317,
"grad_norm": 0.7662386298179626,
"learning_rate": 5.4657293497363805e-05,
"loss": 1.1791651248931885,
"step": 312
},
{
"epoch": 0.13248945147679325,
"grad_norm": 0.6065546274185181,
"learning_rate": 5.500878734622145e-05,
"loss": 1.0009297132492065,
"step": 314
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.604225754737854,
"learning_rate": 5.536028119507909e-05,
"loss": 1.0208244323730469,
"step": 316
},
{
"epoch": 0.1341772151898734,
"grad_norm": 0.6186763048171997,
"learning_rate": 5.571177504393673e-05,
"loss": 0.9968416690826416,
"step": 318
},
{
"epoch": 0.1350210970464135,
"grad_norm": 0.7100363969802856,
"learning_rate": 5.606326889279437e-05,
"loss": 0.9540256857872009,
"step": 320
},
{
"epoch": 0.1358649789029536,
"grad_norm": 0.6979711055755615,
"learning_rate": 5.641476274165203e-05,
"loss": 1.0631953477859497,
"step": 322
},
{
"epoch": 0.13670886075949368,
"grad_norm": 0.6237109303474426,
"learning_rate": 5.676625659050967e-05,
"loss": 1.0170501470565796,
"step": 324
},
{
"epoch": 0.13755274261603376,
"grad_norm": 0.6525548696517944,
"learning_rate": 5.711775043936731e-05,
"loss": 1.0715603828430176,
"step": 326
},
{
"epoch": 0.13839662447257384,
"grad_norm": 0.6869221329689026,
"learning_rate": 5.746924428822495e-05,
"loss": 1.0111541748046875,
"step": 328
},
{
"epoch": 0.13924050632911392,
"grad_norm": 0.553188145160675,
"learning_rate": 5.782073813708261e-05,
"loss": 1.0311682224273682,
"step": 330
},
{
"epoch": 0.140084388185654,
"grad_norm": 0.6760852932929993,
"learning_rate": 5.817223198594025e-05,
"loss": 1.0213634967803955,
"step": 332
},
{
"epoch": 0.1409282700421941,
"grad_norm": 0.5907419919967651,
"learning_rate": 5.8523725834797894e-05,
"loss": 0.9748594164848328,
"step": 334
},
{
"epoch": 0.14177215189873418,
"grad_norm": 0.7044920921325684,
"learning_rate": 5.887521968365554e-05,
"loss": 1.05863356590271,
"step": 336
},
{
"epoch": 0.14261603375527426,
"grad_norm": 0.679073691368103,
"learning_rate": 5.922671353251318e-05,
"loss": 1.1341127157211304,
"step": 338
},
{
"epoch": 0.14345991561181434,
"grad_norm": 0.7676237225532532,
"learning_rate": 5.957820738137083e-05,
"loss": 0.9540836215019226,
"step": 340
},
{
"epoch": 0.14430379746835442,
"grad_norm": 0.6313899755477905,
"learning_rate": 5.9929701230228474e-05,
"loss": 1.0585911273956299,
"step": 342
},
{
"epoch": 0.1451476793248945,
"grad_norm": 0.7123099565505981,
"learning_rate": 6.028119507908612e-05,
"loss": 1.0760118961334229,
"step": 344
},
{
"epoch": 0.1459915611814346,
"grad_norm": 0.585935652256012,
"learning_rate": 6.063268892794376e-05,
"loss": 1.036866307258606,
"step": 346
},
{
"epoch": 0.1468354430379747,
"grad_norm": 0.5643263459205627,
"learning_rate": 6.0984182776801416e-05,
"loss": 1.0242938995361328,
"step": 348
},
{
"epoch": 0.14767932489451477,
"grad_norm": 0.626761794090271,
"learning_rate": 6.133567662565906e-05,
"loss": 1.0497376918792725,
"step": 350
},
{
"epoch": 0.14852320675105485,
"grad_norm": 0.5106956958770752,
"learning_rate": 6.16871704745167e-05,
"loss": 0.9811885356903076,
"step": 352
},
{
"epoch": 0.14936708860759493,
"grad_norm": 0.6948089003562927,
"learning_rate": 6.203866432337434e-05,
"loss": 1.0715330839157104,
"step": 354
},
{
"epoch": 0.150210970464135,
"grad_norm": 0.699713945388794,
"learning_rate": 6.239015817223199e-05,
"loss": 1.0405226945877075,
"step": 356
},
{
"epoch": 0.15105485232067511,
"grad_norm": 0.6437667012214661,
"learning_rate": 6.274165202108964e-05,
"loss": 1.0490930080413818,
"step": 358
},
{
"epoch": 0.1518987341772152,
"grad_norm": 0.6952699422836304,
"learning_rate": 6.309314586994728e-05,
"loss": 0.9267548322677612,
"step": 360
},
{
"epoch": 0.15274261603375527,
"grad_norm": 0.6713186502456665,
"learning_rate": 6.344463971880492e-05,
"loss": 1.0427420139312744,
"step": 362
},
{
"epoch": 0.15358649789029535,
"grad_norm": 0.6750379800796509,
"learning_rate": 6.379613356766257e-05,
"loss": 1.048950433731079,
"step": 364
},
{
"epoch": 0.15443037974683543,
"grad_norm": 0.6053379774093628,
"learning_rate": 6.414762741652022e-05,
"loss": 1.0156004428863525,
"step": 366
},
{
"epoch": 0.15527426160337554,
"grad_norm": 0.8063633441925049,
"learning_rate": 6.449912126537786e-05,
"loss": 1.0020819902420044,
"step": 368
},
{
"epoch": 0.15611814345991562,
"grad_norm": 0.8027494549751282,
"learning_rate": 6.48506151142355e-05,
"loss": 1.055633783340454,
"step": 370
},
{
"epoch": 0.1569620253164557,
"grad_norm": 0.6580121517181396,
"learning_rate": 6.520210896309315e-05,
"loss": 1.0149940252304077,
"step": 372
},
{
"epoch": 0.15780590717299578,
"grad_norm": 0.6561233997344971,
"learning_rate": 6.55536028119508e-05,
"loss": 0.9769611954689026,
"step": 374
},
{
"epoch": 0.15864978902953586,
"grad_norm": 0.6444346308708191,
"learning_rate": 6.590509666080844e-05,
"loss": 0.9099349975585938,
"step": 376
},
{
"epoch": 0.15949367088607594,
"grad_norm": 0.5879359245300293,
"learning_rate": 6.625659050966608e-05,
"loss": 1.0797548294067383,
"step": 378
},
{
"epoch": 0.16033755274261605,
"grad_norm": 0.6994144916534424,
"learning_rate": 6.660808435852373e-05,
"loss": 1.0336791276931763,
"step": 380
},
{
"epoch": 0.16118143459915613,
"grad_norm": 0.6128669381141663,
"learning_rate": 6.695957820738138e-05,
"loss": 1.018118143081665,
"step": 382
},
{
"epoch": 0.1620253164556962,
"grad_norm": 1.0237540006637573,
"learning_rate": 6.731107205623902e-05,
"loss": 1.1405497789382935,
"step": 384
},
{
"epoch": 0.16286919831223629,
"grad_norm": 0.6091578006744385,
"learning_rate": 6.766256590509666e-05,
"loss": 1.0314189195632935,
"step": 386
},
{
"epoch": 0.16371308016877636,
"grad_norm": 0.5916037559509277,
"learning_rate": 6.801405975395431e-05,
"loss": 0.9564052820205688,
"step": 388
},
{
"epoch": 0.16455696202531644,
"grad_norm": 0.771653950214386,
"learning_rate": 6.836555360281195e-05,
"loss": 1.0023859739303589,
"step": 390
},
{
"epoch": 0.16540084388185655,
"grad_norm": 0.654658317565918,
"learning_rate": 6.87170474516696e-05,
"loss": 1.07024085521698,
"step": 392
},
{
"epoch": 0.16624472573839663,
"grad_norm": 0.6611968874931335,
"learning_rate": 6.906854130052724e-05,
"loss": 1.0552500486373901,
"step": 394
},
{
"epoch": 0.1670886075949367,
"grad_norm": 0.6955893039703369,
"learning_rate": 6.942003514938489e-05,
"loss": 1.0562875270843506,
"step": 396
},
{
"epoch": 0.1679324894514768,
"grad_norm": 0.6666058301925659,
"learning_rate": 6.977152899824253e-05,
"loss": 0.9850592017173767,
"step": 398
},
{
"epoch": 0.16877637130801687,
"grad_norm": 0.6131711006164551,
"learning_rate": 7.012302284710018e-05,
"loss": 1.0077755451202393,
"step": 400
},
{
"epoch": 0.16877637130801687,
"eval_loss": 1.0625108480453491,
"eval_runtime": 691.0068,
"eval_samples_per_second": 3.049,
"eval_steps_per_second": 3.049,
"step": 400
},
{
"epoch": 0.16962025316455695,
"grad_norm": 0.6286499500274658,
"learning_rate": 7.047451669595783e-05,
"loss": 1.1012427806854248,
"step": 402
},
{
"epoch": 0.17046413502109706,
"grad_norm": 0.6639351844787598,
"learning_rate": 7.082601054481547e-05,
"loss": 1.0379719734191895,
"step": 404
},
{
"epoch": 0.17130801687763714,
"grad_norm": 0.750401496887207,
"learning_rate": 7.117750439367311e-05,
"loss": 1.031856656074524,
"step": 406
},
{
"epoch": 0.17215189873417722,
"grad_norm": 0.8084847331047058,
"learning_rate": 7.152899824253075e-05,
"loss": 1.0493193864822388,
"step": 408
},
{
"epoch": 0.1729957805907173,
"grad_norm": 0.7448462247848511,
"learning_rate": 7.188049209138841e-05,
"loss": 1.1012418270111084,
"step": 410
},
{
"epoch": 0.17383966244725738,
"grad_norm": 0.5841867923736572,
"learning_rate": 7.223198594024605e-05,
"loss": 0.9926692247390747,
"step": 412
},
{
"epoch": 0.17468354430379746,
"grad_norm": 0.7125606536865234,
"learning_rate": 7.258347978910369e-05,
"loss": 1.0588877201080322,
"step": 414
},
{
"epoch": 0.17552742616033756,
"grad_norm": 0.5750942230224609,
"learning_rate": 7.293497363796134e-05,
"loss": 1.038270354270935,
"step": 416
},
{
"epoch": 0.17637130801687764,
"grad_norm": 0.565444827079773,
"learning_rate": 7.328646748681899e-05,
"loss": 0.9843021035194397,
"step": 418
},
{
"epoch": 0.17721518987341772,
"grad_norm": 0.5825693011283875,
"learning_rate": 7.363796133567663e-05,
"loss": 1.0731632709503174,
"step": 420
},
{
"epoch": 0.1780590717299578,
"grad_norm": 0.6267391443252563,
"learning_rate": 7.398945518453427e-05,
"loss": 1.0061273574829102,
"step": 422
},
{
"epoch": 0.17890295358649788,
"grad_norm": 0.6621372103691101,
"learning_rate": 7.434094903339192e-05,
"loss": 1.0461612939834595,
"step": 424
},
{
"epoch": 0.17974683544303796,
"grad_norm": 0.6635435223579407,
"learning_rate": 7.469244288224957e-05,
"loss": 0.9789207577705383,
"step": 426
},
{
"epoch": 0.18059071729957807,
"grad_norm": 0.6342346668243408,
"learning_rate": 7.504393673110721e-05,
"loss": 1.0527069568634033,
"step": 428
},
{
"epoch": 0.18143459915611815,
"grad_norm": 0.6762149930000305,
"learning_rate": 7.539543057996485e-05,
"loss": 0.9708702564239502,
"step": 430
},
{
"epoch": 0.18227848101265823,
"grad_norm": 0.7073282599449158,
"learning_rate": 7.57469244288225e-05,
"loss": 1.0509834289550781,
"step": 432
},
{
"epoch": 0.1831223628691983,
"grad_norm": 0.6917856931686401,
"learning_rate": 7.609841827768014e-05,
"loss": 1.0128819942474365,
"step": 434
},
{
"epoch": 0.1839662447257384,
"grad_norm": 0.5574942231178284,
"learning_rate": 7.644991212653779e-05,
"loss": 0.989395797252655,
"step": 436
},
{
"epoch": 0.1848101265822785,
"grad_norm": 0.640765905380249,
"learning_rate": 7.680140597539543e-05,
"loss": 0.9846042990684509,
"step": 438
},
{
"epoch": 0.18565400843881857,
"grad_norm": 0.6699127554893494,
"learning_rate": 7.715289982425308e-05,
"loss": 1.0344442129135132,
"step": 440
},
{
"epoch": 0.18649789029535865,
"grad_norm": 0.6164930462837219,
"learning_rate": 7.750439367311072e-05,
"loss": 1.0179373025894165,
"step": 442
},
{
"epoch": 0.18734177215189873,
"grad_norm": 0.6880720853805542,
"learning_rate": 7.785588752196837e-05,
"loss": 1.0518895387649536,
"step": 444
},
{
"epoch": 0.1881856540084388,
"grad_norm": 0.6501413583755493,
"learning_rate": 7.820738137082601e-05,
"loss": 1.0442606210708618,
"step": 446
},
{
"epoch": 0.1890295358649789,
"grad_norm": 0.6076085567474365,
"learning_rate": 7.855887521968366e-05,
"loss": 0.9828442335128784,
"step": 448
},
{
"epoch": 0.189873417721519,
"grad_norm": 0.6418202519416809,
"learning_rate": 7.89103690685413e-05,
"loss": 1.0573710203170776,
"step": 450
},
{
"epoch": 0.19071729957805908,
"grad_norm": 0.7055076360702515,
"learning_rate": 7.926186291739895e-05,
"loss": 1.0216103792190552,
"step": 452
},
{
"epoch": 0.19156118143459916,
"grad_norm": 0.5668330192565918,
"learning_rate": 7.961335676625659e-05,
"loss": 0.9837722778320312,
"step": 454
},
{
"epoch": 0.19240506329113924,
"grad_norm": 0.6419380307197571,
"learning_rate": 7.996485061511424e-05,
"loss": 1.0003894567489624,
"step": 456
},
{
"epoch": 0.19324894514767932,
"grad_norm": 0.5949198007583618,
"learning_rate": 8.031634446397188e-05,
"loss": 1.0609031915664673,
"step": 458
},
{
"epoch": 0.1940928270042194,
"grad_norm": 0.7032039761543274,
"learning_rate": 8.066783831282952e-05,
"loss": 1.0543403625488281,
"step": 460
},
{
"epoch": 0.1949367088607595,
"grad_norm": 0.5775868892669678,
"learning_rate": 8.101933216168718e-05,
"loss": 0.9819303154945374,
"step": 462
},
{
"epoch": 0.19578059071729959,
"grad_norm": 0.9301062226295471,
"learning_rate": 8.137082601054482e-05,
"loss": 1.0542067289352417,
"step": 464
},
{
"epoch": 0.19662447257383966,
"grad_norm": 0.6193217039108276,
"learning_rate": 8.172231985940246e-05,
"loss": 0.9966341257095337,
"step": 466
},
{
"epoch": 0.19746835443037974,
"grad_norm": 0.6286146640777588,
"learning_rate": 8.20738137082601e-05,
"loss": 1.0474121570587158,
"step": 468
},
{
"epoch": 0.19831223628691982,
"grad_norm": 0.7418972253799438,
"learning_rate": 8.242530755711776e-05,
"loss": 0.9549239277839661,
"step": 470
},
{
"epoch": 0.1991561181434599,
"grad_norm": 0.6122808456420898,
"learning_rate": 8.27768014059754e-05,
"loss": 1.0191338062286377,
"step": 472
},
{
"epoch": 0.2,
"grad_norm": 0.6375362277030945,
"learning_rate": 8.312829525483304e-05,
"loss": 1.0987539291381836,
"step": 474
},
{
"epoch": 0.2008438818565401,
"grad_norm": 0.6459513306617737,
"learning_rate": 8.347978910369068e-05,
"loss": 1.0369136333465576,
"step": 476
},
{
"epoch": 0.20168776371308017,
"grad_norm": 0.7029640674591064,
"learning_rate": 8.383128295254833e-05,
"loss": 1.0582096576690674,
"step": 478
},
{
"epoch": 0.20253164556962025,
"grad_norm": 0.6345387697219849,
"learning_rate": 8.418277680140598e-05,
"loss": 1.022916316986084,
"step": 480
},
{
"epoch": 0.20337552742616033,
"grad_norm": 0.5764590501785278,
"learning_rate": 8.453427065026362e-05,
"loss": 0.973024308681488,
"step": 482
},
{
"epoch": 0.2042194092827004,
"grad_norm": 0.5884482860565186,
"learning_rate": 8.488576449912127e-05,
"loss": 1.0292812585830688,
"step": 484
},
{
"epoch": 0.20506329113924052,
"grad_norm": 0.616357147693634,
"learning_rate": 8.523725834797891e-05,
"loss": 1.0083447694778442,
"step": 486
},
{
"epoch": 0.2059071729957806,
"grad_norm": 0.7671196460723877,
"learning_rate": 8.558875219683656e-05,
"loss": 0.9936985373497009,
"step": 488
},
{
"epoch": 0.20675105485232068,
"grad_norm": 0.6197299957275391,
"learning_rate": 8.59402460456942e-05,
"loss": 1.051513910293579,
"step": 490
},
{
"epoch": 0.20759493670886076,
"grad_norm": 0.6912890672683716,
"learning_rate": 8.629173989455185e-05,
"loss": 0.9474978446960449,
"step": 492
},
{
"epoch": 0.20843881856540084,
"grad_norm": 0.6941592693328857,
"learning_rate": 8.664323374340949e-05,
"loss": 1.0671660900115967,
"step": 494
},
{
"epoch": 0.20928270042194091,
"grad_norm": 0.5889528393745422,
"learning_rate": 8.699472759226714e-05,
"loss": 1.0020159482955933,
"step": 496
},
{
"epoch": 0.21012658227848102,
"grad_norm": 0.6478549838066101,
"learning_rate": 8.734622144112478e-05,
"loss": 1.0165860652923584,
"step": 498
},
{
"epoch": 0.2109704641350211,
"grad_norm": 0.6018255949020386,
"learning_rate": 8.769771528998243e-05,
"loss": 0.8798263072967529,
"step": 500
},
{
"epoch": 0.2109704641350211,
"eval_loss": 1.042096495628357,
"eval_runtime": 692.4361,
"eval_samples_per_second": 3.043,
"eval_steps_per_second": 3.043,
"step": 500
},
{
"epoch": 0.21181434599156118,
"grad_norm": 0.578990638256073,
"learning_rate": 8.804920913884007e-05,
"loss": 1.092096209526062,
"step": 502
},
{
"epoch": 0.21265822784810126,
"grad_norm": 0.6597883701324463,
"learning_rate": 8.840070298769771e-05,
"loss": 1.0413451194763184,
"step": 504
},
{
"epoch": 0.21350210970464134,
"grad_norm": 0.6660305261611938,
"learning_rate": 8.875219683655536e-05,
"loss": 1.0073142051696777,
"step": 506
},
{
"epoch": 0.21434599156118145,
"grad_norm": 0.6283115148544312,
"learning_rate": 8.910369068541301e-05,
"loss": 1.0319768190383911,
"step": 508
},
{
"epoch": 0.21518987341772153,
"grad_norm": 0.6257343292236328,
"learning_rate": 8.945518453427065e-05,
"loss": 1.0046353340148926,
"step": 510
},
{
"epoch": 0.2160337552742616,
"grad_norm": 0.5530875325202942,
"learning_rate": 8.980667838312829e-05,
"loss": 0.9169099926948547,
"step": 512
},
{
"epoch": 0.2168776371308017,
"grad_norm": 0.5369633436203003,
"learning_rate": 9.015817223198594e-05,
"loss": 0.9081505537033081,
"step": 514
},
{
"epoch": 0.21772151898734177,
"grad_norm": 0.6618232131004333,
"learning_rate": 9.05096660808436e-05,
"loss": 0.9165045022964478,
"step": 516
},
{
"epoch": 0.21856540084388185,
"grad_norm": 0.600666344165802,
"learning_rate": 9.086115992970123e-05,
"loss": 0.91348797082901,
"step": 518
},
{
"epoch": 0.21940928270042195,
"grad_norm": 0.5919831991195679,
"learning_rate": 9.121265377855887e-05,
"loss": 1.006508469581604,
"step": 520
},
{
"epoch": 0.22025316455696203,
"grad_norm": 0.688058614730835,
"learning_rate": 9.156414762741654e-05,
"loss": 1.0013236999511719,
"step": 522
},
{
"epoch": 0.2210970464135021,
"grad_norm": 0.6721227765083313,
"learning_rate": 9.191564147627418e-05,
"loss": 1.0909923315048218,
"step": 524
},
{
"epoch": 0.2219409282700422,
"grad_norm": 0.5987313389778137,
"learning_rate": 9.226713532513181e-05,
"loss": 1.0117096900939941,
"step": 526
},
{
"epoch": 0.22278481012658227,
"grad_norm": 0.6191489696502686,
"learning_rate": 9.261862917398945e-05,
"loss": 1.0153647661209106,
"step": 528
},
{
"epoch": 0.22362869198312235,
"grad_norm": 0.6821563243865967,
"learning_rate": 9.29701230228471e-05,
"loss": 0.9649755954742432,
"step": 530
},
{
"epoch": 0.22447257383966246,
"grad_norm": 1.760398268699646,
"learning_rate": 9.332161687170476e-05,
"loss": 0.8673232197761536,
"step": 532
},
{
"epoch": 0.22531645569620254,
"grad_norm": 0.6670058369636536,
"learning_rate": 9.36731107205624e-05,
"loss": 0.9942440986633301,
"step": 534
},
{
"epoch": 0.22616033755274262,
"grad_norm": 0.7345916032791138,
"learning_rate": 9.402460456942003e-05,
"loss": 1.0364389419555664,
"step": 536
},
{
"epoch": 0.2270042194092827,
"grad_norm": 0.5946128964424133,
"learning_rate": 9.437609841827768e-05,
"loss": 0.9314924478530884,
"step": 538
},
{
"epoch": 0.22784810126582278,
"grad_norm": 0.5800848603248596,
"learning_rate": 9.472759226713534e-05,
"loss": 1.0694862604141235,
"step": 540
},
{
"epoch": 0.22869198312236286,
"grad_norm": 0.6712192893028259,
"learning_rate": 9.507908611599297e-05,
"loss": 1.03531014919281,
"step": 542
},
{
"epoch": 0.22953586497890296,
"grad_norm": 0.5641416311264038,
"learning_rate": 9.543057996485063e-05,
"loss": 0.9795235991477966,
"step": 544
},
{
"epoch": 0.23037974683544304,
"grad_norm": 0.50412517786026,
"learning_rate": 9.578207381370826e-05,
"loss": 0.9641494750976562,
"step": 546
},
{
"epoch": 0.23122362869198312,
"grad_norm": 0.579118549823761,
"learning_rate": 9.61335676625659e-05,
"loss": 0.9375281929969788,
"step": 548
},
{
"epoch": 0.2320675105485232,
"grad_norm": 0.5888341665267944,
"learning_rate": 9.648506151142355e-05,
"loss": 0.9414046406745911,
"step": 550
},
{
"epoch": 0.23291139240506328,
"grad_norm": 0.5595056414604187,
"learning_rate": 9.68365553602812e-05,
"loss": 0.9005617499351501,
"step": 552
},
{
"epoch": 0.23375527426160336,
"grad_norm": 0.6605326533317566,
"learning_rate": 9.718804920913884e-05,
"loss": 1.0283968448638916,
"step": 554
},
{
"epoch": 0.23459915611814347,
"grad_norm": 0.5657313466072083,
"learning_rate": 9.753954305799648e-05,
"loss": 1.0058249235153198,
"step": 556
},
{
"epoch": 0.23544303797468355,
"grad_norm": 0.5433364510536194,
"learning_rate": 9.789103690685413e-05,
"loss": 0.9835494756698608,
"step": 558
},
{
"epoch": 0.23628691983122363,
"grad_norm": 0.6129802465438843,
"learning_rate": 9.824253075571179e-05,
"loss": 1.054532527923584,
"step": 560
},
{
"epoch": 0.2371308016877637,
"grad_norm": 0.6496239304542542,
"learning_rate": 9.859402460456942e-05,
"loss": 1.0240973234176636,
"step": 562
},
{
"epoch": 0.2379746835443038,
"grad_norm": 0.6380873918533325,
"learning_rate": 9.894551845342706e-05,
"loss": 1.0229179859161377,
"step": 564
},
{
"epoch": 0.23881856540084387,
"grad_norm": 0.6151993870735168,
"learning_rate": 9.929701230228471e-05,
"loss": 1.0111570358276367,
"step": 566
},
{
"epoch": 0.23966244725738398,
"grad_norm": 0.5727584958076477,
"learning_rate": 9.964850615114237e-05,
"loss": 0.9450829029083252,
"step": 568
},
{
"epoch": 0.24050632911392406,
"grad_norm": 0.6620725989341736,
"learning_rate": 0.0001,
"loss": 0.9800319075584412,
"step": 570
},
{
"epoch": 0.24135021097046414,
"grad_norm": 0.6151163578033447,
"learning_rate": 0.00010035149384885764,
"loss": 0.9757438898086548,
"step": 572
},
{
"epoch": 0.24219409282700421,
"grad_norm": 0.5672140717506409,
"learning_rate": 0.0001007029876977153,
"loss": 0.9104921817779541,
"step": 574
},
{
"epoch": 0.2430379746835443,
"grad_norm": 0.5697256326675415,
"learning_rate": 0.00010105448154657293,
"loss": 1.1027376651763916,
"step": 576
},
{
"epoch": 0.2438818565400844,
"grad_norm": 0.5590381622314453,
"learning_rate": 0.00010140597539543057,
"loss": 1.0055404901504517,
"step": 578
},
{
"epoch": 0.24472573839662448,
"grad_norm": 0.5518567562103271,
"learning_rate": 0.00010175746924428824,
"loss": 1.020835518836975,
"step": 580
},
{
"epoch": 0.24556962025316456,
"grad_norm": 0.6338496208190918,
"learning_rate": 0.00010210896309314588,
"loss": 0.9528344869613647,
"step": 582
},
{
"epoch": 0.24641350210970464,
"grad_norm": 0.6497329473495483,
"learning_rate": 0.00010246045694200353,
"loss": 1.0088670253753662,
"step": 584
},
{
"epoch": 0.24725738396624472,
"grad_norm": 0.49888095259666443,
"learning_rate": 0.00010281195079086117,
"loss": 0.9961200952529907,
"step": 586
},
{
"epoch": 0.2481012658227848,
"grad_norm": 0.5680158734321594,
"learning_rate": 0.0001031634446397188,
"loss": 0.9635610580444336,
"step": 588
},
{
"epoch": 0.2489451476793249,
"grad_norm": 0.658168375492096,
"learning_rate": 0.00010351493848857646,
"loss": 0.9392287135124207,
"step": 590
},
{
"epoch": 0.249789029535865,
"grad_norm": 0.618262767791748,
"learning_rate": 0.0001038664323374341,
"loss": 0.9600516557693481,
"step": 592
},
{
"epoch": 0.25063291139240507,
"grad_norm": 0.6003909111022949,
"learning_rate": 0.00010421792618629173,
"loss": 1.005476713180542,
"step": 594
},
{
"epoch": 0.2514767932489452,
"grad_norm": 0.5437078475952148,
"learning_rate": 0.00010456942003514938,
"loss": 0.9523017406463623,
"step": 596
},
{
"epoch": 0.2523206751054852,
"grad_norm": 0.5524541735649109,
"learning_rate": 0.00010492091388400705,
"loss": 0.9526668787002563,
"step": 598
},
{
"epoch": 0.25316455696202533,
"grad_norm": 0.679504930973053,
"learning_rate": 0.00010527240773286469,
"loss": 1.019660472869873,
"step": 600
},
{
"epoch": 0.25316455696202533,
"eval_loss": 1.0193854570388794,
"eval_runtime": 677.9523,
"eval_samples_per_second": 3.108,
"eval_steps_per_second": 3.108,
"step": 600
},
{
"epoch": 0.2540084388185654,
"grad_norm": 0.5646136999130249,
"learning_rate": 0.00010562390158172233,
"loss": 0.9910882711410522,
"step": 602
},
{
"epoch": 0.2548523206751055,
"grad_norm": 0.5238093137741089,
"learning_rate": 0.00010597539543057998,
"loss": 0.9616432785987854,
"step": 604
},
{
"epoch": 0.25569620253164554,
"grad_norm": 0.7483857274055481,
"learning_rate": 0.00010632688927943762,
"loss": 1.0078275203704834,
"step": 606
},
{
"epoch": 0.25654008438818565,
"grad_norm": 0.578948974609375,
"learning_rate": 0.00010667838312829525,
"loss": 0.9827103018760681,
"step": 608
},
{
"epoch": 0.25738396624472576,
"grad_norm": 0.5525906085968018,
"learning_rate": 0.00010702987697715289,
"loss": 1.0423277616500854,
"step": 610
},
{
"epoch": 0.2582278481012658,
"grad_norm": 0.6721326112747192,
"learning_rate": 0.00010738137082601054,
"loss": 0.9561693072319031,
"step": 612
},
{
"epoch": 0.2590717299578059,
"grad_norm": 0.5701051354408264,
"learning_rate": 0.00010773286467486821,
"loss": 0.9602992534637451,
"step": 614
},
{
"epoch": 0.25991561181434597,
"grad_norm": 0.6349860429763794,
"learning_rate": 0.00010808435852372585,
"loss": 1.1422650814056396,
"step": 616
},
{
"epoch": 0.2607594936708861,
"grad_norm": 0.5496085286140442,
"learning_rate": 0.00010843585237258349,
"loss": 0.9762773513793945,
"step": 618
},
{
"epoch": 0.2616033755274262,
"grad_norm": 0.6080722808837891,
"learning_rate": 0.00010878734622144114,
"loss": 1.0133616924285889,
"step": 620
},
{
"epoch": 0.26244725738396624,
"grad_norm": 0.5450218915939331,
"learning_rate": 0.00010913884007029878,
"loss": 0.9385587573051453,
"step": 622
},
{
"epoch": 0.26329113924050634,
"grad_norm": 0.592106819152832,
"learning_rate": 0.00010949033391915641,
"loss": 0.9359989762306213,
"step": 624
},
{
"epoch": 0.2641350210970464,
"grad_norm": 0.6449427604675293,
"learning_rate": 0.00010984182776801407,
"loss": 1.0266027450561523,
"step": 626
},
{
"epoch": 0.2649789029535865,
"grad_norm": 0.538299560546875,
"learning_rate": 0.0001101933216168717,
"loss": 0.9303187131881714,
"step": 628
},
{
"epoch": 0.26582278481012656,
"grad_norm": 0.546316921710968,
"learning_rate": 0.00011054481546572934,
"loss": 0.9368857145309448,
"step": 630
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.5818730592727661,
"learning_rate": 0.00011089630931458701,
"loss": 0.9573145508766174,
"step": 632
},
{
"epoch": 0.26751054852320677,
"grad_norm": 0.5958262085914612,
"learning_rate": 0.00011124780316344465,
"loss": 0.9345449805259705,
"step": 634
},
{
"epoch": 0.2683544303797468,
"grad_norm": 0.6259077787399292,
"learning_rate": 0.0001115992970123023,
"loss": 1.0906590223312378,
"step": 636
},
{
"epoch": 0.26919831223628693,
"grad_norm": 0.589672863483429,
"learning_rate": 0.00011195079086115994,
"loss": 1.0757447481155396,
"step": 638
},
{
"epoch": 0.270042194092827,
"grad_norm": 0.5714080333709717,
"learning_rate": 0.00011230228471001758,
"loss": 0.9310855269432068,
"step": 640
},
{
"epoch": 0.2708860759493671,
"grad_norm": 0.45342639088630676,
"learning_rate": 0.00011265377855887523,
"loss": 0.9276360273361206,
"step": 642
},
{
"epoch": 0.2717299578059072,
"grad_norm": 0.6386750340461731,
"learning_rate": 0.00011300527240773287,
"loss": 1.084719181060791,
"step": 644
},
{
"epoch": 0.27257383966244725,
"grad_norm": 0.6446163654327393,
"learning_rate": 0.0001133567662565905,
"loss": 0.9763918519020081,
"step": 646
},
{
"epoch": 0.27341772151898736,
"grad_norm": 0.5925686359405518,
"learning_rate": 0.00011370826010544816,
"loss": 0.9517921805381775,
"step": 648
},
{
"epoch": 0.2742616033755274,
"grad_norm": 0.5399773716926575,
"learning_rate": 0.00011405975395430582,
"loss": 1.0587927103042603,
"step": 650
},
{
"epoch": 0.2751054852320675,
"grad_norm": 0.5872456431388855,
"learning_rate": 0.00011441124780316346,
"loss": 0.883341908454895,
"step": 652
},
{
"epoch": 0.2759493670886076,
"grad_norm": 0.5574564337730408,
"learning_rate": 0.0001147627416520211,
"loss": 1.0306891202926636,
"step": 654
},
{
"epoch": 0.2767932489451477,
"grad_norm": 0.47789013385772705,
"learning_rate": 0.00011511423550087874,
"loss": 0.8814032077789307,
"step": 656
},
{
"epoch": 0.2776371308016878,
"grad_norm": 0.5565530061721802,
"learning_rate": 0.00011546572934973639,
"loss": 0.9460552334785461,
"step": 658
},
{
"epoch": 0.27848101265822783,
"grad_norm": 0.5299761295318604,
"learning_rate": 0.00011581722319859403,
"loss": 0.9475110769271851,
"step": 660
},
{
"epoch": 0.27932489451476794,
"grad_norm": 0.6503344178199768,
"learning_rate": 0.00011616871704745166,
"loss": 1.0630913972854614,
"step": 662
},
{
"epoch": 0.280168776371308,
"grad_norm": 0.5794585943222046,
"learning_rate": 0.00011652021089630932,
"loss": 0.9389138221740723,
"step": 664
},
{
"epoch": 0.2810126582278481,
"grad_norm": 0.5762867331504822,
"learning_rate": 0.00011687170474516695,
"loss": 0.8934136033058167,
"step": 666
},
{
"epoch": 0.2818565400843882,
"grad_norm": 0.6565435528755188,
"learning_rate": 0.00011722319859402462,
"loss": 1.1072614192962646,
"step": 668
},
{
"epoch": 0.28270042194092826,
"grad_norm": 0.5819830298423767,
"learning_rate": 0.00011757469244288226,
"loss": 1.0501434803009033,
"step": 670
},
{
"epoch": 0.28354430379746837,
"grad_norm": 0.6071487069129944,
"learning_rate": 0.00011792618629173991,
"loss": 0.9880793690681458,
"step": 672
},
{
"epoch": 0.2843881856540084,
"grad_norm": 0.5765058398246765,
"learning_rate": 0.00011827768014059755,
"loss": 0.9670693874359131,
"step": 674
},
{
"epoch": 0.2852320675105485,
"grad_norm": 0.5245351791381836,
"learning_rate": 0.00011862917398945519,
"loss": 0.9602360725402832,
"step": 676
},
{
"epoch": 0.28607594936708863,
"grad_norm": 0.6189922094345093,
"learning_rate": 0.00011898066783831282,
"loss": 0.9684560894966125,
"step": 678
},
{
"epoch": 0.2869198312236287,
"grad_norm": 0.6138690710067749,
"learning_rate": 0.00011933216168717048,
"loss": 0.9465792775154114,
"step": 680
},
{
"epoch": 0.2877637130801688,
"grad_norm": 0.5371595621109009,
"learning_rate": 0.00011968365553602812,
"loss": 0.8495944738388062,
"step": 682
},
{
"epoch": 0.28860759493670884,
"grad_norm": 0.5549944639205933,
"learning_rate": 0.00012003514938488578,
"loss": 0.9663267135620117,
"step": 684
},
{
"epoch": 0.28945147679324895,
"grad_norm": 0.6484189033508301,
"learning_rate": 0.00012038664323374342,
"loss": 0.9736058712005615,
"step": 686
},
{
"epoch": 0.290295358649789,
"grad_norm": 0.540351390838623,
"learning_rate": 0.00012073813708260107,
"loss": 1.0591845512390137,
"step": 688
},
{
"epoch": 0.2911392405063291,
"grad_norm": 0.5657922029495239,
"learning_rate": 0.00012108963093145871,
"loss": 0.944908618927002,
"step": 690
},
{
"epoch": 0.2919831223628692,
"grad_norm": 0.6040505170822144,
"learning_rate": 0.00012144112478031635,
"loss": 1.0018219947814941,
"step": 692
},
{
"epoch": 0.29282700421940927,
"grad_norm": 0.5435477495193481,
"learning_rate": 0.000121792618629174,
"loss": 1.0351502895355225,
"step": 694
},
{
"epoch": 0.2936708860759494,
"grad_norm": 0.5712518692016602,
"learning_rate": 0.00012214411247803164,
"loss": 0.9935672283172607,
"step": 696
},
{
"epoch": 0.29451476793248943,
"grad_norm": 0.6138222813606262,
"learning_rate": 0.00012249560632688928,
"loss": 1.0165108442306519,
"step": 698
},
{
"epoch": 0.29535864978902954,
"grad_norm": 0.4495212435722351,
"learning_rate": 0.00012284710017574691,
"loss": 0.9334425926208496,
"step": 700
},
{
"epoch": 0.29535864978902954,
"eval_loss": 0.996929407119751,
"eval_runtime": 668.6398,
"eval_samples_per_second": 3.151,
"eval_steps_per_second": 3.151,
"step": 700
},
{
"epoch": 0.29620253164556964,
"grad_norm": 0.5321539044380188,
"learning_rate": 0.00012319859402460458,
"loss": 1.0516537427902222,
"step": 702
},
{
"epoch": 0.2970464135021097,
"grad_norm": 0.5716516971588135,
"learning_rate": 0.00012355008787346222,
"loss": 0.9387198686599731,
"step": 704
},
{
"epoch": 0.2978902953586498,
"grad_norm": 0.5617920160293579,
"learning_rate": 0.00012390158172231988,
"loss": 0.99737948179245,
"step": 706
},
{
"epoch": 0.29873417721518986,
"grad_norm": 0.4922899007797241,
"learning_rate": 0.00012425307557117752,
"loss": 0.9955025911331177,
"step": 708
},
{
"epoch": 0.29957805907172996,
"grad_norm": 0.543501615524292,
"learning_rate": 0.00012460456942003516,
"loss": 0.9124280214309692,
"step": 710
},
{
"epoch": 0.30042194092827,
"grad_norm": 0.49590054154396057,
"learning_rate": 0.0001249560632688928,
"loss": 0.9820216298103333,
"step": 712
},
{
"epoch": 0.3012658227848101,
"grad_norm": 0.5984305739402771,
"learning_rate": 0.00012530755711775044,
"loss": 1.0152074098587036,
"step": 714
},
{
"epoch": 0.30210970464135023,
"grad_norm": 0.9343504905700684,
"learning_rate": 0.00012565905096660807,
"loss": 1.0577725172042847,
"step": 716
},
{
"epoch": 0.3029535864978903,
"grad_norm": 0.5118702054023743,
"learning_rate": 0.0001260105448154657,
"loss": 0.9830358028411865,
"step": 718
},
{
"epoch": 0.3037974683544304,
"grad_norm": 0.4940392076969147,
"learning_rate": 0.00012636203866432338,
"loss": 0.9466043710708618,
"step": 720
},
{
"epoch": 0.30464135021097044,
"grad_norm": 0.5965693593025208,
"learning_rate": 0.00012671353251318104,
"loss": 1.015270709991455,
"step": 722
},
{
"epoch": 0.30548523206751055,
"grad_norm": 0.5020529627799988,
"learning_rate": 0.00012706502636203868,
"loss": 0.9703927636146545,
"step": 724
},
{
"epoch": 0.30632911392405066,
"grad_norm": 0.6067010164260864,
"learning_rate": 0.00012741652021089632,
"loss": 1.0255526304244995,
"step": 726
},
{
"epoch": 0.3071729957805907,
"grad_norm": 0.5931884050369263,
"learning_rate": 0.00012776801405975396,
"loss": 0.9335633516311646,
"step": 728
},
{
"epoch": 0.3080168776371308,
"grad_norm": 0.5938752293586731,
"learning_rate": 0.0001281195079086116,
"loss": 1.0921578407287598,
"step": 730
},
{
"epoch": 0.30886075949367087,
"grad_norm": 0.49728086590766907,
"learning_rate": 0.00012847100175746923,
"loss": 0.963066041469574,
"step": 732
},
{
"epoch": 0.309704641350211,
"grad_norm": 0.5452080965042114,
"learning_rate": 0.0001288224956063269,
"loss": 0.9513075351715088,
"step": 734
},
{
"epoch": 0.3105485232067511,
"grad_norm": 0.5497731566429138,
"learning_rate": 0.00012917398945518454,
"loss": 0.8576077222824097,
"step": 736
},
{
"epoch": 0.31139240506329113,
"grad_norm": 0.5580397248268127,
"learning_rate": 0.0001295254833040422,
"loss": 0.9542577862739563,
"step": 738
},
{
"epoch": 0.31223628691983124,
"grad_norm": 0.5890427827835083,
"learning_rate": 0.00012987697715289984,
"loss": 0.8992732167243958,
"step": 740
},
{
"epoch": 0.3130801687763713,
"grad_norm": 0.5942965745925903,
"learning_rate": 0.00013022847100175748,
"loss": 1.0322896242141724,
"step": 742
},
{
"epoch": 0.3139240506329114,
"grad_norm": 0.6341713070869446,
"learning_rate": 0.00013057996485061512,
"loss": 0.9217103719711304,
"step": 744
},
{
"epoch": 0.31476793248945145,
"grad_norm": 0.5294105410575867,
"learning_rate": 0.00013093145869947276,
"loss": 0.951789915561676,
"step": 746
},
{
"epoch": 0.31561181434599156,
"grad_norm": 0.6372058391571045,
"learning_rate": 0.0001312829525483304,
"loss": 0.9459875226020813,
"step": 748
},
{
"epoch": 0.31645569620253167,
"grad_norm": 0.5979796648025513,
"learning_rate": 0.00013163444639718806,
"loss": 0.9626097679138184,
"step": 750
},
{
"epoch": 0.3172995780590717,
"grad_norm": 0.5682399868965149,
"learning_rate": 0.0001319859402460457,
"loss": 1.0261781215667725,
"step": 752
},
{
"epoch": 0.3181434599156118,
"grad_norm": 0.5349125266075134,
"learning_rate": 0.00013233743409490336,
"loss": 0.9319828152656555,
"step": 754
},
{
"epoch": 0.3189873417721519,
"grad_norm": 0.6093934178352356,
"learning_rate": 0.000132688927943761,
"loss": 0.9216550588607788,
"step": 756
},
{
"epoch": 0.319831223628692,
"grad_norm": 0.5188612341880798,
"learning_rate": 0.00013304042179261864,
"loss": 0.901739776134491,
"step": 758
},
{
"epoch": 0.3206751054852321,
"grad_norm": 0.5877130627632141,
"learning_rate": 0.00013339191564147628,
"loss": 1.0362589359283447,
"step": 760
},
{
"epoch": 0.32151898734177214,
"grad_norm": 0.5542771816253662,
"learning_rate": 0.00013374340949033392,
"loss": 0.8787116408348083,
"step": 762
},
{
"epoch": 0.32236286919831225,
"grad_norm": 0.5084902048110962,
"learning_rate": 0.00013409490333919156,
"loss": 0.9237037301063538,
"step": 764
},
{
"epoch": 0.3232067510548523,
"grad_norm": 0.5461528301239014,
"learning_rate": 0.00013444639718804922,
"loss": 1.0150731801986694,
"step": 766
},
{
"epoch": 0.3240506329113924,
"grad_norm": 0.53483647108078,
"learning_rate": 0.00013479789103690686,
"loss": 0.8985214829444885,
"step": 768
},
{
"epoch": 0.32489451476793246,
"grad_norm": 0.5580531358718872,
"learning_rate": 0.0001351493848857645,
"loss": 1.0225775241851807,
"step": 770
},
{
"epoch": 0.32573839662447257,
"grad_norm": 0.5203377604484558,
"learning_rate": 0.00013550087873462216,
"loss": 0.9571293592453003,
"step": 772
},
{
"epoch": 0.3265822784810127,
"grad_norm": 0.5049671530723572,
"learning_rate": 0.0001358523725834798,
"loss": 1.0468909740447998,
"step": 774
},
{
"epoch": 0.32742616033755273,
"grad_norm": 0.4723063111305237,
"learning_rate": 0.00013620386643233744,
"loss": 0.7743215560913086,
"step": 776
},
{
"epoch": 0.32827004219409284,
"grad_norm": 0.6310980916023254,
"learning_rate": 0.00013655536028119508,
"loss": 1.021510362625122,
"step": 778
},
{
"epoch": 0.3291139240506329,
"grad_norm": 0.47066664695739746,
"learning_rate": 0.00013690685413005274,
"loss": 0.9134382605552673,
"step": 780
},
{
"epoch": 0.329957805907173,
"grad_norm": 0.5725092887878418,
"learning_rate": 0.00013725834797891038,
"loss": 0.9797834753990173,
"step": 782
},
{
"epoch": 0.3308016877637131,
"grad_norm": 0.5139563083648682,
"learning_rate": 0.00013760984182776802,
"loss": 0.9372621178627014,
"step": 784
},
{
"epoch": 0.33164556962025316,
"grad_norm": 0.5275821685791016,
"learning_rate": 0.00013796133567662566,
"loss": 0.9528245329856873,
"step": 786
},
{
"epoch": 0.33248945147679326,
"grad_norm": 0.5702582001686096,
"learning_rate": 0.0001383128295254833,
"loss": 0.9750176072120667,
"step": 788
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.5281293392181396,
"learning_rate": 0.00013866432337434096,
"loss": 0.9412306547164917,
"step": 790
},
{
"epoch": 0.3341772151898734,
"grad_norm": 0.5578986406326294,
"learning_rate": 0.0001390158172231986,
"loss": 0.997580885887146,
"step": 792
},
{
"epoch": 0.33502109704641353,
"grad_norm": 0.50461345911026,
"learning_rate": 0.00013936731107205624,
"loss": 0.9082320928573608,
"step": 794
},
{
"epoch": 0.3358649789029536,
"grad_norm": 0.5258530378341675,
"learning_rate": 0.0001397188049209139,
"loss": 1.0082844495773315,
"step": 796
},
{
"epoch": 0.3367088607594937,
"grad_norm": 0.5548169016838074,
"learning_rate": 0.00014007029876977154,
"loss": 0.9729003310203552,
"step": 798
},
{
"epoch": 0.33755274261603374,
"grad_norm": 0.48601076006889343,
"learning_rate": 0.00014042179261862918,
"loss": 0.9099526405334473,
"step": 800
},
{
"epoch": 0.33755274261603374,
"eval_loss": 0.9800403714179993,
"eval_runtime": 678.8306,
"eval_samples_per_second": 3.104,
"eval_steps_per_second": 3.104,
"step": 800
},
{
"epoch": 0.33839662447257385,
"grad_norm": 0.5413158535957336,
"learning_rate": 0.00014077328646748682,
"loss": 0.8610644936561584,
"step": 802
},
{
"epoch": 0.3392405063291139,
"grad_norm": 0.5147035717964172,
"learning_rate": 0.00014112478031634446,
"loss": 0.9584825038909912,
"step": 804
},
{
"epoch": 0.340084388185654,
"grad_norm": 0.5931771397590637,
"learning_rate": 0.0001414762741652021,
"loss": 1.0142558813095093,
"step": 806
},
{
"epoch": 0.3409282700421941,
"grad_norm": 0.5178377032279968,
"learning_rate": 0.00014182776801405976,
"loss": 1.0078763961791992,
"step": 808
},
{
"epoch": 0.34177215189873417,
"grad_norm": 0.5453237295150757,
"learning_rate": 0.0001421792618629174,
"loss": 0.9107215404510498,
"step": 810
},
{
"epoch": 0.3426160337552743,
"grad_norm": 0.5886152982711792,
"learning_rate": 0.00014253075571177506,
"loss": 0.9981362819671631,
"step": 812
},
{
"epoch": 0.3434599156118143,
"grad_norm": 0.48040178418159485,
"learning_rate": 0.0001428822495606327,
"loss": 0.9636131525039673,
"step": 814
},
{
"epoch": 0.34430379746835443,
"grad_norm": 0.5011753439903259,
"learning_rate": 0.00014323374340949034,
"loss": 0.9590586423873901,
"step": 816
},
{
"epoch": 0.34514767932489454,
"grad_norm": 0.57858806848526,
"learning_rate": 0.00014358523725834798,
"loss": 0.978246808052063,
"step": 818
},
{
"epoch": 0.3459915611814346,
"grad_norm": 0.46092939376831055,
"learning_rate": 0.00014393673110720562,
"loss": 0.8549934029579163,
"step": 820
},
{
"epoch": 0.3468354430379747,
"grad_norm": 0.5756489038467407,
"learning_rate": 0.00014428822495606326,
"loss": 0.9771265387535095,
"step": 822
},
{
"epoch": 0.34767932489451475,
"grad_norm": 0.5501731634140015,
"learning_rate": 0.00014463971880492092,
"loss": 0.8739748001098633,
"step": 824
},
{
"epoch": 0.34852320675105486,
"grad_norm": 0.5451868176460266,
"learning_rate": 0.0001449912126537786,
"loss": 0.9129468202590942,
"step": 826
},
{
"epoch": 0.3493670886075949,
"grad_norm": 0.4624619781970978,
"learning_rate": 0.00014534270650263623,
"loss": 0.9196704030036926,
"step": 828
},
{
"epoch": 0.350210970464135,
"grad_norm": 0.520878791809082,
"learning_rate": 0.00014569420035149386,
"loss": 0.9976527690887451,
"step": 830
},
{
"epoch": 0.3510548523206751,
"grad_norm": 0.4469171464443207,
"learning_rate": 0.0001460456942003515,
"loss": 0.7753443717956543,
"step": 832
},
{
"epoch": 0.3518987341772152,
"grad_norm": 0.5105249881744385,
"learning_rate": 0.00014639718804920914,
"loss": 0.9584846496582031,
"step": 834
},
{
"epoch": 0.3527426160337553,
"grad_norm": 0.5043913125991821,
"learning_rate": 0.00014674868189806678,
"loss": 0.9013500213623047,
"step": 836
},
{
"epoch": 0.35358649789029534,
"grad_norm": 0.575850784778595,
"learning_rate": 0.00014710017574692442,
"loss": 0.9775562286376953,
"step": 838
},
{
"epoch": 0.35443037974683544,
"grad_norm": 0.5128876566886902,
"learning_rate": 0.00014745166959578208,
"loss": 0.9278940558433533,
"step": 840
},
{
"epoch": 0.35527426160337555,
"grad_norm": 0.5757885575294495,
"learning_rate": 0.00014780316344463975,
"loss": 1.0091488361358643,
"step": 842
},
{
"epoch": 0.3561181434599156,
"grad_norm": 0.500934898853302,
"learning_rate": 0.00014815465729349739,
"loss": 0.9286836981773376,
"step": 844
},
{
"epoch": 0.3569620253164557,
"grad_norm": 0.5220686197280884,
"learning_rate": 0.00014850615114235502,
"loss": 0.9484171867370605,
"step": 846
},
{
"epoch": 0.35780590717299576,
"grad_norm": 0.5494697690010071,
"learning_rate": 0.00014885764499121266,
"loss": 1.0556397438049316,
"step": 848
},
{
"epoch": 0.35864978902953587,
"grad_norm": 0.46633943915367126,
"learning_rate": 0.0001492091388400703,
"loss": 0.933089017868042,
"step": 850
},
{
"epoch": 0.3594936708860759,
"grad_norm": 0.4864962697029114,
"learning_rate": 0.00014956063268892794,
"loss": 1.0016963481903076,
"step": 852
},
{
"epoch": 0.36033755274261603,
"grad_norm": 0.5032764673233032,
"learning_rate": 0.0001499121265377856,
"loss": 0.9085348844528198,
"step": 854
},
{
"epoch": 0.36118143459915614,
"grad_norm": 0.5424998998641968,
"learning_rate": 0.00015026362038664324,
"loss": 0.923502504825592,
"step": 856
},
{
"epoch": 0.3620253164556962,
"grad_norm": 0.5181655287742615,
"learning_rate": 0.00015061511423550088,
"loss": 0.919174313545227,
"step": 858
},
{
"epoch": 0.3628691983122363,
"grad_norm": 0.5129443407058716,
"learning_rate": 0.00015096660808435855,
"loss": 0.851981520652771,
"step": 860
},
{
"epoch": 0.36371308016877635,
"grad_norm": 0.49540698528289795,
"learning_rate": 0.00015131810193321618,
"loss": 0.8633858561515808,
"step": 862
},
{
"epoch": 0.36455696202531646,
"grad_norm": 0.4706701934337616,
"learning_rate": 0.00015166959578207382,
"loss": 0.9473677277565002,
"step": 864
},
{
"epoch": 0.36540084388185656,
"grad_norm": 0.587704598903656,
"learning_rate": 0.00015202108963093146,
"loss": 0.942383885383606,
"step": 866
},
{
"epoch": 0.3662447257383966,
"grad_norm": 0.5851273536682129,
"learning_rate": 0.0001523725834797891,
"loss": 0.9811251163482666,
"step": 868
},
{
"epoch": 0.3670886075949367,
"grad_norm": 0.46357613801956177,
"learning_rate": 0.00015272407732864676,
"loss": 0.8151084184646606,
"step": 870
},
{
"epoch": 0.3679324894514768,
"grad_norm": 0.47250670194625854,
"learning_rate": 0.0001530755711775044,
"loss": 0.9023333191871643,
"step": 872
},
{
"epoch": 0.3687763713080169,
"grad_norm": 0.47510042786598206,
"learning_rate": 0.00015342706502636204,
"loss": 0.8758499622344971,
"step": 874
},
{
"epoch": 0.369620253164557,
"grad_norm": 0.5687124729156494,
"learning_rate": 0.00015377855887521968,
"loss": 0.9837421774864197,
"step": 876
},
{
"epoch": 0.37046413502109704,
"grad_norm": 0.49064236879348755,
"learning_rate": 0.00015413005272407735,
"loss": 0.9084216356277466,
"step": 878
},
{
"epoch": 0.37130801687763715,
"grad_norm": 0.5288164615631104,
"learning_rate": 0.00015448154657293498,
"loss": 0.8425542712211609,
"step": 880
},
{
"epoch": 0.3721518987341772,
"grad_norm": 0.5446951985359192,
"learning_rate": 0.00015483304042179262,
"loss": 1.006197452545166,
"step": 882
},
{
"epoch": 0.3729957805907173,
"grad_norm": 0.46872642636299133,
"learning_rate": 0.00015518453427065026,
"loss": 0.9779444932937622,
"step": 884
},
{
"epoch": 0.37383966244725736,
"grad_norm": 0.4852714240550995,
"learning_rate": 0.00015553602811950793,
"loss": 0.9099963307380676,
"step": 886
},
{
"epoch": 0.37468354430379747,
"grad_norm": 0.5219841003417969,
"learning_rate": 0.00015588752196836556,
"loss": 0.9730570316314697,
"step": 888
},
{
"epoch": 0.3755274261603376,
"grad_norm": 0.5258626341819763,
"learning_rate": 0.0001562390158172232,
"loss": 0.9304586052894592,
"step": 890
},
{
"epoch": 0.3763713080168776,
"grad_norm": 0.4266716241836548,
"learning_rate": 0.00015659050966608084,
"loss": 0.8680716156959534,
"step": 892
},
{
"epoch": 0.37721518987341773,
"grad_norm": 0.46361327171325684,
"learning_rate": 0.0001569420035149385,
"loss": 0.8428018093109131,
"step": 894
},
{
"epoch": 0.3780590717299578,
"grad_norm": 0.5313687920570374,
"learning_rate": 0.00015729349736379614,
"loss": 0.8465750217437744,
"step": 896
},
{
"epoch": 0.3789029535864979,
"grad_norm": 0.47166210412979126,
"learning_rate": 0.00015764499121265378,
"loss": 0.9120327234268188,
"step": 898
},
{
"epoch": 0.379746835443038,
"grad_norm": 0.4647318720817566,
"learning_rate": 0.00015799648506151145,
"loss": 0.8950425982475281,
"step": 900
},
{
"epoch": 0.379746835443038,
"eval_loss": 0.9643027186393738,
"eval_runtime": 691.7929,
"eval_samples_per_second": 3.046,
"eval_steps_per_second": 3.046,
"step": 900
},
{
"epoch": 0.38059071729957805,
"grad_norm": 0.5445119738578796,
"learning_rate": 0.00015834797891036909,
"loss": 0.908163845539093,
"step": 902
},
{
"epoch": 0.38143459915611816,
"grad_norm": 0.4311858117580414,
"learning_rate": 0.00015869947275922672,
"loss": 0.8945821523666382,
"step": 904
},
{
"epoch": 0.3822784810126582,
"grad_norm": 0.5590984225273132,
"learning_rate": 0.00015905096660808436,
"loss": 0.9478458762168884,
"step": 906
},
{
"epoch": 0.3831223628691983,
"grad_norm": 0.5470241904258728,
"learning_rate": 0.000159402460456942,
"loss": 0.9259957671165466,
"step": 908
},
{
"epoch": 0.38396624472573837,
"grad_norm": 0.5498791337013245,
"learning_rate": 0.00015975395430579964,
"loss": 0.8824930787086487,
"step": 910
},
{
"epoch": 0.3848101265822785,
"grad_norm": 0.4779198467731476,
"learning_rate": 0.0001601054481546573,
"loss": 0.8842340707778931,
"step": 912
},
{
"epoch": 0.3856540084388186,
"grad_norm": 0.5390620827674866,
"learning_rate": 0.00016045694200351494,
"loss": 0.92950040102005,
"step": 914
},
{
"epoch": 0.38649789029535864,
"grad_norm": 0.505519688129425,
"learning_rate": 0.0001608084358523726,
"loss": 0.8420897126197815,
"step": 916
},
{
"epoch": 0.38734177215189874,
"grad_norm": 0.4463907778263092,
"learning_rate": 0.00016115992970123025,
"loss": 0.8256624341011047,
"step": 918
},
{
"epoch": 0.3881856540084388,
"grad_norm": 0.5321422219276428,
"learning_rate": 0.00016151142355008788,
"loss": 0.8701168298721313,
"step": 920
},
{
"epoch": 0.3890295358649789,
"grad_norm": 0.4343073070049286,
"learning_rate": 0.00016186291739894552,
"loss": 0.9019309878349304,
"step": 922
},
{
"epoch": 0.389873417721519,
"grad_norm": 0.5311984419822693,
"learning_rate": 0.00016221441124780316,
"loss": 0.8560551404953003,
"step": 924
},
{
"epoch": 0.39071729957805906,
"grad_norm": 0.588691771030426,
"learning_rate": 0.0001625659050966608,
"loss": 0.9700050354003906,
"step": 926
},
{
"epoch": 0.39156118143459917,
"grad_norm": 0.5425586104393005,
"learning_rate": 0.00016291739894551844,
"loss": 1.043768048286438,
"step": 928
},
{
"epoch": 0.3924050632911392,
"grad_norm": 0.5228736996650696,
"learning_rate": 0.0001632688927943761,
"loss": 0.9501712918281555,
"step": 930
},
{
"epoch": 0.39324894514767933,
"grad_norm": 0.48960360884666443,
"learning_rate": 0.00016362038664323377,
"loss": 0.9223058223724365,
"step": 932
},
{
"epoch": 0.39409282700421944,
"grad_norm": 0.45204755663871765,
"learning_rate": 0.0001639718804920914,
"loss": 0.9692960977554321,
"step": 934
},
{
"epoch": 0.3949367088607595,
"grad_norm": 0.5299274921417236,
"learning_rate": 0.00016432337434094905,
"loss": 0.9467466473579407,
"step": 936
},
{
"epoch": 0.3957805907172996,
"grad_norm": 0.5607715249061584,
"learning_rate": 0.00016467486818980668,
"loss": 0.9118053317070007,
"step": 938
},
{
"epoch": 0.39662447257383965,
"grad_norm": 0.5271831154823303,
"learning_rate": 0.00016502636203866432,
"loss": 0.9131460189819336,
"step": 940
},
{
"epoch": 0.39746835443037976,
"grad_norm": 0.5075286030769348,
"learning_rate": 0.00016537785588752196,
"loss": 0.9358300566673279,
"step": 942
},
{
"epoch": 0.3983122362869198,
"grad_norm": 0.515731155872345,
"learning_rate": 0.00016572934973637963,
"loss": 0.8908210396766663,
"step": 944
},
{
"epoch": 0.3991561181434599,
"grad_norm": 0.4856977164745331,
"learning_rate": 0.00016608084358523726,
"loss": 0.9775290489196777,
"step": 946
},
{
"epoch": 0.4,
"grad_norm": 0.48846355080604553,
"learning_rate": 0.00016643233743409493,
"loss": 0.8957490921020508,
"step": 948
},
{
"epoch": 0.4008438818565401,
"grad_norm": 0.42990800738334656,
"learning_rate": 0.00016678383128295257,
"loss": 0.9036174416542053,
"step": 950
},
{
"epoch": 0.4016877637130802,
"grad_norm": 0.49552062153816223,
"learning_rate": 0.0001671353251318102,
"loss": 0.991032600402832,
"step": 952
},
{
"epoch": 0.40253164556962023,
"grad_norm": 0.4565040171146393,
"learning_rate": 0.00016748681898066784,
"loss": 0.823063313961029,
"step": 954
},
{
"epoch": 0.40337552742616034,
"grad_norm": 0.4290153682231903,
"learning_rate": 0.00016783831282952548,
"loss": 0.8785063624382019,
"step": 956
},
{
"epoch": 0.40421940928270045,
"grad_norm": 0.5419702529907227,
"learning_rate": 0.00016818980667838312,
"loss": 0.8763971924781799,
"step": 958
},
{
"epoch": 0.4050632911392405,
"grad_norm": 0.5177501440048218,
"learning_rate": 0.00016854130052724079,
"loss": 0.9470553398132324,
"step": 960
},
{
"epoch": 0.4059071729957806,
"grad_norm": 0.539725661277771,
"learning_rate": 0.00016889279437609842,
"loss": 0.9235025644302368,
"step": 962
},
{
"epoch": 0.40675105485232066,
"grad_norm": 0.5324983596801758,
"learning_rate": 0.0001692442882249561,
"loss": 1.0248996019363403,
"step": 964
},
{
"epoch": 0.40759493670886077,
"grad_norm": 0.4936407506465912,
"learning_rate": 0.00016959578207381373,
"loss": 0.9076873660087585,
"step": 966
},
{
"epoch": 0.4084388185654008,
"grad_norm": 0.4960501194000244,
"learning_rate": 0.00016994727592267137,
"loss": 0.9162673950195312,
"step": 968
},
{
"epoch": 0.4092827004219409,
"grad_norm": 0.45093682408332825,
"learning_rate": 0.000170298769771529,
"loss": 0.904100775718689,
"step": 970
},
{
"epoch": 0.41012658227848103,
"grad_norm": 0.4560275077819824,
"learning_rate": 0.00017065026362038664,
"loss": 0.8633337020874023,
"step": 972
},
{
"epoch": 0.4109704641350211,
"grad_norm": 0.44885000586509705,
"learning_rate": 0.00017100175746924428,
"loss": 0.8454209566116333,
"step": 974
},
{
"epoch": 0.4118143459915612,
"grad_norm": 0.47251659631729126,
"learning_rate": 0.00017135325131810195,
"loss": 0.824730396270752,
"step": 976
},
{
"epoch": 0.41265822784810124,
"grad_norm": 0.6597666144371033,
"learning_rate": 0.00017170474516695959,
"loss": 0.9496501684188843,
"step": 978
},
{
"epoch": 0.41350210970464135,
"grad_norm": 0.39806297421455383,
"learning_rate": 0.00017205623901581722,
"loss": 0.9419087171554565,
"step": 980
},
{
"epoch": 0.41434599156118146,
"grad_norm": 0.48231109976768494,
"learning_rate": 0.0001724077328646749,
"loss": 0.9182976484298706,
"step": 982
},
{
"epoch": 0.4151898734177215,
"grad_norm": 0.5438776612281799,
"learning_rate": 0.00017275922671353253,
"loss": 0.9386967420578003,
"step": 984
},
{
"epoch": 0.4160337552742616,
"grad_norm": 0.4959667921066284,
"learning_rate": 0.00017311072056239017,
"loss": 0.897849440574646,
"step": 986
},
{
"epoch": 0.41687763713080167,
"grad_norm": 0.43533357977867126,
"learning_rate": 0.0001734622144112478,
"loss": 0.8776953816413879,
"step": 988
},
{
"epoch": 0.4177215189873418,
"grad_norm": 0.47513946890830994,
"learning_rate": 0.00017381370826010547,
"loss": 0.9162989854812622,
"step": 990
},
{
"epoch": 0.41856540084388183,
"grad_norm": 0.4907188415527344,
"learning_rate": 0.0001741652021089631,
"loss": 0.9482660889625549,
"step": 992
},
{
"epoch": 0.41940928270042194,
"grad_norm": 0.44499966502189636,
"learning_rate": 0.00017451669595782075,
"loss": 0.8812930583953857,
"step": 994
},
{
"epoch": 0.42025316455696204,
"grad_norm": 0.4535730481147766,
"learning_rate": 0.00017486818980667838,
"loss": 0.9439874887466431,
"step": 996
},
{
"epoch": 0.4210970464135021,
"grad_norm": 0.5240745544433594,
"learning_rate": 0.00017521968365553602,
"loss": 0.8818395137786865,
"step": 998
},
{
"epoch": 0.4219409282700422,
"grad_norm": 0.5301211476325989,
"learning_rate": 0.0001755711775043937,
"loss": 0.886186957359314,
"step": 1000
},
{
"epoch": 0.4219409282700422,
"eval_loss": 0.9487298727035522,
"eval_runtime": 689.4288,
"eval_samples_per_second": 3.056,
"eval_steps_per_second": 3.056,
"step": 1000
},
{
"epoch": 0.42278481012658226,
"grad_norm": 0.47876957058906555,
"learning_rate": 0.00017592267135325133,
"loss": 0.8814021348953247,
"step": 1002
},
{
"epoch": 0.42362869198312236,
"grad_norm": 0.4929780960083008,
"learning_rate": 0.00017627416520210896,
"loss": 0.8295068740844727,
"step": 1004
},
{
"epoch": 0.42447257383966247,
"grad_norm": 0.41888436675071716,
"learning_rate": 0.00017662565905096663,
"loss": 0.8364827036857605,
"step": 1006
},
{
"epoch": 0.4253164556962025,
"grad_norm": 0.5175151824951172,
"learning_rate": 0.00017697715289982427,
"loss": 1.0167189836502075,
"step": 1008
},
{
"epoch": 0.42616033755274263,
"grad_norm": 0.4815356135368347,
"learning_rate": 0.0001773286467486819,
"loss": 0.8460752964019775,
"step": 1010
},
{
"epoch": 0.4270042194092827,
"grad_norm": 0.5210875272750854,
"learning_rate": 0.00017768014059753954,
"loss": 0.8595574498176575,
"step": 1012
},
{
"epoch": 0.4278481012658228,
"grad_norm": 0.4400486946105957,
"learning_rate": 0.00017803163444639718,
"loss": 0.8764723539352417,
"step": 1014
},
{
"epoch": 0.4286919831223629,
"grad_norm": 0.5282127857208252,
"learning_rate": 0.00017838312829525482,
"loss": 0.9706798791885376,
"step": 1016
},
{
"epoch": 0.42953586497890295,
"grad_norm": 0.47523441910743713,
"learning_rate": 0.00017873462214411249,
"loss": 0.8912002444267273,
"step": 1018
},
{
"epoch": 0.43037974683544306,
"grad_norm": 0.49640706181526184,
"learning_rate": 0.00017908611599297012,
"loss": 0.8835636973381042,
"step": 1020
},
{
"epoch": 0.4312236286919831,
"grad_norm": 0.5253039002418518,
"learning_rate": 0.0001794376098418278,
"loss": 0.8711735606193542,
"step": 1022
},
{
"epoch": 0.4320675105485232,
"grad_norm": 0.49285009503364563,
"learning_rate": 0.00017978910369068543,
"loss": 0.9064869284629822,
"step": 1024
},
{
"epoch": 0.43291139240506327,
"grad_norm": 0.48758041858673096,
"learning_rate": 0.00018014059753954307,
"loss": 0.8993359208106995,
"step": 1026
},
{
"epoch": 0.4337552742616034,
"grad_norm": 0.4815461039543152,
"learning_rate": 0.0001804920913884007,
"loss": 0.8747937679290771,
"step": 1028
},
{
"epoch": 0.4345991561181435,
"grad_norm": 0.4930349886417389,
"learning_rate": 0.00018084358523725834,
"loss": 0.8879084587097168,
"step": 1030
},
{
"epoch": 0.43544303797468353,
"grad_norm": 0.49303027987480164,
"learning_rate": 0.00018119507908611598,
"loss": 1.0023083686828613,
"step": 1032
},
{
"epoch": 0.43628691983122364,
"grad_norm": 0.5312249064445496,
"learning_rate": 0.00018154657293497365,
"loss": 0.938680112361908,
"step": 1034
},
{
"epoch": 0.4371308016877637,
"grad_norm": 0.5174582004547119,
"learning_rate": 0.0001818980667838313,
"loss": 0.854195773601532,
"step": 1036
},
{
"epoch": 0.4379746835443038,
"grad_norm": 0.5452545881271362,
"learning_rate": 0.00018224956063268895,
"loss": 0.9059375524520874,
"step": 1038
},
{
"epoch": 0.4388185654008439,
"grad_norm": 0.5480839014053345,
"learning_rate": 0.0001826010544815466,
"loss": 0.9708920121192932,
"step": 1040
},
{
"epoch": 0.43966244725738396,
"grad_norm": 0.44693151116371155,
"learning_rate": 0.00018295254833040423,
"loss": 0.8561046123504639,
"step": 1042
},
{
"epoch": 0.44050632911392407,
"grad_norm": 0.4024234116077423,
"learning_rate": 0.00018330404217926187,
"loss": 0.8811968564987183,
"step": 1044
},
{
"epoch": 0.4413502109704641,
"grad_norm": 0.5586408376693726,
"learning_rate": 0.0001836555360281195,
"loss": 0.978068470954895,
"step": 1046
},
{
"epoch": 0.4421940928270042,
"grad_norm": 0.4918624758720398,
"learning_rate": 0.00018400702987697714,
"loss": 0.8640981316566467,
"step": 1048
},
{
"epoch": 0.4430379746835443,
"grad_norm": 0.4335230588912964,
"learning_rate": 0.0001843585237258348,
"loss": 0.9077964425086975,
"step": 1050
},
{
"epoch": 0.4438818565400844,
"grad_norm": 0.5275123715400696,
"learning_rate": 0.00018471001757469247,
"loss": 0.960682213306427,
"step": 1052
},
{
"epoch": 0.4447257383966245,
"grad_norm": 0.5144415497779846,
"learning_rate": 0.0001850615114235501,
"loss": 0.8549577593803406,
"step": 1054
},
{
"epoch": 0.44556962025316454,
"grad_norm": 0.5166662931442261,
"learning_rate": 0.00018541300527240775,
"loss": 0.9320827126502991,
"step": 1056
},
{
"epoch": 0.44641350210970465,
"grad_norm": 0.5011980533599854,
"learning_rate": 0.0001857644991212654,
"loss": 0.9632431864738464,
"step": 1058
},
{
"epoch": 0.4472573839662447,
"grad_norm": 0.49312469363212585,
"learning_rate": 0.00018611599297012303,
"loss": 0.9442946910858154,
"step": 1060
},
{
"epoch": 0.4481012658227848,
"grad_norm": 0.49958568811416626,
"learning_rate": 0.00018646748681898066,
"loss": 0.952802300453186,
"step": 1062
},
{
"epoch": 0.4489451476793249,
"grad_norm": 0.49827462434768677,
"learning_rate": 0.00018681898066783833,
"loss": 0.9630650877952576,
"step": 1064
},
{
"epoch": 0.44978902953586497,
"grad_norm": 0.523980438709259,
"learning_rate": 0.00018717047451669597,
"loss": 0.9081395268440247,
"step": 1066
},
{
"epoch": 0.4506329113924051,
"grad_norm": 0.5108568668365479,
"learning_rate": 0.0001875219683655536,
"loss": 0.9398958683013916,
"step": 1068
},
{
"epoch": 0.45147679324894513,
"grad_norm": 0.4453965127468109,
"learning_rate": 0.00018787346221441127,
"loss": 0.93592768907547,
"step": 1070
},
{
"epoch": 0.45232067510548524,
"grad_norm": 0.4675683081150055,
"learning_rate": 0.0001882249560632689,
"loss": 0.8879633545875549,
"step": 1072
},
{
"epoch": 0.4531645569620253,
"grad_norm": 0.47398847341537476,
"learning_rate": 0.00018857644991212655,
"loss": 0.9966004490852356,
"step": 1074
},
{
"epoch": 0.4540084388185654,
"grad_norm": 0.46053192019462585,
"learning_rate": 0.0001889279437609842,
"loss": 0.9184179902076721,
"step": 1076
},
{
"epoch": 0.4548523206751055,
"grad_norm": 0.5601398348808289,
"learning_rate": 0.00018927943760984182,
"loss": 0.9434974193572998,
"step": 1078
},
{
"epoch": 0.45569620253164556,
"grad_norm": 0.48422637581825256,
"learning_rate": 0.0001896309314586995,
"loss": 0.9522465467453003,
"step": 1080
},
{
"epoch": 0.45654008438818566,
"grad_norm": 0.46280911564826965,
"learning_rate": 0.00018998242530755713,
"loss": 0.9315434694290161,
"step": 1082
},
{
"epoch": 0.4573839662447257,
"grad_norm": 0.4658683240413666,
"learning_rate": 0.00019033391915641477,
"loss": 0.8165783286094666,
"step": 1084
},
{
"epoch": 0.4582278481012658,
"grad_norm": 0.4192182123661041,
"learning_rate": 0.0001906854130052724,
"loss": 0.8024274110794067,
"step": 1086
},
{
"epoch": 0.45907172995780593,
"grad_norm": 0.4178735613822937,
"learning_rate": 0.00019103690685413007,
"loss": 0.8620653748512268,
"step": 1088
},
{
"epoch": 0.459915611814346,
"grad_norm": 0.4488574266433716,
"learning_rate": 0.0001913884007029877,
"loss": 0.9121530055999756,
"step": 1090
},
{
"epoch": 0.4607594936708861,
"grad_norm": 0.5164965987205505,
"learning_rate": 0.00019173989455184535,
"loss": 0.9496700763702393,
"step": 1092
},
{
"epoch": 0.46160337552742614,
"grad_norm": 0.41563132405281067,
"learning_rate": 0.00019209138840070299,
"loss": 0.8757708668708801,
"step": 1094
},
{
"epoch": 0.46244725738396625,
"grad_norm": 2.3222429752349854,
"learning_rate": 0.00019244288224956065,
"loss": 0.8406533002853394,
"step": 1096
},
{
"epoch": 0.46329113924050636,
"grad_norm": 0.4813845157623291,
"learning_rate": 0.0001927943760984183,
"loss": 0.9459465742111206,
"step": 1098
},
{
"epoch": 0.4641350210970464,
"grad_norm": 0.6233882308006287,
"learning_rate": 0.00019314586994727593,
"loss": 0.9376904368400574,
"step": 1100
},
{
"epoch": 0.4641350210970464,
"eval_loss": 0.9357889294624329,
"eval_runtime": 676.9573,
"eval_samples_per_second": 3.112,
"eval_steps_per_second": 3.112,
"step": 1100
},
{
"epoch": 0.4649789029535865,
"grad_norm": 0.5125579237937927,
"learning_rate": 0.00019349736379613357,
"loss": 0.8998825550079346,
"step": 1102
},
{
"epoch": 0.46582278481012657,
"grad_norm": 0.4534320831298828,
"learning_rate": 0.00019384885764499123,
"loss": 0.9213768839836121,
"step": 1104
},
{
"epoch": 0.4666666666666667,
"grad_norm": 0.4715143144130707,
"learning_rate": 0.00019420035149384887,
"loss": 0.9739661812782288,
"step": 1106
},
{
"epoch": 0.4675105485232067,
"grad_norm": 0.45529672503471375,
"learning_rate": 0.0001945518453427065,
"loss": 0.9113216400146484,
"step": 1108
},
{
"epoch": 0.46835443037974683,
"grad_norm": 0.48199015855789185,
"learning_rate": 0.00019490333919156417,
"loss": 0.92528235912323,
"step": 1110
},
{
"epoch": 0.46919831223628694,
"grad_norm": 0.4425188899040222,
"learning_rate": 0.0001952548330404218,
"loss": 0.8612716197967529,
"step": 1112
},
{
"epoch": 0.470042194092827,
"grad_norm": 0.4311593770980835,
"learning_rate": 0.00019560632688927945,
"loss": 0.8901699185371399,
"step": 1114
},
{
"epoch": 0.4708860759493671,
"grad_norm": 0.497806578874588,
"learning_rate": 0.0001959578207381371,
"loss": 0.9271994829177856,
"step": 1116
},
{
"epoch": 0.47172995780590715,
"grad_norm": 0.47149473428726196,
"learning_rate": 0.00019630931458699473,
"loss": 0.8740925788879395,
"step": 1118
},
{
"epoch": 0.47257383966244726,
"grad_norm": 0.5029966235160828,
"learning_rate": 0.00019666080843585236,
"loss": 0.9190115928649902,
"step": 1120
},
{
"epoch": 0.47341772151898737,
"grad_norm": 0.47972601652145386,
"learning_rate": 0.00019701230228471003,
"loss": 1.0043057203292847,
"step": 1122
},
{
"epoch": 0.4742616033755274,
"grad_norm": 0.45096471905708313,
"learning_rate": 0.00019736379613356767,
"loss": 0.9472925066947937,
"step": 1124
},
{
"epoch": 0.4751054852320675,
"grad_norm": 0.459852933883667,
"learning_rate": 0.00019771528998242533,
"loss": 0.8043124079704285,
"step": 1126
},
{
"epoch": 0.4759493670886076,
"grad_norm": 0.5313422679901123,
"learning_rate": 0.00019806678383128297,
"loss": 0.9662142395973206,
"step": 1128
},
{
"epoch": 0.4767932489451477,
"grad_norm": 0.43474531173706055,
"learning_rate": 0.0001984182776801406,
"loss": 0.8691151738166809,
"step": 1130
},
{
"epoch": 0.47763713080168774,
"grad_norm": 0.5232312083244324,
"learning_rate": 0.00019876977152899825,
"loss": 0.9024254083633423,
"step": 1132
},
{
"epoch": 0.47848101265822784,
"grad_norm": 0.47671905159950256,
"learning_rate": 0.0001991212653778559,
"loss": 0.8996873497962952,
"step": 1134
},
{
"epoch": 0.47932489451476795,
"grad_norm": 0.42289480566978455,
"learning_rate": 0.00019947275922671353,
"loss": 0.8336917757987976,
"step": 1136
},
{
"epoch": 0.480168776371308,
"grad_norm": 0.4700844883918762,
"learning_rate": 0.0001998242530755712,
"loss": 0.8943206071853638,
"step": 1138
},
{
"epoch": 0.4810126582278481,
"grad_norm": 0.5341399312019348,
"learning_rate": 0.00019999999711649004,
"loss": 0.9340365529060364,
"step": 1140
},
{
"epoch": 0.48185654008438816,
"grad_norm": 0.46169522404670715,
"learning_rate": 0.00019999997404841123,
"loss": 0.8241778016090393,
"step": 1142
},
{
"epoch": 0.48270042194092827,
"grad_norm": 0.5475223064422607,
"learning_rate": 0.00019999992791225896,
"loss": 1.0096158981323242,
"step": 1144
},
{
"epoch": 0.4835443037974684,
"grad_norm": 0.524641215801239,
"learning_rate": 0.00019999985870804385,
"loss": 0.9650378227233887,
"step": 1146
},
{
"epoch": 0.48438818565400843,
"grad_norm": 0.4326174855232239,
"learning_rate": 0.00019999976643578186,
"loss": 0.9003009796142578,
"step": 1148
},
{
"epoch": 0.48523206751054854,
"grad_norm": 0.49034059047698975,
"learning_rate": 0.0001999996510954943,
"loss": 0.9793432950973511,
"step": 1150
},
{
"epoch": 0.4860759493670886,
"grad_norm": 0.4760092496871948,
"learning_rate": 0.00019999951268720776,
"loss": 0.8793007731437683,
"step": 1152
},
{
"epoch": 0.4869198312236287,
"grad_norm": 0.44606879353523254,
"learning_rate": 0.00019999935121095417,
"loss": 0.888630747795105,
"step": 1154
},
{
"epoch": 0.4877637130801688,
"grad_norm": 0.5030332207679749,
"learning_rate": 0.0001999991666667708,
"loss": 0.9660000801086426,
"step": 1156
},
{
"epoch": 0.48860759493670886,
"grad_norm": 0.4295555055141449,
"learning_rate": 0.00019999895905470014,
"loss": 0.824654757976532,
"step": 1158
},
{
"epoch": 0.48945147679324896,
"grad_norm": 0.392167866230011,
"learning_rate": 0.0001999987283747902,
"loss": 0.8159562349319458,
"step": 1160
},
{
"epoch": 0.490295358649789,
"grad_norm": 0.484611839056015,
"learning_rate": 0.00019999847462709412,
"loss": 0.9630686044692993,
"step": 1162
},
{
"epoch": 0.4911392405063291,
"grad_norm": 0.45891445875167847,
"learning_rate": 0.00019999819781167042,
"loss": 0.8396129608154297,
"step": 1164
},
{
"epoch": 0.4919831223628692,
"grad_norm": 0.5553452968597412,
"learning_rate": 0.00019999789792858304,
"loss": 0.8194513916969299,
"step": 1166
},
{
"epoch": 0.4928270042194093,
"grad_norm": 0.38998672366142273,
"learning_rate": 0.00019999757497790106,
"loss": 0.9268721342086792,
"step": 1168
},
{
"epoch": 0.4936708860759494,
"grad_norm": 0.45445355772972107,
"learning_rate": 0.00019999722895969904,
"loss": 0.9204684495925903,
"step": 1170
},
{
"epoch": 0.49451476793248944,
"grad_norm": 0.484225332736969,
"learning_rate": 0.00019999685987405678,
"loss": 0.9270301461219788,
"step": 1172
},
{
"epoch": 0.49535864978902955,
"grad_norm": 0.45215415954589844,
"learning_rate": 0.00019999646772105942,
"loss": 0.8782645463943481,
"step": 1174
},
{
"epoch": 0.4962025316455696,
"grad_norm": 0.41578832268714905,
"learning_rate": 0.00019999605250079744,
"loss": 0.873112678527832,
"step": 1176
},
{
"epoch": 0.4970464135021097,
"grad_norm": 0.4632788300514221,
"learning_rate": 0.0001999956142133666,
"loss": 0.9593189358711243,
"step": 1178
},
{
"epoch": 0.4978902953586498,
"grad_norm": 0.5007622838020325,
"learning_rate": 0.000199995152858868,
"loss": 0.9215621948242188,
"step": 1180
},
{
"epoch": 0.49873417721518987,
"grad_norm": 0.5201935768127441,
"learning_rate": 0.0001999946684374081,
"loss": 0.8964219689369202,
"step": 1182
},
{
"epoch": 0.49957805907173,
"grad_norm": 0.5053967237472534,
"learning_rate": 0.0001999941609490986,
"loss": 0.8528663516044617,
"step": 1184
},
{
"epoch": 0.5004219409282701,
"grad_norm": 0.44397974014282227,
"learning_rate": 0.0001999936303940566,
"loss": 0.8214734792709351,
"step": 1186
},
{
"epoch": 0.5012658227848101,
"grad_norm": 0.4563101530075073,
"learning_rate": 0.0001999930767724045,
"loss": 0.8882166743278503,
"step": 1188
},
{
"epoch": 0.5021097046413502,
"grad_norm": 0.4666728675365448,
"learning_rate": 0.00019999250008426997,
"loss": 0.8882588148117065,
"step": 1190
},
{
"epoch": 0.5029535864978903,
"grad_norm": 0.4423629939556122,
"learning_rate": 0.00019999190032978607,
"loss": 0.8353691697120667,
"step": 1192
},
{
"epoch": 0.5037974683544304,
"grad_norm": 0.4514595866203308,
"learning_rate": 0.00019999127750909118,
"loss": 0.8721219301223755,
"step": 1194
},
{
"epoch": 0.5046413502109705,
"grad_norm": 0.48469996452331543,
"learning_rate": 0.00019999063162232886,
"loss": 0.902795135974884,
"step": 1196
},
{
"epoch": 0.5054852320675105,
"grad_norm": 0.4931983947753906,
"learning_rate": 0.00019998996266964823,
"loss": 0.8843175768852234,
"step": 1198
},
{
"epoch": 0.5063291139240507,
"grad_norm": 0.507408082485199,
"learning_rate": 0.00019998927065120357,
"loss": 0.9179208278656006,
"step": 1200
},
{
"epoch": 0.5063291139240507,
"eval_loss": 0.9224098324775696,
"eval_runtime": 669.7542,
"eval_samples_per_second": 3.146,
"eval_steps_per_second": 3.146,
"step": 1200
},
{
"epoch": 0.5071729957805907,
"grad_norm": 0.4024188220500946,
"learning_rate": 0.00019998855556715447,
"loss": 0.9078981876373291,
"step": 1202
},
{
"epoch": 0.5080168776371308,
"grad_norm": 0.6558667421340942,
"learning_rate": 0.00019998781741766594,
"loss": 0.8089252710342407,
"step": 1204
},
{
"epoch": 0.5088607594936709,
"grad_norm": 0.45128145813941956,
"learning_rate": 0.00019998705620290823,
"loss": 0.9316248297691345,
"step": 1206
},
{
"epoch": 0.509704641350211,
"grad_norm": 0.4733511507511139,
"learning_rate": 0.00019998627192305694,
"loss": 0.9047867655754089,
"step": 1208
},
{
"epoch": 0.510548523206751,
"grad_norm": 0.4026021659374237,
"learning_rate": 0.00019998546457829298,
"loss": 0.8507166504859924,
"step": 1210
},
{
"epoch": 0.5113924050632911,
"grad_norm": 0.43416184186935425,
"learning_rate": 0.00019998463416880262,
"loss": 0.8490090370178223,
"step": 1212
},
{
"epoch": 0.5122362869198313,
"grad_norm": 0.3678364157676697,
"learning_rate": 0.0001999837806947774,
"loss": 0.7730492949485779,
"step": 1214
},
{
"epoch": 0.5130801687763713,
"grad_norm": 0.49042677879333496,
"learning_rate": 0.00019998290415641415,
"loss": 0.9625269174575806,
"step": 1216
},
{
"epoch": 0.5139240506329114,
"grad_norm": 0.4576701819896698,
"learning_rate": 0.00019998200455391516,
"loss": 0.8961732983589172,
"step": 1218
},
{
"epoch": 0.5147679324894515,
"grad_norm": 0.4465518593788147,
"learning_rate": 0.00019998108188748793,
"loss": 0.8508996367454529,
"step": 1220
},
{
"epoch": 0.5156118143459916,
"grad_norm": 0.4267960488796234,
"learning_rate": 0.00019998013615734524,
"loss": 0.8664930462837219,
"step": 1222
},
{
"epoch": 0.5164556962025316,
"grad_norm": 0.39936602115631104,
"learning_rate": 0.0001999791673637053,
"loss": 0.8841317892074585,
"step": 1224
},
{
"epoch": 0.5172995780590718,
"grad_norm": 0.4442414343357086,
"learning_rate": 0.0001999781755067916,
"loss": 0.8601276874542236,
"step": 1226
},
{
"epoch": 0.5181434599156118,
"grad_norm": 0.4397028088569641,
"learning_rate": 0.00019997716058683292,
"loss": 0.8377046585083008,
"step": 1228
},
{
"epoch": 0.5189873417721519,
"grad_norm": 0.5327648520469666,
"learning_rate": 0.0001999761226040634,
"loss": 0.9853615164756775,
"step": 1230
},
{
"epoch": 0.5198312236286919,
"grad_norm": 0.40931421518325806,
"learning_rate": 0.00019997506155872244,
"loss": 0.9235715866088867,
"step": 1232
},
{
"epoch": 0.5206751054852321,
"grad_norm": 0.4599161148071289,
"learning_rate": 0.00019997397745105487,
"loss": 0.868396520614624,
"step": 1234
},
{
"epoch": 0.5215189873417722,
"grad_norm": 0.4813833236694336,
"learning_rate": 0.0001999728702813107,
"loss": 0.9946733117103577,
"step": 1236
},
{
"epoch": 0.5223628691983122,
"grad_norm": 0.43518269062042236,
"learning_rate": 0.00019997174004974543,
"loss": 0.8608635067939758,
"step": 1238
},
{
"epoch": 0.5232067510548524,
"grad_norm": 0.45646214485168457,
"learning_rate": 0.0001999705867566197,
"loss": 0.819354772567749,
"step": 1240
},
{
"epoch": 0.5240506329113924,
"grad_norm": 0.46834197640419006,
"learning_rate": 0.00019996941040219954,
"loss": 0.8934658765792847,
"step": 1242
},
{
"epoch": 0.5248945147679325,
"grad_norm": 0.46065032482147217,
"learning_rate": 0.0001999682109867564,
"loss": 0.8624778985977173,
"step": 1244
},
{
"epoch": 0.5257383966244725,
"grad_norm": 0.4506741166114807,
"learning_rate": 0.00019996698851056688,
"loss": 0.8540882468223572,
"step": 1246
},
{
"epoch": 0.5265822784810127,
"grad_norm": 0.4550519287586212,
"learning_rate": 0.00019996574297391302,
"loss": 0.9003006219863892,
"step": 1248
},
{
"epoch": 0.5274261603375527,
"grad_norm": 0.4097813367843628,
"learning_rate": 0.00019996447437708214,
"loss": 0.8490248918533325,
"step": 1250
},
{
"epoch": 0.5282700421940928,
"grad_norm": 0.4375711679458618,
"learning_rate": 0.00019996318272036688,
"loss": 0.9204569458961487,
"step": 1252
},
{
"epoch": 0.529113924050633,
"grad_norm": 0.4335198998451233,
"learning_rate": 0.00019996186800406518,
"loss": 0.8831789493560791,
"step": 1254
},
{
"epoch": 0.529957805907173,
"grad_norm": 0.4840933084487915,
"learning_rate": 0.00019996053022848035,
"loss": 0.8578064441680908,
"step": 1256
},
{
"epoch": 0.5308016877637131,
"grad_norm": 0.48911216855049133,
"learning_rate": 0.00019995916939392097,
"loss": 0.9235416650772095,
"step": 1258
},
{
"epoch": 0.5316455696202531,
"grad_norm": 0.41106730699539185,
"learning_rate": 0.00019995778550070098,
"loss": 0.7897764444351196,
"step": 1260
},
{
"epoch": 0.5324894514767933,
"grad_norm": 0.42233356833457947,
"learning_rate": 0.00019995637854913957,
"loss": 0.9392989873886108,
"step": 1262
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.39145180583000183,
"learning_rate": 0.0001999549485395613,
"loss": 0.9130091667175293,
"step": 1264
},
{
"epoch": 0.5341772151898734,
"grad_norm": 0.42780739068984985,
"learning_rate": 0.00019995349547229614,
"loss": 0.9690561890602112,
"step": 1266
},
{
"epoch": 0.5350210970464135,
"grad_norm": 0.4823327362537384,
"learning_rate": 0.0001999520193476792,
"loss": 0.9990131258964539,
"step": 1268
},
{
"epoch": 0.5358649789029536,
"grad_norm": 0.4669715166091919,
"learning_rate": 0.00019995052016605097,
"loss": 0.8343052864074707,
"step": 1270
},
{
"epoch": 0.5367088607594936,
"grad_norm": 0.4024743437767029,
"learning_rate": 0.0001999489979277574,
"loss": 0.9605970978736877,
"step": 1272
},
{
"epoch": 0.5375527426160338,
"grad_norm": 0.42063432931900024,
"learning_rate": 0.0001999474526331495,
"loss": 0.8164438009262085,
"step": 1274
},
{
"epoch": 0.5383966244725739,
"grad_norm": 0.47953495383262634,
"learning_rate": 0.00019994588428258383,
"loss": 0.879767656326294,
"step": 1276
},
{
"epoch": 0.5392405063291139,
"grad_norm": 0.39668557047843933,
"learning_rate": 0.00019994429287642214,
"loss": 0.7658726572990417,
"step": 1278
},
{
"epoch": 0.540084388185654,
"grad_norm": 0.5136121511459351,
"learning_rate": 0.00019994267841503155,
"loss": 0.9477384686470032,
"step": 1280
},
{
"epoch": 0.5409282700421941,
"grad_norm": 0.4039286673069,
"learning_rate": 0.00019994104089878452,
"loss": 0.7883896827697754,
"step": 1282
},
{
"epoch": 0.5417721518987342,
"grad_norm": 0.4425487816333771,
"learning_rate": 0.0001999393803280587,
"loss": 0.8526129126548767,
"step": 1284
},
{
"epoch": 0.5426160337552742,
"grad_norm": 0.4544559121131897,
"learning_rate": 0.00019993769670323725,
"loss": 0.8694683909416199,
"step": 1286
},
{
"epoch": 0.5434599156118144,
"grad_norm": 0.42872917652130127,
"learning_rate": 0.0001999359900247085,
"loss": 0.8211527466773987,
"step": 1288
},
{
"epoch": 0.5443037974683544,
"grad_norm": 0.41549673676490784,
"learning_rate": 0.00019993426029286617,
"loss": 0.7967932224273682,
"step": 1290
},
{
"epoch": 0.5451476793248945,
"grad_norm": 0.42888355255126953,
"learning_rate": 0.00019993250750810926,
"loss": 0.8291563987731934,
"step": 1292
},
{
"epoch": 0.5459915611814345,
"grad_norm": 0.44985485076904297,
"learning_rate": 0.0001999307316708421,
"loss": 0.8787803649902344,
"step": 1294
},
{
"epoch": 0.5468354430379747,
"grad_norm": 0.43326008319854736,
"learning_rate": 0.00019992893278147436,
"loss": 0.8375519514083862,
"step": 1296
},
{
"epoch": 0.5476793248945148,
"grad_norm": 0.406556099653244,
"learning_rate": 0.00019992711084042096,
"loss": 0.8538051247596741,
"step": 1298
},
{
"epoch": 0.5485232067510548,
"grad_norm": 0.46090877056121826,
"learning_rate": 0.00019992526584810226,
"loss": 0.9722132086753845,
"step": 1300
},
{
"epoch": 0.5485232067510548,
"eval_loss": 0.9068717360496521,
"eval_runtime": 680.7718,
"eval_samples_per_second": 3.095,
"eval_steps_per_second": 3.095,
"step": 1300
},
{
"epoch": 0.549367088607595,
"grad_norm": 0.44932207465171814,
"learning_rate": 0.00019992339780494382,
"loss": 0.8474152684211731,
"step": 1302
},
{
"epoch": 0.550210970464135,
"grad_norm": 0.5185276865959167,
"learning_rate": 0.0001999215067113766,
"loss": 0.8440775871276855,
"step": 1304
},
{
"epoch": 0.5510548523206751,
"grad_norm": 0.43811365962028503,
"learning_rate": 0.0001999195925678368,
"loss": 0.8848010897636414,
"step": 1306
},
{
"epoch": 0.5518987341772152,
"grad_norm": 0.4861660301685333,
"learning_rate": 0.00019991765537476596,
"loss": 0.8265100121498108,
"step": 1308
},
{
"epoch": 0.5527426160337553,
"grad_norm": 0.5068721771240234,
"learning_rate": 0.000199915695132611,
"loss": 0.9327963590621948,
"step": 1310
},
{
"epoch": 0.5535864978902953,
"grad_norm": 0.4902805685997009,
"learning_rate": 0.0001999137118418241,
"loss": 0.9315155744552612,
"step": 1312
},
{
"epoch": 0.5544303797468354,
"grad_norm": 0.4530964493751526,
"learning_rate": 0.00019991170550286272,
"loss": 0.9344852566719055,
"step": 1314
},
{
"epoch": 0.5552742616033756,
"grad_norm": 0.4106673300266266,
"learning_rate": 0.00019990967611618974,
"loss": 0.8326173424720764,
"step": 1316
},
{
"epoch": 0.5561181434599156,
"grad_norm": 0.4439711570739746,
"learning_rate": 0.0001999076236822733,
"loss": 0.8934909105300903,
"step": 1318
},
{
"epoch": 0.5569620253164557,
"grad_norm": 0.40245553851127625,
"learning_rate": 0.0001999055482015868,
"loss": 0.6691107153892517,
"step": 1320
},
{
"epoch": 0.5578059071729958,
"grad_norm": 0.43904435634613037,
"learning_rate": 0.0001999034496746091,
"loss": 0.8560307621955872,
"step": 1322
},
{
"epoch": 0.5586497890295359,
"grad_norm": 0.5131705403327942,
"learning_rate": 0.00019990132810182422,
"loss": 0.9024442434310913,
"step": 1324
},
{
"epoch": 0.5594936708860759,
"grad_norm": 0.4539531171321869,
"learning_rate": 0.00019989918348372156,
"loss": 0.8675428628921509,
"step": 1326
},
{
"epoch": 0.560337552742616,
"grad_norm": 0.44469985365867615,
"learning_rate": 0.00019989701582079591,
"loss": 0.8853429555892944,
"step": 1328
},
{
"epoch": 0.5611814345991561,
"grad_norm": 0.4485546946525574,
"learning_rate": 0.00019989482511354725,
"loss": 0.9675378799438477,
"step": 1330
},
{
"epoch": 0.5620253164556962,
"grad_norm": 0.43316468596458435,
"learning_rate": 0.00019989261136248097,
"loss": 0.8256655335426331,
"step": 1332
},
{
"epoch": 0.5628691983122363,
"grad_norm": 0.4315780699253082,
"learning_rate": 0.00019989037456810772,
"loss": 0.7727690935134888,
"step": 1334
},
{
"epoch": 0.5637130801687764,
"grad_norm": 0.45147502422332764,
"learning_rate": 0.00019988811473094348,
"loss": 0.9240618348121643,
"step": 1336
},
{
"epoch": 0.5645569620253165,
"grad_norm": 0.4606908857822418,
"learning_rate": 0.00019988583185150957,
"loss": 0.8005949258804321,
"step": 1338
},
{
"epoch": 0.5654008438818565,
"grad_norm": 0.48932451009750366,
"learning_rate": 0.00019988352593033255,
"loss": 0.9042718410491943,
"step": 1340
},
{
"epoch": 0.5662447257383966,
"grad_norm": 0.46022218465805054,
"learning_rate": 0.00019988119696794443,
"loss": 0.8724613785743713,
"step": 1342
},
{
"epoch": 0.5670886075949367,
"grad_norm": 0.4414621889591217,
"learning_rate": 0.00019987884496488242,
"loss": 0.8653066158294678,
"step": 1344
},
{
"epoch": 0.5679324894514768,
"grad_norm": 0.4490342140197754,
"learning_rate": 0.0001998764699216891,
"loss": 0.8843849301338196,
"step": 1346
},
{
"epoch": 0.5687763713080168,
"grad_norm": 0.47690069675445557,
"learning_rate": 0.00019987407183891232,
"loss": 0.8385393023490906,
"step": 1348
},
{
"epoch": 0.569620253164557,
"grad_norm": 0.41409361362457275,
"learning_rate": 0.00019987165071710527,
"loss": 0.77492755651474,
"step": 1350
},
{
"epoch": 0.570464135021097,
"grad_norm": 0.4151647984981537,
"learning_rate": 0.0001998692065568265,
"loss": 0.8368680477142334,
"step": 1352
},
{
"epoch": 0.5713080168776371,
"grad_norm": 0.4105067849159241,
"learning_rate": 0.0001998667393586398,
"loss": 0.833220362663269,
"step": 1354
},
{
"epoch": 0.5721518987341773,
"grad_norm": 0.44701820611953735,
"learning_rate": 0.0001998642491231143,
"loss": 0.8041552305221558,
"step": 1356
},
{
"epoch": 0.5729957805907173,
"grad_norm": 0.4621582329273224,
"learning_rate": 0.00019986173585082444,
"loss": 0.9052709937095642,
"step": 1358
},
{
"epoch": 0.5738396624472574,
"grad_norm": 0.4934713840484619,
"learning_rate": 0.00019985919954235005,
"loss": 0.8036054372787476,
"step": 1360
},
{
"epoch": 0.5746835443037974,
"grad_norm": 0.4557384252548218,
"learning_rate": 0.0001998566401982761,
"loss": 0.8340095281600952,
"step": 1362
},
{
"epoch": 0.5755274261603376,
"grad_norm": 0.5048403739929199,
"learning_rate": 0.00019985405781919305,
"loss": 0.8703633546829224,
"step": 1364
},
{
"epoch": 0.5763713080168776,
"grad_norm": 0.4707714319229126,
"learning_rate": 0.0001998514524056966,
"loss": 0.9115830659866333,
"step": 1366
},
{
"epoch": 0.5772151898734177,
"grad_norm": 0.4502186179161072,
"learning_rate": 0.00019984882395838777,
"loss": 0.8603149056434631,
"step": 1368
},
{
"epoch": 0.5780590717299579,
"grad_norm": 0.4229136109352112,
"learning_rate": 0.00019984617247787288,
"loss": 0.7482197284698486,
"step": 1370
},
{
"epoch": 0.5789029535864979,
"grad_norm": 0.49208030104637146,
"learning_rate": 0.0001998434979647636,
"loss": 0.9071239233016968,
"step": 1372
},
{
"epoch": 0.579746835443038,
"grad_norm": 0.47393161058425903,
"learning_rate": 0.00019984080041967687,
"loss": 0.9370644092559814,
"step": 1374
},
{
"epoch": 0.580590717299578,
"grad_norm": 0.4483324885368347,
"learning_rate": 0.00019983807984323492,
"loss": 0.8536180257797241,
"step": 1376
},
{
"epoch": 0.5814345991561182,
"grad_norm": 0.4163796007633209,
"learning_rate": 0.00019983533623606543,
"loss": 0.7360405325889587,
"step": 1378
},
{
"epoch": 0.5822784810126582,
"grad_norm": 0.44147446751594543,
"learning_rate": 0.00019983256959880122,
"loss": 0.8162824511528015,
"step": 1380
},
{
"epoch": 0.5831223628691983,
"grad_norm": 0.44443491101264954,
"learning_rate": 0.0001998297799320805,
"loss": 0.9169327616691589,
"step": 1382
},
{
"epoch": 0.5839662447257384,
"grad_norm": 0.4641949236392975,
"learning_rate": 0.00019982696723654686,
"loss": 0.8875693678855896,
"step": 1384
},
{
"epoch": 0.5848101265822785,
"grad_norm": 0.4327974319458008,
"learning_rate": 0.00019982413151284906,
"loss": 0.7747344970703125,
"step": 1386
},
{
"epoch": 0.5856540084388185,
"grad_norm": 0.44200772047042847,
"learning_rate": 0.0001998212727616413,
"loss": 0.8490481972694397,
"step": 1388
},
{
"epoch": 0.5864978902953587,
"grad_norm": 0.39084959030151367,
"learning_rate": 0.000199818390983583,
"loss": 0.8014808297157288,
"step": 1390
},
{
"epoch": 0.5873417721518988,
"grad_norm": 0.3691277801990509,
"learning_rate": 0.00019981548617933897,
"loss": 0.8650928139686584,
"step": 1392
},
{
"epoch": 0.5881856540084388,
"grad_norm": 0.480002760887146,
"learning_rate": 0.00019981255834957925,
"loss": 0.7713267803192139,
"step": 1394
},
{
"epoch": 0.5890295358649789,
"grad_norm": 0.4397226572036743,
"learning_rate": 0.00019980960749497927,
"loss": 0.9053656458854675,
"step": 1396
},
{
"epoch": 0.589873417721519,
"grad_norm": 0.4263727366924286,
"learning_rate": 0.00019980663361621973,
"loss": 0.8737669587135315,
"step": 1398
},
{
"epoch": 0.5907172995780591,
"grad_norm": 0.4468817710876465,
"learning_rate": 0.0001998036367139866,
"loss": 0.9396650791168213,
"step": 1400
},
{
"epoch": 0.5907172995780591,
"eval_loss": 0.8971880674362183,
"eval_runtime": 692.8046,
"eval_samples_per_second": 3.041,
"eval_steps_per_second": 3.041,
"step": 1400
},
{
"epoch": 0.5915611814345991,
"grad_norm": 0.4128144383430481,
"learning_rate": 0.00019980061678897127,
"loss": 0.926134467124939,
"step": 1402
},
{
"epoch": 0.5924050632911393,
"grad_norm": 0.45524629950523376,
"learning_rate": 0.00019979757384187035,
"loss": 0.826360285282135,
"step": 1404
},
{
"epoch": 0.5932489451476793,
"grad_norm": 0.42521336674690247,
"learning_rate": 0.0001997945078733858,
"loss": 0.8147702813148499,
"step": 1406
},
{
"epoch": 0.5940928270042194,
"grad_norm": 0.4172525107860565,
"learning_rate": 0.00019979141888422489,
"loss": 0.7711596488952637,
"step": 1408
},
{
"epoch": 0.5949367088607594,
"grad_norm": 0.46193623542785645,
"learning_rate": 0.00019978830687510013,
"loss": 0.7875980138778687,
"step": 1410
},
{
"epoch": 0.5957805907172996,
"grad_norm": 0.4455825686454773,
"learning_rate": 0.00019978517184672946,
"loss": 0.8590195178985596,
"step": 1412
},
{
"epoch": 0.5966244725738397,
"grad_norm": 0.4266505539417267,
"learning_rate": 0.00019978201379983608,
"loss": 0.9318227767944336,
"step": 1414
},
{
"epoch": 0.5974683544303797,
"grad_norm": 0.42627251148223877,
"learning_rate": 0.00019977883273514843,
"loss": 0.8644474744796753,
"step": 1416
},
{
"epoch": 0.5983122362869199,
"grad_norm": 0.393873393535614,
"learning_rate": 0.00019977562865340038,
"loss": 0.7760446071624756,
"step": 1418
},
{
"epoch": 0.5991561181434599,
"grad_norm": 0.44334208965301514,
"learning_rate": 0.000199772401555331,
"loss": 0.8071584701538086,
"step": 1420
},
{
"epoch": 0.6,
"grad_norm": 0.39390063285827637,
"learning_rate": 0.0001997691514416848,
"loss": 0.9076889753341675,
"step": 1422
},
{
"epoch": 0.60084388185654,
"grad_norm": 0.46753421425819397,
"learning_rate": 0.00019976587831321144,
"loss": 0.8534318804740906,
"step": 1424
},
{
"epoch": 0.6016877637130802,
"grad_norm": 0.4840681254863739,
"learning_rate": 0.00019976258217066598,
"loss": 0.9049596190452576,
"step": 1426
},
{
"epoch": 0.6025316455696202,
"grad_norm": 0.4740557372570038,
"learning_rate": 0.00019975926301480878,
"loss": 0.8520918488502502,
"step": 1428
},
{
"epoch": 0.6033755274261603,
"grad_norm": 0.46498584747314453,
"learning_rate": 0.00019975592084640553,
"loss": 0.8221305012702942,
"step": 1430
},
{
"epoch": 0.6042194092827005,
"grad_norm": 0.40885692834854126,
"learning_rate": 0.0001997525556662272,
"loss": 0.8132198452949524,
"step": 1432
},
{
"epoch": 0.6050632911392405,
"grad_norm": 0.4375821053981781,
"learning_rate": 0.00019974916747505006,
"loss": 0.870534360408783,
"step": 1434
},
{
"epoch": 0.6059071729957806,
"grad_norm": 0.4780935049057007,
"learning_rate": 0.00019974575627365569,
"loss": 0.8825759291648865,
"step": 1436
},
{
"epoch": 0.6067510548523207,
"grad_norm": 0.4623970687389374,
"learning_rate": 0.00019974232206283098,
"loss": 0.8187641501426697,
"step": 1438
},
{
"epoch": 0.6075949367088608,
"grad_norm": 0.42172595858573914,
"learning_rate": 0.0001997388648433682,
"loss": 0.8569821119308472,
"step": 1440
},
{
"epoch": 0.6084388185654008,
"grad_norm": 0.49386659264564514,
"learning_rate": 0.0001997353846160648,
"loss": 0.9173614382743835,
"step": 1442
},
{
"epoch": 0.6092827004219409,
"grad_norm": 0.47122615575790405,
"learning_rate": 0.00019973188138172363,
"loss": 0.9444975256919861,
"step": 1444
},
{
"epoch": 0.610126582278481,
"grad_norm": 0.4831530451774597,
"learning_rate": 0.0001997283551411528,
"loss": 0.9537245631217957,
"step": 1446
},
{
"epoch": 0.6109704641350211,
"grad_norm": 0.44460317492485046,
"learning_rate": 0.0001997248058951658,
"loss": 0.8481367230415344,
"step": 1448
},
{
"epoch": 0.6118143459915611,
"grad_norm": 0.4315263628959656,
"learning_rate": 0.00019972123364458128,
"loss": 0.8572371006011963,
"step": 1450
},
{
"epoch": 0.6126582278481013,
"grad_norm": 0.38559582829475403,
"learning_rate": 0.00019971763839022336,
"loss": 0.762590765953064,
"step": 1452
},
{
"epoch": 0.6135021097046414,
"grad_norm": 0.3820290267467499,
"learning_rate": 0.00019971402013292138,
"loss": 0.7879500389099121,
"step": 1454
},
{
"epoch": 0.6143459915611814,
"grad_norm": 0.42509570717811584,
"learning_rate": 0.00019971037887351,
"loss": 0.8696863055229187,
"step": 1456
},
{
"epoch": 0.6151898734177215,
"grad_norm": 0.46750548481941223,
"learning_rate": 0.00019970671461282916,
"loss": 0.8425862789154053,
"step": 1458
},
{
"epoch": 0.6160337552742616,
"grad_norm": 0.3937121033668518,
"learning_rate": 0.0001997030273517242,
"loss": 0.8028931021690369,
"step": 1460
},
{
"epoch": 0.6168776371308017,
"grad_norm": 0.4361220896244049,
"learning_rate": 0.00019969931709104565,
"loss": 0.8662509918212891,
"step": 1462
},
{
"epoch": 0.6177215189873417,
"grad_norm": 0.4737963378429413,
"learning_rate": 0.00019969558383164943,
"loss": 0.8962596654891968,
"step": 1464
},
{
"epoch": 0.6185654008438819,
"grad_norm": 0.3952867388725281,
"learning_rate": 0.00019969182757439668,
"loss": 0.8630867600440979,
"step": 1466
},
{
"epoch": 0.619409282700422,
"grad_norm": 0.41688743233680725,
"learning_rate": 0.00019968804832015393,
"loss": 0.8759240508079529,
"step": 1468
},
{
"epoch": 0.620253164556962,
"grad_norm": 0.4599224627017975,
"learning_rate": 0.00019968424606979298,
"loss": 0.8459385633468628,
"step": 1470
},
{
"epoch": 0.6210970464135022,
"grad_norm": 0.5476765632629395,
"learning_rate": 0.00019968042082419094,
"loss": 0.8844659328460693,
"step": 1472
},
{
"epoch": 0.6219409282700422,
"grad_norm": 0.46202269196510315,
"learning_rate": 0.00019967657258423022,
"loss": 0.860946536064148,
"step": 1474
},
{
"epoch": 0.6227848101265823,
"grad_norm": 0.39446666836738586,
"learning_rate": 0.00019967270135079853,
"loss": 0.7587860822677612,
"step": 1476
},
{
"epoch": 0.6236286919831223,
"grad_norm": 0.4500375986099243,
"learning_rate": 0.0001996688071247889,
"loss": 0.9143000245094299,
"step": 1478
},
{
"epoch": 0.6244725738396625,
"grad_norm": 0.4203545153141022,
"learning_rate": 0.0001996648899070996,
"loss": 0.7865519523620605,
"step": 1480
},
{
"epoch": 0.6253164556962025,
"grad_norm": 0.45623889565467834,
"learning_rate": 0.00019966094969863432,
"loss": 0.9192912578582764,
"step": 1482
},
{
"epoch": 0.6261603375527426,
"grad_norm": 0.4139637053012848,
"learning_rate": 0.00019965698650030195,
"loss": 0.8459161520004272,
"step": 1484
},
{
"epoch": 0.6270042194092827,
"grad_norm": 0.4410824179649353,
"learning_rate": 0.00019965300031301678,
"loss": 0.8159077763557434,
"step": 1486
},
{
"epoch": 0.6278481012658228,
"grad_norm": 0.43821197748184204,
"learning_rate": 0.00019964899113769828,
"loss": 0.884467363357544,
"step": 1488
},
{
"epoch": 0.6286919831223629,
"grad_norm": 0.4692091643810272,
"learning_rate": 0.00019964495897527133,
"loss": 0.8693601489067078,
"step": 1490
},
{
"epoch": 0.6295358649789029,
"grad_norm": 0.40715447068214417,
"learning_rate": 0.00019964090382666608,
"loss": 0.8897743225097656,
"step": 1492
},
{
"epoch": 0.6303797468354431,
"grad_norm": 0.4123334586620331,
"learning_rate": 0.0001996368256928179,
"loss": 0.8274733424186707,
"step": 1494
},
{
"epoch": 0.6312236286919831,
"grad_norm": 0.44347211718559265,
"learning_rate": 0.00019963272457466767,
"loss": 0.8158749938011169,
"step": 1496
},
{
"epoch": 0.6320675105485232,
"grad_norm": 0.4700329601764679,
"learning_rate": 0.00019962860047316135,
"loss": 0.8439569473266602,
"step": 1498
},
{
"epoch": 0.6329113924050633,
"grad_norm": 0.46415844559669495,
"learning_rate": 0.00019962445338925027,
"loss": 0.8316822648048401,
"step": 1500
},
{
"epoch": 0.6329113924050633,
"eval_loss": 0.887488842010498,
"eval_runtime": 686.2804,
"eval_samples_per_second": 3.07,
"eval_steps_per_second": 3.07,
"step": 1500
},
{
"epoch": 0.6337552742616034,
"grad_norm": 0.44183167815208435,
"learning_rate": 0.0001996202833238911,
"loss": 0.9121994972229004,
"step": 1502
},
{
"epoch": 0.6345991561181434,
"grad_norm": 0.4279208779335022,
"learning_rate": 0.00019961609027804587,
"loss": 0.9160211086273193,
"step": 1504
},
{
"epoch": 0.6354430379746835,
"grad_norm": 0.3967169523239136,
"learning_rate": 0.00019961187425268176,
"loss": 0.861677348613739,
"step": 1506
},
{
"epoch": 0.6362869198312237,
"grad_norm": 0.4796451926231384,
"learning_rate": 0.0001996076352487713,
"loss": 0.8478423357009888,
"step": 1508
},
{
"epoch": 0.6371308016877637,
"grad_norm": 0.3861426115036011,
"learning_rate": 0.00019960337326729245,
"loss": 0.8739159107208252,
"step": 1510
},
{
"epoch": 0.6379746835443038,
"grad_norm": 0.4567820429801941,
"learning_rate": 0.00019959908830922824,
"loss": 0.7693920731544495,
"step": 1512
},
{
"epoch": 0.6388185654008439,
"grad_norm": 0.4457108676433563,
"learning_rate": 0.00019959478037556724,
"loss": 0.8482301831245422,
"step": 1514
},
{
"epoch": 0.639662447257384,
"grad_norm": 0.43918928503990173,
"learning_rate": 0.00019959044946730314,
"loss": 0.8626812696456909,
"step": 1516
},
{
"epoch": 0.640506329113924,
"grad_norm": 0.4095900058746338,
"learning_rate": 0.00019958609558543504,
"loss": 0.8342230916023254,
"step": 1518
},
{
"epoch": 0.6413502109704642,
"grad_norm": 0.4181270897388458,
"learning_rate": 0.00019958171873096724,
"loss": 0.8794118165969849,
"step": 1520
},
{
"epoch": 0.6421940928270042,
"grad_norm": 0.4750959277153015,
"learning_rate": 0.00019957731890490947,
"loss": 0.7787677049636841,
"step": 1522
},
{
"epoch": 0.6430379746835443,
"grad_norm": 0.5129296779632568,
"learning_rate": 0.00019957289610827663,
"loss": 0.9084368348121643,
"step": 1524
},
{
"epoch": 0.6438818565400843,
"grad_norm": 0.4829029142856598,
"learning_rate": 0.000199568450342089,
"loss": 0.9066952466964722,
"step": 1526
},
{
"epoch": 0.6447257383966245,
"grad_norm": 0.4225057363510132,
"learning_rate": 0.00019956398160737214,
"loss": 0.8127874732017517,
"step": 1528
},
{
"epoch": 0.6455696202531646,
"grad_norm": 0.4231826364994049,
"learning_rate": 0.00019955948990515684,
"loss": 0.8781921863555908,
"step": 1530
},
{
"epoch": 0.6464135021097046,
"grad_norm": 0.4436216950416565,
"learning_rate": 0.00019955497523647933,
"loss": 0.8136996626853943,
"step": 1532
},
{
"epoch": 0.6472573839662448,
"grad_norm": 0.4018244743347168,
"learning_rate": 0.00019955043760238098,
"loss": 0.8003877401351929,
"step": 1534
},
{
"epoch": 0.6481012658227848,
"grad_norm": 0.40024611353874207,
"learning_rate": 0.0001995458770039086,
"loss": 0.8996267914772034,
"step": 1536
},
{
"epoch": 0.6489451476793249,
"grad_norm": 0.42845702171325684,
"learning_rate": 0.00019954129344211424,
"loss": 0.8610522150993347,
"step": 1538
},
{
"epoch": 0.6497890295358649,
"grad_norm": 0.4258182644844055,
"learning_rate": 0.00019953668691805517,
"loss": 0.8991837501525879,
"step": 1540
},
{
"epoch": 0.6506329113924051,
"grad_norm": 0.4483257830142975,
"learning_rate": 0.0001995320574327941,
"loss": 0.8150189518928528,
"step": 1542
},
{
"epoch": 0.6514767932489451,
"grad_norm": 0.4320627748966217,
"learning_rate": 0.0001995274049873989,
"loss": 0.8572840690612793,
"step": 1544
},
{
"epoch": 0.6523206751054852,
"grad_norm": 0.39950594305992126,
"learning_rate": 0.00019952272958294288,
"loss": 0.7766129374504089,
"step": 1546
},
{
"epoch": 0.6531645569620254,
"grad_norm": 0.4130117893218994,
"learning_rate": 0.00019951803122050446,
"loss": 0.9032199382781982,
"step": 1548
},
{
"epoch": 0.6540084388185654,
"grad_norm": 0.4436919391155243,
"learning_rate": 0.00019951330990116754,
"loss": 0.8361048102378845,
"step": 1550
},
{
"epoch": 0.6548523206751055,
"grad_norm": 0.4229227900505066,
"learning_rate": 0.00019950856562602121,
"loss": 0.8661768436431885,
"step": 1552
},
{
"epoch": 0.6556962025316456,
"grad_norm": 0.4839727580547333,
"learning_rate": 0.00019950379839615988,
"loss": 0.9737826585769653,
"step": 1554
},
{
"epoch": 0.6565400843881857,
"grad_norm": 0.3859386742115021,
"learning_rate": 0.00019949900821268328,
"loss": 0.7542453408241272,
"step": 1556
},
{
"epoch": 0.6573839662447257,
"grad_norm": 0.39643988013267517,
"learning_rate": 0.0001994941950766964,
"loss": 0.9226290583610535,
"step": 1558
},
{
"epoch": 0.6582278481012658,
"grad_norm": 0.4261355400085449,
"learning_rate": 0.00019948935898930954,
"loss": 0.8375602960586548,
"step": 1560
},
{
"epoch": 0.6590717299578059,
"grad_norm": 0.445931077003479,
"learning_rate": 0.00019948449995163826,
"loss": 0.7847310304641724,
"step": 1562
},
{
"epoch": 0.659915611814346,
"grad_norm": 0.4680459797382355,
"learning_rate": 0.00019947961796480353,
"loss": 0.851150631904602,
"step": 1564
},
{
"epoch": 0.660759493670886,
"grad_norm": 0.6826074719429016,
"learning_rate": 0.00019947471302993143,
"loss": 0.8259562253952026,
"step": 1566
},
{
"epoch": 0.6616033755274262,
"grad_norm": 0.47369205951690674,
"learning_rate": 0.00019946978514815352,
"loss": 0.879021406173706,
"step": 1568
},
{
"epoch": 0.6624472573839663,
"grad_norm": 0.41111937165260315,
"learning_rate": 0.0001994648343206065,
"loss": 0.9296056032180786,
"step": 1570
},
{
"epoch": 0.6632911392405063,
"grad_norm": 0.4297783076763153,
"learning_rate": 0.00019945986054843248,
"loss": 0.873822808265686,
"step": 1572
},
{
"epoch": 0.6641350210970464,
"grad_norm": 0.48124316334724426,
"learning_rate": 0.00019945486383277878,
"loss": 0.937151312828064,
"step": 1574
},
{
"epoch": 0.6649789029535865,
"grad_norm": 0.4048190116882324,
"learning_rate": 0.00019944984417479805,
"loss": 0.8398929834365845,
"step": 1576
},
{
"epoch": 0.6658227848101266,
"grad_norm": 0.4023774266242981,
"learning_rate": 0.00019944480157564826,
"loss": 0.8269520998001099,
"step": 1578
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.3784184157848358,
"learning_rate": 0.00019943973603649262,
"loss": 0.8237608671188354,
"step": 1580
},
{
"epoch": 0.6675105485232068,
"grad_norm": 0.4448573887348175,
"learning_rate": 0.00019943464755849965,
"loss": 0.8469099998474121,
"step": 1582
},
{
"epoch": 0.6683544303797468,
"grad_norm": 0.4690774083137512,
"learning_rate": 0.00019942953614284317,
"loss": 0.8630822896957397,
"step": 1584
},
{
"epoch": 0.6691983122362869,
"grad_norm": 0.43670853972435,
"learning_rate": 0.00019942440179070227,
"loss": 0.8904082179069519,
"step": 1586
},
{
"epoch": 0.6700421940928271,
"grad_norm": 0.4236849844455719,
"learning_rate": 0.00019941924450326138,
"loss": 0.8355640172958374,
"step": 1588
},
{
"epoch": 0.6708860759493671,
"grad_norm": 0.48624539375305176,
"learning_rate": 0.00019941406428171013,
"loss": 0.8898435831069946,
"step": 1590
},
{
"epoch": 0.6717299578059072,
"grad_norm": 0.4453780949115753,
"learning_rate": 0.00019940886112724355,
"loss": 0.9000012278556824,
"step": 1592
},
{
"epoch": 0.6725738396624472,
"grad_norm": 0.4417199194431305,
"learning_rate": 0.00019940363504106193,
"loss": 0.8171058893203735,
"step": 1594
},
{
"epoch": 0.6734177215189874,
"grad_norm": 0.45055362582206726,
"learning_rate": 0.00019939838602437074,
"loss": 0.8703798055648804,
"step": 1596
},
{
"epoch": 0.6742616033755274,
"grad_norm": 0.42316532135009766,
"learning_rate": 0.00019939311407838088,
"loss": 0.8677281737327576,
"step": 1598
},
{
"epoch": 0.6751054852320675,
"grad_norm": 0.3935781717300415,
"learning_rate": 0.0001993878192043085,
"loss": 0.7314013242721558,
"step": 1600
},
{
"epoch": 0.6751054852320675,
"eval_loss": 0.8769772052764893,
"eval_runtime": 677.9338,
"eval_samples_per_second": 3.108,
"eval_steps_per_second": 3.108,
"step": 1600
},
{
"epoch": 0.6759493670886076,
"grad_norm": 0.4197012186050415,
"learning_rate": 0.000199382501403375,
"loss": 0.8075380325317383,
"step": 1602
},
{
"epoch": 0.6767932489451477,
"grad_norm": 0.45227327942848206,
"learning_rate": 0.00019937716067680713,
"loss": 0.7885794639587402,
"step": 1604
},
{
"epoch": 0.6776371308016877,
"grad_norm": 0.45660385489463806,
"learning_rate": 0.00019937179702583682,
"loss": 0.868261992931366,
"step": 1606
},
{
"epoch": 0.6784810126582278,
"grad_norm": 0.3955288231372833,
"learning_rate": 0.00019936641045170144,
"loss": 0.8306655287742615,
"step": 1608
},
{
"epoch": 0.679324894514768,
"grad_norm": 0.3651324510574341,
"learning_rate": 0.00019936100095564353,
"loss": 0.7505315542221069,
"step": 1610
},
{
"epoch": 0.680168776371308,
"grad_norm": 0.4240955412387848,
"learning_rate": 0.00019935556853891096,
"loss": 0.8792756795883179,
"step": 1612
},
{
"epoch": 0.6810126582278481,
"grad_norm": 0.4142507314682007,
"learning_rate": 0.00019935011320275687,
"loss": 0.7328272461891174,
"step": 1614
},
{
"epoch": 0.6818565400843882,
"grad_norm": 0.48339322209358215,
"learning_rate": 0.00019934463494843975,
"loss": 0.8414849042892456,
"step": 1616
},
{
"epoch": 0.6827004219409283,
"grad_norm": 0.4312443435192108,
"learning_rate": 0.00019933913377722328,
"loss": 0.8588716983795166,
"step": 1618
},
{
"epoch": 0.6835443037974683,
"grad_norm": 0.43849512934684753,
"learning_rate": 0.0001993336096903765,
"loss": 0.846511960029602,
"step": 1620
},
{
"epoch": 0.6843881856540084,
"grad_norm": 0.45767566561698914,
"learning_rate": 0.0001993280626891737,
"loss": 0.9352323412895203,
"step": 1622
},
{
"epoch": 0.6852320675105485,
"grad_norm": 0.438412606716156,
"learning_rate": 0.00019932249277489444,
"loss": 0.8884757161140442,
"step": 1624
},
{
"epoch": 0.6860759493670886,
"grad_norm": 0.45893922448158264,
"learning_rate": 0.0001993168999488237,
"loss": 0.7739649415016174,
"step": 1626
},
{
"epoch": 0.6869198312236287,
"grad_norm": 0.38631772994995117,
"learning_rate": 0.0001993112842122515,
"loss": 0.7948258519172668,
"step": 1628
},
{
"epoch": 0.6877637130801688,
"grad_norm": 0.3824027180671692,
"learning_rate": 0.00019930564556647334,
"loss": 0.750511884689331,
"step": 1630
},
{
"epoch": 0.6886075949367089,
"grad_norm": 0.3980776071548462,
"learning_rate": 0.00019929998401278996,
"loss": 0.9006738662719727,
"step": 1632
},
{
"epoch": 0.6894514767932489,
"grad_norm": 0.41419896483421326,
"learning_rate": 0.00019929429955250734,
"loss": 0.6996869444847107,
"step": 1634
},
{
"epoch": 0.6902953586497891,
"grad_norm": 0.43783676624298096,
"learning_rate": 0.00019928859218693682,
"loss": 0.8523393273353577,
"step": 1636
},
{
"epoch": 0.6911392405063291,
"grad_norm": 0.4378126561641693,
"learning_rate": 0.00019928286191739497,
"loss": 0.8885987401008606,
"step": 1638
},
{
"epoch": 0.6919831223628692,
"grad_norm": 0.4276133179664612,
"learning_rate": 0.00019927710874520363,
"loss": 0.8563809990882874,
"step": 1640
},
{
"epoch": 0.6928270042194092,
"grad_norm": 0.4600282311439514,
"learning_rate": 0.00019927133267168992,
"loss": 0.854114294052124,
"step": 1642
},
{
"epoch": 0.6936708860759494,
"grad_norm": 0.5058461427688599,
"learning_rate": 0.00019926553369818635,
"loss": 0.8557889461517334,
"step": 1644
},
{
"epoch": 0.6945147679324895,
"grad_norm": 0.4947412312030792,
"learning_rate": 0.00019925971182603057,
"loss": 0.8622767925262451,
"step": 1646
},
{
"epoch": 0.6953586497890295,
"grad_norm": 0.42555496096611023,
"learning_rate": 0.00019925386705656558,
"loss": 0.7836558818817139,
"step": 1648
},
{
"epoch": 0.6962025316455697,
"grad_norm": 0.47560831904411316,
"learning_rate": 0.00019924799939113967,
"loss": 0.7877846956253052,
"step": 1650
},
{
"epoch": 0.6970464135021097,
"grad_norm": 0.391702264547348,
"learning_rate": 0.0001992421088311064,
"loss": 0.775681734085083,
"step": 1652
},
{
"epoch": 0.6978902953586498,
"grad_norm": 0.463679701089859,
"learning_rate": 0.0001992361953778246,
"loss": 0.8809882402420044,
"step": 1654
},
{
"epoch": 0.6987341772151898,
"grad_norm": 0.44096484780311584,
"learning_rate": 0.00019923025903265836,
"loss": 0.8513299226760864,
"step": 1656
},
{
"epoch": 0.69957805907173,
"grad_norm": 0.47818854451179504,
"learning_rate": 0.0001992242997969772,
"loss": 0.8309667706489563,
"step": 1658
},
{
"epoch": 0.70042194092827,
"grad_norm": 0.4192790985107422,
"learning_rate": 0.00019921831767215565,
"loss": 0.8817558884620667,
"step": 1660
},
{
"epoch": 0.7012658227848101,
"grad_norm": 0.45285239815711975,
"learning_rate": 0.00019921231265957373,
"loss": 0.8315755724906921,
"step": 1662
},
{
"epoch": 0.7021097046413503,
"grad_norm": 0.47118625044822693,
"learning_rate": 0.00019920628476061673,
"loss": 0.8698058724403381,
"step": 1664
},
{
"epoch": 0.7029535864978903,
"grad_norm": 0.4769238829612732,
"learning_rate": 0.0001992002339766751,
"loss": 0.953620970249176,
"step": 1666
},
{
"epoch": 0.7037974683544304,
"grad_norm": 0.39397355914115906,
"learning_rate": 0.00019919416030914468,
"loss": 0.8536615371704102,
"step": 1668
},
{
"epoch": 0.7046413502109705,
"grad_norm": 0.3659648299217224,
"learning_rate": 0.00019918806375942655,
"loss": 0.8159828782081604,
"step": 1670
},
{
"epoch": 0.7054852320675106,
"grad_norm": 0.4381789565086365,
"learning_rate": 0.00019918194432892703,
"loss": 0.782463550567627,
"step": 1672
},
{
"epoch": 0.7063291139240506,
"grad_norm": 0.44745752215385437,
"learning_rate": 0.00019917580201905778,
"loss": 0.9314272999763489,
"step": 1674
},
{
"epoch": 0.7071729957805907,
"grad_norm": 0.4515164792537689,
"learning_rate": 0.00019916963683123574,
"loss": 0.860816240310669,
"step": 1676
},
{
"epoch": 0.7080168776371308,
"grad_norm": 0.42735886573791504,
"learning_rate": 0.00019916344876688303,
"loss": 0.8008456826210022,
"step": 1678
},
{
"epoch": 0.7088607594936709,
"grad_norm": 0.418275386095047,
"learning_rate": 0.00019915723782742718,
"loss": 0.7796595096588135,
"step": 1680
},
{
"epoch": 0.7097046413502109,
"grad_norm": 0.44347989559173584,
"learning_rate": 0.00019915100401430089,
"loss": 0.9006506204605103,
"step": 1682
},
{
"epoch": 0.7105485232067511,
"grad_norm": 0.3932702839374542,
"learning_rate": 0.00019914474732894225,
"loss": 0.7750146985054016,
"step": 1684
},
{
"epoch": 0.7113924050632912,
"grad_norm": 0.4810502231121063,
"learning_rate": 0.00019913846777279447,
"loss": 0.7508097290992737,
"step": 1686
},
{
"epoch": 0.7122362869198312,
"grad_norm": 0.4166778028011322,
"learning_rate": 0.00019913216534730622,
"loss": 0.7926796078681946,
"step": 1688
},
{
"epoch": 0.7130801687763713,
"grad_norm": 0.47900474071502686,
"learning_rate": 0.00019912584005393123,
"loss": 0.8363928198814392,
"step": 1690
},
{
"epoch": 0.7139240506329114,
"grad_norm": 0.435143381357193,
"learning_rate": 0.00019911949189412872,
"loss": 0.8649623990058899,
"step": 1692
},
{
"epoch": 0.7147679324894515,
"grad_norm": 0.4070208966732025,
"learning_rate": 0.00019911312086936305,
"loss": 0.8196499943733215,
"step": 1694
},
{
"epoch": 0.7156118143459915,
"grad_norm": 0.4717724621295929,
"learning_rate": 0.0001991067269811039,
"loss": 0.9157413840293884,
"step": 1696
},
{
"epoch": 0.7164556962025317,
"grad_norm": 0.3942880928516388,
"learning_rate": 0.0001991003102308262,
"loss": 0.7880456447601318,
"step": 1698
},
{
"epoch": 0.7172995780590717,
"grad_norm": 0.4037776589393616,
"learning_rate": 0.0001990938706200102,
"loss": 0.8088646531105042,
"step": 1700
},
{
"epoch": 0.7172995780590717,
"eval_loss": 0.8708170056343079,
"eval_runtime": 670.3019,
"eval_samples_per_second": 3.143,
"eval_steps_per_second": 3.143,
"step": 1700
},
{
"epoch": 0.7181434599156118,
"grad_norm": 0.4443536400794983,
"learning_rate": 0.0001990874081501414,
"loss": 0.8740324378013611,
"step": 1702
},
{
"epoch": 0.7189873417721518,
"grad_norm": 0.45089733600616455,
"learning_rate": 0.00019908092282271053,
"loss": 0.8305023908615112,
"step": 1704
},
{
"epoch": 0.719831223628692,
"grad_norm": 0.3903036415576935,
"learning_rate": 0.00019907441463921366,
"loss": 0.7718394994735718,
"step": 1706
},
{
"epoch": 0.7206751054852321,
"grad_norm": 0.42990729212760925,
"learning_rate": 0.00019906788360115208,
"loss": 0.843289315700531,
"step": 1708
},
{
"epoch": 0.7215189873417721,
"grad_norm": 0.4287833571434021,
"learning_rate": 0.0001990613297100324,
"loss": 0.8332282900810242,
"step": 1710
},
{
"epoch": 0.7223628691983123,
"grad_norm": 0.4557269513607025,
"learning_rate": 0.00019905475296736646,
"loss": 0.784180223941803,
"step": 1712
},
{
"epoch": 0.7232067510548523,
"grad_norm": 0.4001525640487671,
"learning_rate": 0.0001990481533746714,
"loss": 0.7807716131210327,
"step": 1714
},
{
"epoch": 0.7240506329113924,
"grad_norm": 0.5208709836006165,
"learning_rate": 0.0001990415309334696,
"loss": 0.7936414480209351,
"step": 1716
},
{
"epoch": 0.7248945147679325,
"grad_norm": 0.46469953656196594,
"learning_rate": 0.00019903488564528875,
"loss": 0.7964845895767212,
"step": 1718
},
{
"epoch": 0.7257383966244726,
"grad_norm": 0.405773401260376,
"learning_rate": 0.00019902821751166178,
"loss": 0.8302215337753296,
"step": 1720
},
{
"epoch": 0.7265822784810126,
"grad_norm": 0.3874973654747009,
"learning_rate": 0.00019902152653412692,
"loss": 0.8486512899398804,
"step": 1722
},
{
"epoch": 0.7274261603375527,
"grad_norm": 0.46865755319595337,
"learning_rate": 0.00019901481271422758,
"loss": 0.837841272354126,
"step": 1724
},
{
"epoch": 0.7282700421940929,
"grad_norm": 0.4139123558998108,
"learning_rate": 0.0001990080760535126,
"loss": 0.7640942335128784,
"step": 1726
},
{
"epoch": 0.7291139240506329,
"grad_norm": 0.5003024935722351,
"learning_rate": 0.000199001316553536,
"loss": 0.8236244320869446,
"step": 1728
},
{
"epoch": 0.729957805907173,
"grad_norm": 0.38495776057243347,
"learning_rate": 0.00019899453421585696,
"loss": 0.7792612314224243,
"step": 1730
},
{
"epoch": 0.7308016877637131,
"grad_norm": 0.4462733268737793,
"learning_rate": 0.00019898772904204014,
"loss": 0.7733872532844543,
"step": 1732
},
{
"epoch": 0.7316455696202532,
"grad_norm": 0.398248553276062,
"learning_rate": 0.00019898090103365532,
"loss": 0.8933147192001343,
"step": 1734
},
{
"epoch": 0.7324894514767932,
"grad_norm": 0.4231175482273102,
"learning_rate": 0.00019897405019227756,
"loss": 0.8198356628417969,
"step": 1736
},
{
"epoch": 0.7333333333333333,
"grad_norm": 0.43639084696769714,
"learning_rate": 0.0001989671765194873,
"loss": 0.8266391158103943,
"step": 1738
},
{
"epoch": 0.7341772151898734,
"grad_norm": 0.4235835671424866,
"learning_rate": 0.00019896028001687008,
"loss": 0.7839997410774231,
"step": 1740
},
{
"epoch": 0.7350210970464135,
"grad_norm": 0.4033111035823822,
"learning_rate": 0.00019895336068601687,
"loss": 0.7153518199920654,
"step": 1742
},
{
"epoch": 0.7358649789029535,
"grad_norm": 0.44281265139579773,
"learning_rate": 0.00019894641852852377,
"loss": 0.9079289436340332,
"step": 1744
},
{
"epoch": 0.7367088607594937,
"grad_norm": 0.4882747232913971,
"learning_rate": 0.0001989394535459922,
"loss": 0.8645254373550415,
"step": 1746
},
{
"epoch": 0.7375527426160338,
"grad_norm": 0.34858641028404236,
"learning_rate": 0.00019893246574002887,
"loss": 0.8134214878082275,
"step": 1748
},
{
"epoch": 0.7383966244725738,
"grad_norm": 0.4273303747177124,
"learning_rate": 0.00019892545511224574,
"loss": 0.8142663240432739,
"step": 1750
},
{
"epoch": 0.739240506329114,
"grad_norm": 0.4289957284927368,
"learning_rate": 0.00019891842166426004,
"loss": 0.7957643866539001,
"step": 1752
},
{
"epoch": 0.740084388185654,
"grad_norm": 0.4812224805355072,
"learning_rate": 0.00019891136539769416,
"loss": 0.9000363945960999,
"step": 1754
},
{
"epoch": 0.7409282700421941,
"grad_norm": 0.4078022837638855,
"learning_rate": 0.00019890428631417595,
"loss": 0.7719792723655701,
"step": 1756
},
{
"epoch": 0.7417721518987341,
"grad_norm": 0.40169858932495117,
"learning_rate": 0.00019889718441533838,
"loss": 0.7681847214698792,
"step": 1758
},
{
"epoch": 0.7426160337552743,
"grad_norm": 0.3713594377040863,
"learning_rate": 0.00019889005970281972,
"loss": 0.7536827325820923,
"step": 1760
},
{
"epoch": 0.7434599156118143,
"grad_norm": 0.4183627665042877,
"learning_rate": 0.0001988829121782635,
"loss": 0.9000160694122314,
"step": 1762
},
{
"epoch": 0.7443037974683544,
"grad_norm": 0.4241594672203064,
"learning_rate": 0.00019887574184331851,
"loss": 0.8047307133674622,
"step": 1764
},
{
"epoch": 0.7451476793248946,
"grad_norm": 0.4249233603477478,
"learning_rate": 0.00019886854869963883,
"loss": 0.8932583928108215,
"step": 1766
},
{
"epoch": 0.7459915611814346,
"grad_norm": 0.35815340280532837,
"learning_rate": 0.00019886133274888378,
"loss": 0.7684977054595947,
"step": 1768
},
{
"epoch": 0.7468354430379747,
"grad_norm": 0.41035401821136475,
"learning_rate": 0.00019885409399271795,
"loss": 0.8644338250160217,
"step": 1770
},
{
"epoch": 0.7476793248945147,
"grad_norm": 0.5450286269187927,
"learning_rate": 0.00019884683243281116,
"loss": 0.8035860657691956,
"step": 1772
},
{
"epoch": 0.7485232067510549,
"grad_norm": 0.5280999541282654,
"learning_rate": 0.0001988395480708385,
"loss": 0.8861207962036133,
"step": 1774
},
{
"epoch": 0.7493670886075949,
"grad_norm": 0.42665231227874756,
"learning_rate": 0.00019883224090848036,
"loss": 0.7506847977638245,
"step": 1776
},
{
"epoch": 0.750210970464135,
"grad_norm": 0.39029282331466675,
"learning_rate": 0.00019882491094742232,
"loss": 0.7662046551704407,
"step": 1778
},
{
"epoch": 0.7510548523206751,
"grad_norm": 0.43870019912719727,
"learning_rate": 0.00019881755818935534,
"loss": 0.7590143084526062,
"step": 1780
},
{
"epoch": 0.7518987341772152,
"grad_norm": 0.4195050001144409,
"learning_rate": 0.0001988101826359755,
"loss": 0.9340365529060364,
"step": 1782
},
{
"epoch": 0.7527426160337553,
"grad_norm": 0.4236123263835907,
"learning_rate": 0.0001988027842889842,
"loss": 0.7969209551811218,
"step": 1784
},
{
"epoch": 0.7535864978902953,
"grad_norm": 0.4274357259273529,
"learning_rate": 0.00019879536315008808,
"loss": 0.7892382144927979,
"step": 1786
},
{
"epoch": 0.7544303797468355,
"grad_norm": 0.4622916579246521,
"learning_rate": 0.00019878791922099912,
"loss": 0.8752562403678894,
"step": 1788
},
{
"epoch": 0.7552742616033755,
"grad_norm": 0.3648734986782074,
"learning_rate": 0.00019878045250343445,
"loss": 0.813011884689331,
"step": 1790
},
{
"epoch": 0.7561181434599156,
"grad_norm": 0.41361239552497864,
"learning_rate": 0.00019877296299911648,
"loss": 0.7310198545455933,
"step": 1792
},
{
"epoch": 0.7569620253164557,
"grad_norm": 0.4113222062587738,
"learning_rate": 0.00019876545070977294,
"loss": 0.858386754989624,
"step": 1794
},
{
"epoch": 0.7578059071729958,
"grad_norm": 0.4295370876789093,
"learning_rate": 0.00019875791563713676,
"loss": 0.8377325534820557,
"step": 1796
},
{
"epoch": 0.7586497890295358,
"grad_norm": 0.4227522015571594,
"learning_rate": 0.0001987503577829461,
"loss": 0.9139418005943298,
"step": 1798
},
{
"epoch": 0.759493670886076,
"grad_norm": 0.3638151288032532,
"learning_rate": 0.00019874277714894442,
"loss": 0.6994872689247131,
"step": 1800
},
{
"epoch": 0.759493670886076,
"eval_loss": 0.8625519275665283,
"eval_runtime": 686.4271,
"eval_samples_per_second": 3.07,
"eval_steps_per_second": 3.07,
"step": 1800
},
{
"epoch": 0.760337552742616,
"grad_norm": 0.46954411268234253,
"learning_rate": 0.00019873517373688047,
"loss": 0.8681167960166931,
"step": 1802
},
{
"epoch": 0.7611814345991561,
"grad_norm": 0.41958674788475037,
"learning_rate": 0.00019872754754850819,
"loss": 0.7420852184295654,
"step": 1804
},
{
"epoch": 0.7620253164556962,
"grad_norm": 0.431226521730423,
"learning_rate": 0.00019871989858558678,
"loss": 0.8018608093261719,
"step": 1806
},
{
"epoch": 0.7628691983122363,
"grad_norm": 0.45129454135894775,
"learning_rate": 0.0001987122268498807,
"loss": 0.8793904781341553,
"step": 1808
},
{
"epoch": 0.7637130801687764,
"grad_norm": 0.44278961420059204,
"learning_rate": 0.00019870453234315972,
"loss": 0.8416730761528015,
"step": 1810
},
{
"epoch": 0.7645569620253164,
"grad_norm": 0.4689098000526428,
"learning_rate": 0.00019869681506719876,
"loss": 0.8225743174552917,
"step": 1812
},
{
"epoch": 0.7654008438818566,
"grad_norm": 0.4413386881351471,
"learning_rate": 0.00019868907502377806,
"loss": 0.7871913313865662,
"step": 1814
},
{
"epoch": 0.7662447257383966,
"grad_norm": 0.48395273089408875,
"learning_rate": 0.00019868131221468315,
"loss": 0.8616237044334412,
"step": 1816
},
{
"epoch": 0.7670886075949367,
"grad_norm": 0.370263934135437,
"learning_rate": 0.0001986735266417047,
"loss": 0.7243452668190002,
"step": 1818
},
{
"epoch": 0.7679324894514767,
"grad_norm": 0.4522445797920227,
"learning_rate": 0.00019866571830663875,
"loss": 0.8623812198638916,
"step": 1820
},
{
"epoch": 0.7687763713080169,
"grad_norm": 0.43821004033088684,
"learning_rate": 0.00019865788721128643,
"loss": 0.776618480682373,
"step": 1822
},
{
"epoch": 0.769620253164557,
"grad_norm": 0.3825650215148926,
"learning_rate": 0.00019865003335745436,
"loss": 0.8722774982452393,
"step": 1824
},
{
"epoch": 0.770464135021097,
"grad_norm": 0.43684205412864685,
"learning_rate": 0.00019864215674695416,
"loss": 0.907802939414978,
"step": 1826
},
{
"epoch": 0.7713080168776372,
"grad_norm": 0.4128975570201874,
"learning_rate": 0.00019863425738160287,
"loss": 0.8505539298057556,
"step": 1828
},
{
"epoch": 0.7721518987341772,
"grad_norm": 0.4254065454006195,
"learning_rate": 0.0001986263352632227,
"loss": 0.83274906873703,
"step": 1830
},
{
"epoch": 0.7729957805907173,
"grad_norm": 0.4137375056743622,
"learning_rate": 0.00019861839039364118,
"loss": 0.8172078728675842,
"step": 1832
},
{
"epoch": 0.7738396624472574,
"grad_norm": 0.4387703239917755,
"learning_rate": 0.00019861042277469096,
"loss": 0.8610842227935791,
"step": 1834
},
{
"epoch": 0.7746835443037975,
"grad_norm": 0.398952454328537,
"learning_rate": 0.00019860243240821005,
"loss": 0.7961188554763794,
"step": 1836
},
{
"epoch": 0.7755274261603375,
"grad_norm": 0.4079921543598175,
"learning_rate": 0.0001985944192960417,
"loss": 0.8078321218490601,
"step": 1838
},
{
"epoch": 0.7763713080168776,
"grad_norm": 0.4701489508152008,
"learning_rate": 0.00019858638344003433,
"loss": 0.8743909001350403,
"step": 1840
},
{
"epoch": 0.7772151898734178,
"grad_norm": 0.5391269326210022,
"learning_rate": 0.00019857832484204173,
"loss": 0.7681707143783569,
"step": 1842
},
{
"epoch": 0.7780590717299578,
"grad_norm": 0.36250734329223633,
"learning_rate": 0.00019857024350392277,
"loss": 0.7817525863647461,
"step": 1844
},
{
"epoch": 0.7789029535864979,
"grad_norm": 0.41237136721611023,
"learning_rate": 0.00019856213942754175,
"loss": 0.7614551782608032,
"step": 1846
},
{
"epoch": 0.779746835443038,
"grad_norm": 0.3968869745731354,
"learning_rate": 0.00019855401261476807,
"loss": 0.7492313981056213,
"step": 1848
},
{
"epoch": 0.7805907172995781,
"grad_norm": 0.441476434469223,
"learning_rate": 0.00019854586306747646,
"loss": 0.8615695238113403,
"step": 1850
},
{
"epoch": 0.7814345991561181,
"grad_norm": 0.4486154317855835,
"learning_rate": 0.00019853769078754686,
"loss": 0.8416724801063538,
"step": 1852
},
{
"epoch": 0.7822784810126582,
"grad_norm": 0.45148879289627075,
"learning_rate": 0.0001985294957768644,
"loss": 0.8987806439399719,
"step": 1854
},
{
"epoch": 0.7831223628691983,
"grad_norm": 0.39330095052719116,
"learning_rate": 0.0001985212780373196,
"loss": 0.7597590088844299,
"step": 1856
},
{
"epoch": 0.7839662447257384,
"grad_norm": 0.4260408580303192,
"learning_rate": 0.00019851303757080808,
"loss": 0.8429927229881287,
"step": 1858
},
{
"epoch": 0.7848101265822784,
"grad_norm": 0.37519171833992004,
"learning_rate": 0.00019850477437923075,
"loss": 0.7443564534187317,
"step": 1860
},
{
"epoch": 0.7856540084388186,
"grad_norm": 0.4991084337234497,
"learning_rate": 0.00019849648846449383,
"loss": 0.934238612651825,
"step": 1862
},
{
"epoch": 0.7864978902953587,
"grad_norm": 0.4711335003376007,
"learning_rate": 0.00019848817982850867,
"loss": 0.8938905000686646,
"step": 1864
},
{
"epoch": 0.7873417721518987,
"grad_norm": 0.4145370423793793,
"learning_rate": 0.00019847984847319193,
"loss": 0.8745643496513367,
"step": 1866
},
{
"epoch": 0.7881856540084389,
"grad_norm": 0.3444044888019562,
"learning_rate": 0.00019847149440046548,
"loss": 0.7178786396980286,
"step": 1868
},
{
"epoch": 0.7890295358649789,
"grad_norm": 0.34632962942123413,
"learning_rate": 0.00019846311761225646,
"loss": 0.9114303588867188,
"step": 1870
},
{
"epoch": 0.789873417721519,
"grad_norm": 0.38630741834640503,
"learning_rate": 0.0001984547181104972,
"loss": 0.836281955242157,
"step": 1872
},
{
"epoch": 0.790717299578059,
"grad_norm": 0.40947434306144714,
"learning_rate": 0.00019844629589712534,
"loss": 0.8247858881950378,
"step": 1874
},
{
"epoch": 0.7915611814345992,
"grad_norm": 0.45878684520721436,
"learning_rate": 0.00019843785097408372,
"loss": 0.8446351289749146,
"step": 1876
},
{
"epoch": 0.7924050632911392,
"grad_norm": 0.37202852964401245,
"learning_rate": 0.0001984293833433204,
"loss": 0.8567686676979065,
"step": 1878
},
{
"epoch": 0.7932489451476793,
"grad_norm": 0.3839069902896881,
"learning_rate": 0.00019842089300678876,
"loss": 0.8134047985076904,
"step": 1880
},
{
"epoch": 0.7940928270042195,
"grad_norm": 0.4018687605857849,
"learning_rate": 0.00019841237996644727,
"loss": 0.8670110106468201,
"step": 1882
},
{
"epoch": 0.7949367088607595,
"grad_norm": 0.372089684009552,
"learning_rate": 0.0001984038442242598,
"loss": 0.7975355386734009,
"step": 1884
},
{
"epoch": 0.7957805907172996,
"grad_norm": 0.4492705166339874,
"learning_rate": 0.0001983952857821953,
"loss": 0.8755611777305603,
"step": 1886
},
{
"epoch": 0.7966244725738396,
"grad_norm": 0.451054185628891,
"learning_rate": 0.00019838670464222813,
"loss": 0.8680241107940674,
"step": 1888
},
{
"epoch": 0.7974683544303798,
"grad_norm": 0.4036900997161865,
"learning_rate": 0.00019837810080633773,
"loss": 0.7990990877151489,
"step": 1890
},
{
"epoch": 0.7983122362869198,
"grad_norm": 0.46125802397727966,
"learning_rate": 0.0001983694742765089,
"loss": 0.8598953485488892,
"step": 1892
},
{
"epoch": 0.7991561181434599,
"grad_norm": 0.4458785951137543,
"learning_rate": 0.00019836082505473153,
"loss": 0.7707474827766418,
"step": 1894
},
{
"epoch": 0.8,
"grad_norm": 0.44501590728759766,
"learning_rate": 0.0001983521531430009,
"loss": 0.8069534301757812,
"step": 1896
},
{
"epoch": 0.8008438818565401,
"grad_norm": 0.400771826505661,
"learning_rate": 0.0001983434585433174,
"loss": 0.862334668636322,
"step": 1898
},
{
"epoch": 0.8016877637130801,
"grad_norm": 0.39193347096443176,
"learning_rate": 0.00019833474125768676,
"loss": 0.8125433325767517,
"step": 1900
},
{
"epoch": 0.8016877637130801,
"eval_loss": 0.8546335697174072,
"eval_runtime": 688.5301,
"eval_samples_per_second": 3.06,
"eval_steps_per_second": 3.06,
"step": 1900
},
{
"epoch": 0.8025316455696202,
"grad_norm": 0.441129207611084,
"learning_rate": 0.00019832600128811986,
"loss": 0.8246012330055237,
"step": 1902
},
{
"epoch": 0.8033755274261604,
"grad_norm": 0.4089467227458954,
"learning_rate": 0.00019831723863663285,
"loss": 0.8387641310691833,
"step": 1904
},
{
"epoch": 0.8042194092827004,
"grad_norm": 0.4871654510498047,
"learning_rate": 0.0001983084533052471,
"loss": 0.7891429662704468,
"step": 1906
},
{
"epoch": 0.8050632911392405,
"grad_norm": 0.40085604786872864,
"learning_rate": 0.00019829964529598923,
"loss": 0.8008774518966675,
"step": 1908
},
{
"epoch": 0.8059071729957806,
"grad_norm": 0.38775014877319336,
"learning_rate": 0.00019829081461089109,
"loss": 0.7921834588050842,
"step": 1910
},
{
"epoch": 0.8067510548523207,
"grad_norm": 0.37830594182014465,
"learning_rate": 0.0001982819612519897,
"loss": 0.7752519845962524,
"step": 1912
},
{
"epoch": 0.8075949367088607,
"grad_norm": 0.41137194633483887,
"learning_rate": 0.0001982730852213274,
"loss": 0.7958255410194397,
"step": 1914
},
{
"epoch": 0.8084388185654009,
"grad_norm": 0.4871830940246582,
"learning_rate": 0.0001982641865209517,
"loss": 0.8177281618118286,
"step": 1916
},
{
"epoch": 0.809282700421941,
"grad_norm": 0.4157388210296631,
"learning_rate": 0.0001982552651529154,
"loss": 0.7726616859436035,
"step": 1918
},
{
"epoch": 0.810126582278481,
"grad_norm": 0.32338014245033264,
"learning_rate": 0.00019824632111927645,
"loss": 0.6601400375366211,
"step": 1920
},
{
"epoch": 0.810970464135021,
"grad_norm": 0.4642949104309082,
"learning_rate": 0.00019823735442209804,
"loss": 0.7866622805595398,
"step": 1922
},
{
"epoch": 0.8118143459915612,
"grad_norm": 0.43558797240257263,
"learning_rate": 0.00019822836506344865,
"loss": 0.7857986092567444,
"step": 1924
},
{
"epoch": 0.8126582278481013,
"grad_norm": 0.3996953070163727,
"learning_rate": 0.000198219353045402,
"loss": 0.8407763838768005,
"step": 1926
},
{
"epoch": 0.8135021097046413,
"grad_norm": 0.4415414333343506,
"learning_rate": 0.00019821031837003686,
"loss": 0.8433752655982971,
"step": 1928
},
{
"epoch": 0.8143459915611815,
"grad_norm": 0.37329310178756714,
"learning_rate": 0.00019820126103943752,
"loss": 0.6941158175468445,
"step": 1930
},
{
"epoch": 0.8151898734177215,
"grad_norm": 0.4482601284980774,
"learning_rate": 0.00019819218105569323,
"loss": 0.775604248046875,
"step": 1932
},
{
"epoch": 0.8160337552742616,
"grad_norm": 0.426194429397583,
"learning_rate": 0.00019818307842089854,
"loss": 0.8496418595314026,
"step": 1934
},
{
"epoch": 0.8168776371308016,
"grad_norm": 0.42379093170166016,
"learning_rate": 0.00019817395313715335,
"loss": 0.7767958045005798,
"step": 1936
},
{
"epoch": 0.8177215189873418,
"grad_norm": 0.4149782061576843,
"learning_rate": 0.0001981648052065626,
"loss": 0.7347666025161743,
"step": 1938
},
{
"epoch": 0.8185654008438819,
"grad_norm": 0.3927431106567383,
"learning_rate": 0.0001981556346312366,
"loss": 0.7839647531509399,
"step": 1940
},
{
"epoch": 0.8194092827004219,
"grad_norm": 0.4483136236667633,
"learning_rate": 0.00019814644141329083,
"loss": 0.8870531916618347,
"step": 1942
},
{
"epoch": 0.8202531645569621,
"grad_norm": 0.4400147497653961,
"learning_rate": 0.00019813722555484594,
"loss": 0.8646620512008667,
"step": 1944
},
{
"epoch": 0.8210970464135021,
"grad_norm": 0.469971626996994,
"learning_rate": 0.00019812798705802785,
"loss": 0.9576541781425476,
"step": 1946
},
{
"epoch": 0.8219409282700422,
"grad_norm": 0.388113409280777,
"learning_rate": 0.00019811872592496776,
"loss": 0.7956477403640747,
"step": 1948
},
{
"epoch": 0.8227848101265823,
"grad_norm": 0.4271424114704132,
"learning_rate": 0.00019810944215780201,
"loss": 0.8440352082252502,
"step": 1950
},
{
"epoch": 0.8236286919831224,
"grad_norm": 0.4097966253757477,
"learning_rate": 0.00019810013575867217,
"loss": 0.8589065074920654,
"step": 1952
},
{
"epoch": 0.8244725738396624,
"grad_norm": 0.41550710797309875,
"learning_rate": 0.00019809080672972504,
"loss": 0.8606626987457275,
"step": 1954
},
{
"epoch": 0.8253164556962025,
"grad_norm": 0.3680916130542755,
"learning_rate": 0.0001980814550731127,
"loss": 0.7582436800003052,
"step": 1956
},
{
"epoch": 0.8261603375527427,
"grad_norm": 0.573297917842865,
"learning_rate": 0.00019807208079099234,
"loss": 0.8345186710357666,
"step": 1958
},
{
"epoch": 0.8270042194092827,
"grad_norm": 0.43631303310394287,
"learning_rate": 0.00019806268388552646,
"loss": 0.889683723449707,
"step": 1960
},
{
"epoch": 0.8278481012658228,
"grad_norm": 0.442531019449234,
"learning_rate": 0.00019805326435888275,
"loss": 0.8092973232269287,
"step": 1962
},
{
"epoch": 0.8286919831223629,
"grad_norm": 0.45020824670791626,
"learning_rate": 0.0001980438222132341,
"loss": 0.8919535279273987,
"step": 1964
},
{
"epoch": 0.829535864978903,
"grad_norm": 0.43131786584854126,
"learning_rate": 0.0001980343574507586,
"loss": 0.7652381658554077,
"step": 1966
},
{
"epoch": 0.830379746835443,
"grad_norm": 0.4461326599121094,
"learning_rate": 0.00019802487007363967,
"loss": 0.8512389659881592,
"step": 1968
},
{
"epoch": 0.8312236286919831,
"grad_norm": 0.4676894247531891,
"learning_rate": 0.0001980153600840658,
"loss": 0.8681327700614929,
"step": 1970
},
{
"epoch": 0.8320675105485232,
"grad_norm": 0.38376888632774353,
"learning_rate": 0.00019800582748423075,
"loss": 0.7476955652236938,
"step": 1972
},
{
"epoch": 0.8329113924050633,
"grad_norm": 0.37173229455947876,
"learning_rate": 0.00019799627227633354,
"loss": 0.814192533493042,
"step": 1974
},
{
"epoch": 0.8337552742616033,
"grad_norm": 0.4345237910747528,
"learning_rate": 0.00019798669446257844,
"loss": 0.7668994069099426,
"step": 1976
},
{
"epoch": 0.8345991561181435,
"grad_norm": 0.45872583985328674,
"learning_rate": 0.0001979770940451747,
"loss": 0.8321775197982788,
"step": 1978
},
{
"epoch": 0.8354430379746836,
"grad_norm": 0.40618205070495605,
"learning_rate": 0.0001979674710263371,
"loss": 0.7824342250823975,
"step": 1980
},
{
"epoch": 0.8362869198312236,
"grad_norm": 0.38377392292022705,
"learning_rate": 0.00019795782540828544,
"loss": 0.8752480745315552,
"step": 1982
},
{
"epoch": 0.8371308016877637,
"grad_norm": 0.36340072751045227,
"learning_rate": 0.00019794815719324476,
"loss": 0.66700679063797,
"step": 1984
},
{
"epoch": 0.8379746835443038,
"grad_norm": 0.4638384282588959,
"learning_rate": 0.00019793846638344536,
"loss": 0.8582209944725037,
"step": 1986
},
{
"epoch": 0.8388185654008439,
"grad_norm": 0.38364699482917786,
"learning_rate": 0.00019792875298112268,
"loss": 0.7724968791007996,
"step": 1988
},
{
"epoch": 0.8396624472573839,
"grad_norm": 0.439730703830719,
"learning_rate": 0.00019791901698851749,
"loss": 0.8542404174804688,
"step": 1990
},
{
"epoch": 0.8405063291139241,
"grad_norm": 0.4622722566127777,
"learning_rate": 0.00019790925840787563,
"loss": 0.8424296975135803,
"step": 1992
},
{
"epoch": 0.8413502109704641,
"grad_norm": 0.4491989314556122,
"learning_rate": 0.0001978994772414482,
"loss": 0.8400710225105286,
"step": 1994
},
{
"epoch": 0.8421940928270042,
"grad_norm": 0.372295081615448,
"learning_rate": 0.0001978896734914916,
"loss": 0.7265452146530151,
"step": 1996
},
{
"epoch": 0.8430379746835444,
"grad_norm": 0.4328666925430298,
"learning_rate": 0.00019787984716026732,
"loss": 0.8458228707313538,
"step": 1998
},
{
"epoch": 0.8438818565400844,
"grad_norm": 0.42246317863464355,
"learning_rate": 0.00019786999825004216,
"loss": 0.8517491817474365,
"step": 2000
},
{
"epoch": 0.8438818565400844,
"eval_loss": 0.8460908532142639,
"eval_runtime": 685.2518,
"eval_samples_per_second": 3.075,
"eval_steps_per_second": 3.075,
"step": 2000
}
],
"logging_steps": 2,
"max_steps": 14220,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.001
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.195087286365225e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}