{
"best_global_step": 2000,
"best_metric": 0.7587011456489563,
"best_model_checkpoint": "task2file/sft_devstral_24B_v2/checkpoints/checkpoint-2000",
"epoch": 0.8438818565400844,
"eval_steps": 100,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008438818565400844,
"grad_norm": 1.597854733467102,
"learning_rate": 8.787346221441124e-08,
"loss": 1.3927901983261108,
"step": 2
},
{
"epoch": 0.0016877637130801688,
"grad_norm": 1.6547431945800781,
"learning_rate": 2.6362038664323375e-07,
"loss": 1.407160758972168,
"step": 4
},
{
"epoch": 0.002531645569620253,
"grad_norm": 1.8221601247787476,
"learning_rate": 4.393673110720563e-07,
"loss": 1.376656174659729,
"step": 6
},
{
"epoch": 0.0033755274261603376,
"grad_norm": 1.4831048250198364,
"learning_rate": 6.151142355008788e-07,
"loss": 1.247712254524231,
"step": 8
},
{
"epoch": 0.004219409282700422,
"grad_norm": 1.668201208114624,
"learning_rate": 7.908611599297013e-07,
"loss": 1.2685163021087646,
"step": 10
},
{
"epoch": 0.005063291139240506,
"grad_norm": 1.67417311668396,
"learning_rate": 9.666080843585237e-07,
"loss": 1.2942761182785034,
"step": 12
},
{
"epoch": 0.00590717299578059,
"grad_norm": 1.7154079675674438,
"learning_rate": 1.1423550087873463e-06,
"loss": 1.3638604879379272,
"step": 14
},
{
"epoch": 0.006751054852320675,
"grad_norm": 1.729427456855774,
"learning_rate": 1.3181019332161688e-06,
"loss": 1.3476728200912476,
"step": 16
},
{
"epoch": 0.007594936708860759,
"grad_norm": 1.3813447952270508,
"learning_rate": 1.4938488576449913e-06,
"loss": 1.3476393222808838,
"step": 18
},
{
"epoch": 0.008438818565400843,
"grad_norm": 1.557220458984375,
"learning_rate": 1.6695957820738139e-06,
"loss": 1.2449309825897217,
"step": 20
},
{
"epoch": 0.009282700421940928,
"grad_norm": 1.1883500814437866,
"learning_rate": 1.8453427065026362e-06,
"loss": 1.3125361204147339,
"step": 22
},
{
"epoch": 0.010126582278481013,
"grad_norm": 1.7290029525756836,
"learning_rate": 2.0210896309314587e-06,
"loss": 1.3724769353866577,
"step": 24
},
{
"epoch": 0.010970464135021098,
"grad_norm": 1.5627557039260864,
"learning_rate": 2.1968365553602812e-06,
"loss": 1.3401387929916382,
"step": 26
},
{
"epoch": 0.01181434599156118,
"grad_norm": 1.796866774559021,
"learning_rate": 2.3725834797891038e-06,
"loss": 1.365437388420105,
"step": 28
},
{
"epoch": 0.012658227848101266,
"grad_norm": 1.7030404806137085,
"learning_rate": 2.5483304042179263e-06,
"loss": 1.2706533670425415,
"step": 30
},
{
"epoch": 0.01350210970464135,
"grad_norm": 1.3186293840408325,
"learning_rate": 2.724077328646749e-06,
"loss": 1.3084994554519653,
"step": 32
},
{
"epoch": 0.014345991561181435,
"grad_norm": 1.5762513875961304,
"learning_rate": 2.8998242530755714e-06,
"loss": 1.3259696960449219,
"step": 34
},
{
"epoch": 0.015189873417721518,
"grad_norm": 1.422295331954956,
"learning_rate": 3.075571177504394e-06,
"loss": 1.3205676078796387,
"step": 36
},
{
"epoch": 0.016033755274261603,
"grad_norm": 1.495523452758789,
"learning_rate": 3.2513181019332165e-06,
"loss": 1.3740568161010742,
"step": 38
},
{
"epoch": 0.016877637130801686,
"grad_norm": 1.5112254619598389,
"learning_rate": 3.427065026362039e-06,
"loss": 1.321828842163086,
"step": 40
},
{
"epoch": 0.017721518987341773,
"grad_norm": 1.4667807817459106,
"learning_rate": 3.602811950790861e-06,
"loss": 1.3673173189163208,
"step": 42
},
{
"epoch": 0.018565400843881856,
"grad_norm": 1.6609723567962646,
"learning_rate": 3.7785588752196836e-06,
"loss": 1.3968093395233154,
"step": 44
},
{
"epoch": 0.019409282700421943,
"grad_norm": 1.59381103515625,
"learning_rate": 3.954305799648506e-06,
"loss": 1.4295302629470825,
"step": 46
},
{
"epoch": 0.020253164556962026,
"grad_norm": 1.1470608711242676,
"learning_rate": 4.130052724077329e-06,
"loss": 1.2536572217941284,
"step": 48
},
{
"epoch": 0.02109704641350211,
"grad_norm": 1.2014588117599487,
"learning_rate": 4.305799648506151e-06,
"loss": 1.242217779159546,
"step": 50
},
{
"epoch": 0.021940928270042195,
"grad_norm": 1.2327464818954468,
"learning_rate": 4.481546572934974e-06,
"loss": 1.2166963815689087,
"step": 52
},
{
"epoch": 0.02278481012658228,
"grad_norm": 1.9708983898162842,
"learning_rate": 4.657293497363796e-06,
"loss": 1.25709867477417,
"step": 54
},
{
"epoch": 0.02362869198312236,
"grad_norm": 1.180569052696228,
"learning_rate": 4.833040421792619e-06,
"loss": 1.2886158227920532,
"step": 56
},
{
"epoch": 0.024472573839662448,
"grad_norm": 1.5029548406600952,
"learning_rate": 5.008787346221441e-06,
"loss": 1.29886794090271,
"step": 58
},
{
"epoch": 0.02531645569620253,
"grad_norm": 1.5380216836929321,
"learning_rate": 5.184534270650264e-06,
"loss": 1.2387628555297852,
"step": 60
},
{
"epoch": 0.026160337552742614,
"grad_norm": 1.572144865989685,
"learning_rate": 5.3602811950790864e-06,
"loss": 1.2177000045776367,
"step": 62
},
{
"epoch": 0.0270042194092827,
"grad_norm": 1.4882780313491821,
"learning_rate": 5.536028119507909e-06,
"loss": 1.181516170501709,
"step": 64
},
{
"epoch": 0.027848101265822784,
"grad_norm": 1.2982488870620728,
"learning_rate": 5.7117750439367315e-06,
"loss": 1.2101733684539795,
"step": 66
},
{
"epoch": 0.02869198312236287,
"grad_norm": 1.5236955881118774,
"learning_rate": 5.887521968365554e-06,
"loss": 1.2277681827545166,
"step": 68
},
{
"epoch": 0.029535864978902954,
"grad_norm": 1.4521006345748901,
"learning_rate": 6.0632688927943766e-06,
"loss": 1.1688424348831177,
"step": 70
},
{
"epoch": 0.030379746835443037,
"grad_norm": 1.2352311611175537,
"learning_rate": 6.239015817223199e-06,
"loss": 1.273059368133545,
"step": 72
},
{
"epoch": 0.031223628691983123,
"grad_norm": 1.3438209295272827,
"learning_rate": 6.414762741652021e-06,
"loss": 1.1609034538269043,
"step": 74
},
{
"epoch": 0.032067510548523206,
"grad_norm": 1.9009398221969604,
"learning_rate": 6.590509666080843e-06,
"loss": 1.2508260011672974,
"step": 76
},
{
"epoch": 0.03291139240506329,
"grad_norm": 1.6718412637710571,
"learning_rate": 6.766256590509666e-06,
"loss": 1.2524956464767456,
"step": 78
},
{
"epoch": 0.03375527426160337,
"grad_norm": 1.249891757965088,
"learning_rate": 6.942003514938488e-06,
"loss": 1.1472493410110474,
"step": 80
},
{
"epoch": 0.03459915611814346,
"grad_norm": 1.4398653507232666,
"learning_rate": 7.117750439367312e-06,
"loss": 1.0845389366149902,
"step": 82
},
{
"epoch": 0.035443037974683546,
"grad_norm": 1.3701167106628418,
"learning_rate": 7.293497363796134e-06,
"loss": 1.1088868379592896,
"step": 84
},
{
"epoch": 0.036286919831223625,
"grad_norm": 1.277998924255371,
"learning_rate": 7.469244288224957e-06,
"loss": 1.1513772010803223,
"step": 86
},
{
"epoch": 0.03713080168776371,
"grad_norm": 1.4970002174377441,
"learning_rate": 7.644991212653779e-06,
"loss": 1.1385771036148071,
"step": 88
},
{
"epoch": 0.0379746835443038,
"grad_norm": 1.3384218215942383,
"learning_rate": 7.820738137082601e-06,
"loss": 1.1632680892944336,
"step": 90
},
{
"epoch": 0.038818565400843885,
"grad_norm": 1.4317446947097778,
"learning_rate": 7.996485061511425e-06,
"loss": 1.2256064414978027,
"step": 92
},
{
"epoch": 0.039662447257383965,
"grad_norm": 1.8743640184402466,
"learning_rate": 8.172231985940246e-06,
"loss": 1.1935789585113525,
"step": 94
},
{
"epoch": 0.04050632911392405,
"grad_norm": 1.4789546728134155,
"learning_rate": 8.347978910369069e-06,
"loss": 1.1429362297058105,
"step": 96
},
{
"epoch": 0.04135021097046414,
"grad_norm": 1.658605694770813,
"learning_rate": 8.523725834797891e-06,
"loss": 1.1831508874893188,
"step": 98
},
{
"epoch": 0.04219409282700422,
"grad_norm": 1.5077892541885376,
"learning_rate": 8.699472759226714e-06,
"loss": 1.0539867877960205,
"step": 100
},
{
"epoch": 0.04219409282700422,
"eval_loss": 1.138856053352356,
"eval_runtime": 859.7128,
"eval_samples_per_second": 2.451,
"eval_steps_per_second": 2.451,
"step": 100
},
{
"epoch": 0.043037974683544304,
"grad_norm": 1.4335681200027466,
"learning_rate": 8.875219683655536e-06,
"loss": 1.0719901323318481,
"step": 102
},
{
"epoch": 0.04388185654008439,
"grad_norm": 1.7387681007385254,
"learning_rate": 9.050966608084359e-06,
"loss": 1.0654313564300537,
"step": 104
},
{
"epoch": 0.04472573839662447,
"grad_norm": 1.6071950197219849,
"learning_rate": 9.226713532513181e-06,
"loss": 1.0752698183059692,
"step": 106
},
{
"epoch": 0.04556962025316456,
"grad_norm": 1.40005362033844,
"learning_rate": 9.402460456942004e-06,
"loss": 1.1029763221740723,
"step": 108
},
{
"epoch": 0.046413502109704644,
"grad_norm": 2.2338669300079346,
"learning_rate": 9.578207381370826e-06,
"loss": 1.1157960891723633,
"step": 110
},
{
"epoch": 0.04725738396624472,
"grad_norm": 1.4972727298736572,
"learning_rate": 9.753954305799649e-06,
"loss": 1.1095420122146606,
"step": 112
},
{
"epoch": 0.04810126582278481,
"grad_norm": 1.317979097366333,
"learning_rate": 9.929701230228471e-06,
"loss": 1.109113097190857,
"step": 114
},
{
"epoch": 0.048945147679324896,
"grad_norm": 1.496346116065979,
"learning_rate": 1.0105448154657294e-05,
"loss": 1.1055104732513428,
"step": 116
},
{
"epoch": 0.049789029535864976,
"grad_norm": 1.385406732559204,
"learning_rate": 1.0281195079086117e-05,
"loss": 1.118395209312439,
"step": 118
},
{
"epoch": 0.05063291139240506,
"grad_norm": 1.524222731590271,
"learning_rate": 1.0456942003514939e-05,
"loss": 1.1008446216583252,
"step": 120
},
{
"epoch": 0.05147679324894515,
"grad_norm": 1.6308200359344482,
"learning_rate": 1.0632688927943762e-05,
"loss": 1.0891425609588623,
"step": 122
},
{
"epoch": 0.05232067510548523,
"grad_norm": 1.3681106567382812,
"learning_rate": 1.0808435852372584e-05,
"loss": 0.9080473184585571,
"step": 124
},
{
"epoch": 0.053164556962025315,
"grad_norm": 1.9429908990859985,
"learning_rate": 1.0984182776801407e-05,
"loss": 1.0337369441986084,
"step": 126
},
{
"epoch": 0.0540084388185654,
"grad_norm": 1.5830830335617065,
"learning_rate": 1.115992970123023e-05,
"loss": 1.0703333616256714,
"step": 128
},
{
"epoch": 0.05485232067510549,
"grad_norm": 1.4792555570602417,
"learning_rate": 1.1335676625659052e-05,
"loss": 1.004652738571167,
"step": 130
},
{
"epoch": 0.05569620253164557,
"grad_norm": 1.7196226119995117,
"learning_rate": 1.1511423550087874e-05,
"loss": 0.9798293709754944,
"step": 132
},
{
"epoch": 0.056540084388185655,
"grad_norm": 1.8733659982681274,
"learning_rate": 1.1687170474516697e-05,
"loss": 1.0213249921798706,
"step": 134
},
{
"epoch": 0.05738396624472574,
"grad_norm": 1.3431142568588257,
"learning_rate": 1.186291739894552e-05,
"loss": 1.0358591079711914,
"step": 136
},
{
"epoch": 0.05822784810126582,
"grad_norm": 1.527864933013916,
"learning_rate": 1.2038664323374342e-05,
"loss": 0.9372249841690063,
"step": 138
},
{
"epoch": 0.05907172995780591,
"grad_norm": 1.5495563745498657,
"learning_rate": 1.2214411247803164e-05,
"loss": 1.0277758836746216,
"step": 140
},
{
"epoch": 0.059915611814345994,
"grad_norm": 1.6792418956756592,
"learning_rate": 1.2390158172231985e-05,
"loss": 1.0349801778793335,
"step": 142
},
{
"epoch": 0.060759493670886074,
"grad_norm": 1.6468945741653442,
"learning_rate": 1.256590509666081e-05,
"loss": 0.9578297734260559,
"step": 144
},
{
"epoch": 0.06160337552742616,
"grad_norm": 1.7243824005126953,
"learning_rate": 1.2741652021089632e-05,
"loss": 1.0628854036331177,
"step": 146
},
{
"epoch": 0.06244725738396625,
"grad_norm": 1.7286981344223022,
"learning_rate": 1.2917398945518455e-05,
"loss": 0.9336449503898621,
"step": 148
},
{
"epoch": 0.06329113924050633,
"grad_norm": 1.6411832571029663,
"learning_rate": 1.3093145869947277e-05,
"loss": 0.953730583190918,
"step": 150
},
{
"epoch": 0.06413502109704641,
"grad_norm": 1.8297001123428345,
"learning_rate": 1.3268892794376098e-05,
"loss": 1.051239013671875,
"step": 152
},
{
"epoch": 0.06497890295358649,
"grad_norm": 1.9660519361495972,
"learning_rate": 1.3444639718804922e-05,
"loss": 0.9955035448074341,
"step": 154
},
{
"epoch": 0.06582278481012659,
"grad_norm": 1.8423733711242676,
"learning_rate": 1.3620386643233743e-05,
"loss": 0.913300096988678,
"step": 156
},
{
"epoch": 0.06666666666666667,
"grad_norm": 1.9146347045898438,
"learning_rate": 1.3796133567662567e-05,
"loss": 1.0429846048355103,
"step": 158
},
{
"epoch": 0.06751054852320675,
"grad_norm": 1.6221821308135986,
"learning_rate": 1.3971880492091388e-05,
"loss": 1.0360238552093506,
"step": 160
},
{
"epoch": 0.06835443037974684,
"grad_norm": 2.173283338546753,
"learning_rate": 1.4147627416520212e-05,
"loss": 1.0227266550064087,
"step": 162
},
{
"epoch": 0.06919831223628692,
"grad_norm": 1.7091665267944336,
"learning_rate": 1.4323374340949033e-05,
"loss": 1.0075194835662842,
"step": 164
},
{
"epoch": 0.070042194092827,
"grad_norm": 1.7219135761260986,
"learning_rate": 1.4499121265377857e-05,
"loss": 1.0044782161712646,
"step": 166
},
{
"epoch": 0.07088607594936709,
"grad_norm": 1.6558159589767456,
"learning_rate": 1.4674868189806678e-05,
"loss": 0.9393973350524902,
"step": 168
},
{
"epoch": 0.07172995780590717,
"grad_norm": 1.9362739324569702,
"learning_rate": 1.4850615114235502e-05,
"loss": 0.9955337643623352,
"step": 170
},
{
"epoch": 0.07257383966244725,
"grad_norm": 1.7792853116989136,
"learning_rate": 1.5026362038664323e-05,
"loss": 0.9659126400947571,
"step": 172
},
{
"epoch": 0.07341772151898734,
"grad_norm": 1.7184511423110962,
"learning_rate": 1.5202108963093147e-05,
"loss": 0.9077855348587036,
"step": 174
},
{
"epoch": 0.07426160337552742,
"grad_norm": 1.5701428651809692,
"learning_rate": 1.537785588752197e-05,
"loss": 0.9305018782615662,
"step": 176
},
{
"epoch": 0.0751054852320675,
"grad_norm": 1.970229148864746,
"learning_rate": 1.555360281195079e-05,
"loss": 1.0211774110794067,
"step": 178
},
{
"epoch": 0.0759493670886076,
"grad_norm": 1.8410269021987915,
"learning_rate": 1.5729349736379615e-05,
"loss": 0.9479315876960754,
"step": 180
},
{
"epoch": 0.07679324894514768,
"grad_norm": 1.8991246223449707,
"learning_rate": 1.5905096660808434e-05,
"loss": 1.0629050731658936,
"step": 182
},
{
"epoch": 0.07763713080168777,
"grad_norm": 1.8052008152008057,
"learning_rate": 1.608084358523726e-05,
"loss": 0.946983814239502,
"step": 184
},
{
"epoch": 0.07848101265822785,
"grad_norm": 1.547108769416809,
"learning_rate": 1.625659050966608e-05,
"loss": 0.9413356184959412,
"step": 186
},
{
"epoch": 0.07932489451476793,
"grad_norm": 1.9713538885116577,
"learning_rate": 1.6432337434094905e-05,
"loss": 0.9337888956069946,
"step": 188
},
{
"epoch": 0.08016877637130802,
"grad_norm": 1.708789348602295,
"learning_rate": 1.6608084358523728e-05,
"loss": 0.9816337823867798,
"step": 190
},
{
"epoch": 0.0810126582278481,
"grad_norm": 1.815292477607727,
"learning_rate": 1.678383128295255e-05,
"loss": 1.017122507095337,
"step": 192
},
{
"epoch": 0.08185654008438818,
"grad_norm": 1.7950682640075684,
"learning_rate": 1.6959578207381373e-05,
"loss": 0.991599440574646,
"step": 194
},
{
"epoch": 0.08270042194092828,
"grad_norm": 1.692512035369873,
"learning_rate": 1.7135325131810195e-05,
"loss": 0.9570834040641785,
"step": 196
},
{
"epoch": 0.08354430379746836,
"grad_norm": 2.056089162826538,
"learning_rate": 1.7311072056239018e-05,
"loss": 1.035754919052124,
"step": 198
},
{
"epoch": 0.08438818565400844,
"grad_norm": 1.7022203207015991,
"learning_rate": 1.7486818980667837e-05,
"loss": 1.0124205350875854,
"step": 200
},
{
"epoch": 0.08438818565400844,
"eval_loss": 0.995743453502655,
"eval_runtime": 846.8257,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 2.488,
"step": 200
},
{
"epoch": 0.08523206751054853,
"grad_norm": 1.6088604927062988,
"learning_rate": 1.7662565905096663e-05,
"loss": 0.8946985006332397,
"step": 202
},
{
"epoch": 0.08607594936708861,
"grad_norm": 2.02270770072937,
"learning_rate": 1.7838312829525482e-05,
"loss": 0.976133406162262,
"step": 204
},
{
"epoch": 0.08691983122362869,
"grad_norm": 1.7832789421081543,
"learning_rate": 1.8014059753954308e-05,
"loss": 0.9079383611679077,
"step": 206
},
{
"epoch": 0.08776371308016878,
"grad_norm": 1.9793545007705688,
"learning_rate": 1.8189806678383127e-05,
"loss": 0.8650367856025696,
"step": 208
},
{
"epoch": 0.08860759493670886,
"grad_norm": 1.8124271631240845,
"learning_rate": 1.8365553602811953e-05,
"loss": 0.9327266812324524,
"step": 210
},
{
"epoch": 0.08945147679324894,
"grad_norm": 1.8581212759017944,
"learning_rate": 1.8541300527240772e-05,
"loss": 0.9811079502105713,
"step": 212
},
{
"epoch": 0.09029535864978903,
"grad_norm": 2.001699447631836,
"learning_rate": 1.8717047451669598e-05,
"loss": 0.9546971321105957,
"step": 214
},
{
"epoch": 0.09113924050632911,
"grad_norm": 1.6994978189468384,
"learning_rate": 1.8892794376098417e-05,
"loss": 0.9611319899559021,
"step": 216
},
{
"epoch": 0.0919831223628692,
"grad_norm": 2.1379497051239014,
"learning_rate": 1.9068541300527243e-05,
"loss": 0.9781531095504761,
"step": 218
},
{
"epoch": 0.09282700421940929,
"grad_norm": 1.8961224555969238,
"learning_rate": 1.9244288224956066e-05,
"loss": 0.9374833106994629,
"step": 220
},
{
"epoch": 0.09367088607594937,
"grad_norm": 1.851464033126831,
"learning_rate": 1.9420035149384885e-05,
"loss": 0.9681299328804016,
"step": 222
},
{
"epoch": 0.09451476793248945,
"grad_norm": 2.0642266273498535,
"learning_rate": 1.959578207381371e-05,
"loss": 1.0086225271224976,
"step": 224
},
{
"epoch": 0.09535864978902954,
"grad_norm": 1.8658756017684937,
"learning_rate": 1.977152899824253e-05,
"loss": 0.9190312623977661,
"step": 226
},
{
"epoch": 0.09620253164556962,
"grad_norm": 2.4398674964904785,
"learning_rate": 1.9947275922671356e-05,
"loss": 0.9740874171257019,
"step": 228
},
{
"epoch": 0.0970464135021097,
"grad_norm": 1.849183440208435,
"learning_rate": 2.0123022847100175e-05,
"loss": 0.884376049041748,
"step": 230
},
{
"epoch": 0.09789029535864979,
"grad_norm": 2.027320384979248,
"learning_rate": 2.0298769771529e-05,
"loss": 0.9116487503051758,
"step": 232
},
{
"epoch": 0.09873417721518987,
"grad_norm": 1.6800135374069214,
"learning_rate": 2.047451669595782e-05,
"loss": 0.9035115242004395,
"step": 234
},
{
"epoch": 0.09957805907172995,
"grad_norm": 2.2362256050109863,
"learning_rate": 2.0650263620386646e-05,
"loss": 0.9043796062469482,
"step": 236
},
{
"epoch": 0.10042194092827005,
"grad_norm": 1.938215970993042,
"learning_rate": 2.0826010544815465e-05,
"loss": 1.0888828039169312,
"step": 238
},
{
"epoch": 0.10126582278481013,
"grad_norm": 1.890328049659729,
"learning_rate": 2.100175746924429e-05,
"loss": 0.9960280656814575,
"step": 240
},
{
"epoch": 0.1021097046413502,
"grad_norm": 2.021235227584839,
"learning_rate": 2.117750439367311e-05,
"loss": 0.9848901629447937,
"step": 242
},
{
"epoch": 0.1029535864978903,
"grad_norm": 2.023920774459839,
"learning_rate": 2.1353251318101936e-05,
"loss": 0.891694188117981,
"step": 244
},
{
"epoch": 0.10379746835443038,
"grad_norm": 1.8061069250106812,
"learning_rate": 2.1528998242530755e-05,
"loss": 0.9059976935386658,
"step": 246
},
{
"epoch": 0.10464135021097046,
"grad_norm": 2.176302194595337,
"learning_rate": 2.1704745166959578e-05,
"loss": 1.0056109428405762,
"step": 248
},
{
"epoch": 0.10548523206751055,
"grad_norm": 1.9820969104766846,
"learning_rate": 2.18804920913884e-05,
"loss": 0.9645357728004456,
"step": 250
},
{
"epoch": 0.10632911392405063,
"grad_norm": 1.8764572143554688,
"learning_rate": 2.2056239015817223e-05,
"loss": 1.0178182125091553,
"step": 252
},
{
"epoch": 0.10717299578059072,
"grad_norm": 2.56221342086792,
"learning_rate": 2.223198594024605e-05,
"loss": 0.9546761512756348,
"step": 254
},
{
"epoch": 0.1080168776371308,
"grad_norm": 2.6779074668884277,
"learning_rate": 2.2407732864674868e-05,
"loss": 0.9300968647003174,
"step": 256
},
{
"epoch": 0.10886075949367088,
"grad_norm": 2.140897512435913,
"learning_rate": 2.2583479789103694e-05,
"loss": 0.926638662815094,
"step": 258
},
{
"epoch": 0.10970464135021098,
"grad_norm": 2.0880508422851562,
"learning_rate": 2.2759226713532513e-05,
"loss": 1.0681840181350708,
"step": 260
},
{
"epoch": 0.11054852320675106,
"grad_norm": 2.7273616790771484,
"learning_rate": 2.293497363796134e-05,
"loss": 1.0840941667556763,
"step": 262
},
{
"epoch": 0.11139240506329114,
"grad_norm": 1.6723874807357788,
"learning_rate": 2.3110720562390158e-05,
"loss": 0.8637182116508484,
"step": 264
},
{
"epoch": 0.11223628691983123,
"grad_norm": 1.806243896484375,
"learning_rate": 2.3286467486818984e-05,
"loss": 0.9554686546325684,
"step": 266
},
{
"epoch": 0.11308016877637131,
"grad_norm": 1.9086743593215942,
"learning_rate": 2.3462214411247803e-05,
"loss": 0.9556593894958496,
"step": 268
},
{
"epoch": 0.11392405063291139,
"grad_norm": 2.1822304725646973,
"learning_rate": 2.3637961335676626e-05,
"loss": 0.9177709817886353,
"step": 270
},
{
"epoch": 0.11476793248945148,
"grad_norm": 2.1009039878845215,
"learning_rate": 2.3813708260105448e-05,
"loss": 0.9288759827613831,
"step": 272
},
{
"epoch": 0.11561181434599156,
"grad_norm": 1.9814810752868652,
"learning_rate": 2.398945518453427e-05,
"loss": 0.9881691932678223,
"step": 274
},
{
"epoch": 0.11645569620253164,
"grad_norm": 1.9946284294128418,
"learning_rate": 2.4165202108963093e-05,
"loss": 0.9390727281570435,
"step": 276
},
{
"epoch": 0.11729957805907174,
"grad_norm": 2.4489169120788574,
"learning_rate": 2.4340949033391916e-05,
"loss": 0.9625692963600159,
"step": 278
},
{
"epoch": 0.11814345991561181,
"grad_norm": 2.0919103622436523,
"learning_rate": 2.451669595782074e-05,
"loss": 0.9304702877998352,
"step": 280
},
{
"epoch": 0.1189873417721519,
"grad_norm": 1.912914752960205,
"learning_rate": 2.469244288224956e-05,
"loss": 0.9313994646072388,
"step": 282
},
{
"epoch": 0.11983122362869199,
"grad_norm": 2.1553256511688232,
"learning_rate": 2.4868189806678387e-05,
"loss": 1.004011869430542,
"step": 284
},
{
"epoch": 0.12067510548523207,
"grad_norm": 2.0129058361053467,
"learning_rate": 2.504393673110721e-05,
"loss": 0.9092531204223633,
"step": 286
},
{
"epoch": 0.12151898734177215,
"grad_norm": 2.1632325649261475,
"learning_rate": 2.5219683655536032e-05,
"loss": 0.993347704410553,
"step": 288
},
{
"epoch": 0.12236286919831224,
"grad_norm": 2.3072738647460938,
"learning_rate": 2.539543057996485e-05,
"loss": 0.978348433971405,
"step": 290
},
{
"epoch": 0.12320675105485232,
"grad_norm": 2.056560516357422,
"learning_rate": 2.5571177504393674e-05,
"loss": 1.0018101930618286,
"step": 292
},
{
"epoch": 0.1240506329113924,
"grad_norm": 1.8906747102737427,
"learning_rate": 2.5746924428822493e-05,
"loss": 0.9607775211334229,
"step": 294
},
{
"epoch": 0.1248945147679325,
"grad_norm": 2.1375651359558105,
"learning_rate": 2.5922671353251322e-05,
"loss": 0.9259153008460999,
"step": 296
},
{
"epoch": 0.1257383966244726,
"grad_norm": 1.9994823932647705,
"learning_rate": 2.609841827768014e-05,
"loss": 0.8524524569511414,
"step": 298
},
{
"epoch": 0.12658227848101267,
"grad_norm": 2.2421181201934814,
"learning_rate": 2.6274165202108964e-05,
"loss": 1.0047069787979126,
"step": 300
},
{
"epoch": 0.12658227848101267,
"eval_loss": 0.9517185688018799,
"eval_runtime": 860.0287,
"eval_samples_per_second": 2.45,
"eval_steps_per_second": 2.45,
"step": 300
},
{
"epoch": 0.12742616033755275,
"grad_norm": 2.1206254959106445,
"learning_rate": 2.6449912126537786e-05,
"loss": 0.8475471138954163,
"step": 302
},
{
"epoch": 0.12827004219409283,
"grad_norm": 1.885161280632019,
"learning_rate": 2.6625659050966612e-05,
"loss": 0.8643121123313904,
"step": 304
},
{
"epoch": 0.1291139240506329,
"grad_norm": 3.1441781520843506,
"learning_rate": 2.680140597539543e-05,
"loss": 0.8804612159729004,
"step": 306
},
{
"epoch": 0.12995780590717299,
"grad_norm": 1.953133225440979,
"learning_rate": 2.6977152899824254e-05,
"loss": 0.8348029255867004,
"step": 308
},
{
"epoch": 0.1308016877637131,
"grad_norm": 2.3762667179107666,
"learning_rate": 2.7152899824253076e-05,
"loss": 0.8889057040214539,
"step": 310
},
{
"epoch": 0.13164556962025317,
"grad_norm": 2.4651103019714355,
"learning_rate": 2.7328646748681902e-05,
"loss": 1.025565505027771,
"step": 312
},
{
"epoch": 0.13248945147679325,
"grad_norm": 1.8522284030914307,
"learning_rate": 2.7504393673110725e-05,
"loss": 0.868915855884552,
"step": 314
},
{
"epoch": 0.13333333333333333,
"grad_norm": 1.8048083782196045,
"learning_rate": 2.7680140597539544e-05,
"loss": 0.8821638226509094,
"step": 316
},
{
"epoch": 0.1341772151898734,
"grad_norm": 1.9933605194091797,
"learning_rate": 2.7855887521968367e-05,
"loss": 0.8735360503196716,
"step": 318
},
{
"epoch": 0.1350210970464135,
"grad_norm": 2.044337034225464,
"learning_rate": 2.8031634446397186e-05,
"loss": 0.8288834691047668,
"step": 320
},
{
"epoch": 0.1358649789029536,
"grad_norm": 2.416067361831665,
"learning_rate": 2.8207381370826015e-05,
"loss": 0.9104969501495361,
"step": 322
},
{
"epoch": 0.13670886075949368,
"grad_norm": 2.0731265544891357,
"learning_rate": 2.8383128295254834e-05,
"loss": 0.8689924478530884,
"step": 324
},
{
"epoch": 0.13755274261603376,
"grad_norm": 2.049126386642456,
"learning_rate": 2.8558875219683657e-05,
"loss": 0.9312222003936768,
"step": 326
},
{
"epoch": 0.13839662447257384,
"grad_norm": 2.131026268005371,
"learning_rate": 2.8734622144112476e-05,
"loss": 0.8933501839637756,
"step": 328
},
{
"epoch": 0.13924050632911392,
"grad_norm": 1.766754150390625,
"learning_rate": 2.8910369068541305e-05,
"loss": 0.8998261094093323,
"step": 330
},
{
"epoch": 0.140084388185654,
"grad_norm": 2.197706460952759,
"learning_rate": 2.9086115992970124e-05,
"loss": 0.8826426267623901,
"step": 332
},
{
"epoch": 0.1409282700421941,
"grad_norm": 1.953715443611145,
"learning_rate": 2.9261862917398947e-05,
"loss": 0.8590307831764221,
"step": 334
},
{
"epoch": 0.14177215189873418,
"grad_norm": 2.200929880142212,
"learning_rate": 2.943760984182777e-05,
"loss": 0.9317060708999634,
"step": 336
},
{
"epoch": 0.14261603375527426,
"grad_norm": 2.1195082664489746,
"learning_rate": 2.961335676625659e-05,
"loss": 0.9965578317642212,
"step": 338
},
{
"epoch": 0.14345991561181434,
"grad_norm": 2.3449771404266357,
"learning_rate": 2.9789103690685414e-05,
"loss": 0.8353848457336426,
"step": 340
},
{
"epoch": 0.14430379746835442,
"grad_norm": 2.000497579574585,
"learning_rate": 2.9964850615114237e-05,
"loss": 0.9154735803604126,
"step": 342
},
{
"epoch": 0.1451476793248945,
"grad_norm": 2.141890525817871,
"learning_rate": 3.014059753954306e-05,
"loss": 0.9530655741691589,
"step": 344
},
{
"epoch": 0.1459915611814346,
"grad_norm": 1.7717392444610596,
"learning_rate": 3.031634446397188e-05,
"loss": 0.896998405456543,
"step": 346
},
{
"epoch": 0.1468354430379747,
"grad_norm": 1.8796685934066772,
"learning_rate": 3.0492091388400708e-05,
"loss": 0.9084208011627197,
"step": 348
},
{
"epoch": 0.14767932489451477,
"grad_norm": 2.0298709869384766,
"learning_rate": 3.066783831282953e-05,
"loss": 0.9183387756347656,
"step": 350
},
{
"epoch": 0.14852320675105485,
"grad_norm": 1.9245645999908447,
"learning_rate": 3.084358523725835e-05,
"loss": 0.8624772429466248,
"step": 352
},
{
"epoch": 0.14936708860759493,
"grad_norm": 2.325681209564209,
"learning_rate": 3.101933216168717e-05,
"loss": 0.9142400026321411,
"step": 354
},
{
"epoch": 0.150210970464135,
"grad_norm": 2.1200530529022217,
"learning_rate": 3.1195079086115995e-05,
"loss": 0.9064018130302429,
"step": 356
},
{
"epoch": 0.15105485232067511,
"grad_norm": 1.979314923286438,
"learning_rate": 3.137082601054482e-05,
"loss": 0.9199238419532776,
"step": 358
},
{
"epoch": 0.1518987341772152,
"grad_norm": 2.1122689247131348,
"learning_rate": 3.154657293497364e-05,
"loss": 0.8030132055282593,
"step": 360
},
{
"epoch": 0.15274261603375527,
"grad_norm": 2.105767250061035,
"learning_rate": 3.172231985940246e-05,
"loss": 0.9185854196548462,
"step": 362
},
{
"epoch": 0.15358649789029535,
"grad_norm": 2.179471015930176,
"learning_rate": 3.1898066783831285e-05,
"loss": 0.9365083575248718,
"step": 364
},
{
"epoch": 0.15443037974683543,
"grad_norm": 2.1444311141967773,
"learning_rate": 3.207381370826011e-05,
"loss": 0.8965140581130981,
"step": 366
},
{
"epoch": 0.15527426160337554,
"grad_norm": 2.4171674251556396,
"learning_rate": 3.224956063268893e-05,
"loss": 0.8787504434585571,
"step": 368
},
{
"epoch": 0.15611814345991562,
"grad_norm": 2.418628215789795,
"learning_rate": 3.242530755711775e-05,
"loss": 0.8925284147262573,
"step": 370
},
{
"epoch": 0.1569620253164557,
"grad_norm": 2.2228314876556396,
"learning_rate": 3.2601054481546575e-05,
"loss": 0.876179039478302,
"step": 372
},
{
"epoch": 0.15780590717299578,
"grad_norm": 2.324237108230591,
"learning_rate": 3.27768014059754e-05,
"loss": 0.8365707993507385,
"step": 374
},
{
"epoch": 0.15864978902953586,
"grad_norm": 2.6344552040100098,
"learning_rate": 3.295254833040422e-05,
"loss": 0.7864399552345276,
"step": 376
},
{
"epoch": 0.15949367088607594,
"grad_norm": 2.047536611557007,
"learning_rate": 3.312829525483304e-05,
"loss": 0.9271875023841858,
"step": 378
},
{
"epoch": 0.16033755274261605,
"grad_norm": 2.120025157928467,
"learning_rate": 3.3304042179261865e-05,
"loss": 0.8799133896827698,
"step": 380
},
{
"epoch": 0.16118143459915613,
"grad_norm": 2.363692045211792,
"learning_rate": 3.347978910369069e-05,
"loss": 0.8973530530929565,
"step": 382
},
{
"epoch": 0.1620253164556962,
"grad_norm": 2.1796772480010986,
"learning_rate": 3.365553602811951e-05,
"loss": 1.0277652740478516,
"step": 384
},
{
"epoch": 0.16286919831223629,
"grad_norm": 1.9192595481872559,
"learning_rate": 3.383128295254833e-05,
"loss": 0.8909643888473511,
"step": 386
},
{
"epoch": 0.16371308016877636,
"grad_norm": 1.7874376773834229,
"learning_rate": 3.4007029876977155e-05,
"loss": 0.837049663066864,
"step": 388
},
{
"epoch": 0.16455696202531644,
"grad_norm": 2.3402366638183594,
"learning_rate": 3.4182776801405974e-05,
"loss": 0.8625202775001526,
"step": 390
},
{
"epoch": 0.16540084388185655,
"grad_norm": 2.1137185096740723,
"learning_rate": 3.43585237258348e-05,
"loss": 0.9288321137428284,
"step": 392
},
{
"epoch": 0.16624472573839663,
"grad_norm": 2.3776895999908447,
"learning_rate": 3.453427065026362e-05,
"loss": 0.9328726530075073,
"step": 394
},
{
"epoch": 0.1670886075949367,
"grad_norm": 2.34941029548645,
"learning_rate": 3.4710017574692445e-05,
"loss": 0.9273309707641602,
"step": 396
},
{
"epoch": 0.1679324894514768,
"grad_norm": 2.1272573471069336,
"learning_rate": 3.4885764499121264e-05,
"loss": 0.8703887462615967,
"step": 398
},
{
"epoch": 0.16877637130801687,
"grad_norm": 2.047290802001953,
"learning_rate": 3.506151142355009e-05,
"loss": 0.8808165788650513,
"step": 400
},
{
"epoch": 0.16877637130801687,
"eval_loss": 0.9282881617546082,
"eval_runtime": 869.6867,
"eval_samples_per_second": 2.423,
"eval_steps_per_second": 2.423,
"step": 400
},
{
"epoch": 0.16962025316455695,
"grad_norm": 1.9874159097671509,
"learning_rate": 3.5237258347978916e-05,
"loss": 0.9643645286560059,
"step": 402
},
{
"epoch": 0.17046413502109706,
"grad_norm": 1.9299919605255127,
"learning_rate": 3.5413005272407735e-05,
"loss": 0.9173495769500732,
"step": 404
},
{
"epoch": 0.17130801687763714,
"grad_norm": 2.3379697799682617,
"learning_rate": 3.5588752196836555e-05,
"loss": 0.8998411893844604,
"step": 406
},
{
"epoch": 0.17215189873417722,
"grad_norm": 2.241370916366577,
"learning_rate": 3.5764499121265374e-05,
"loss": 0.9310802221298218,
"step": 408
},
{
"epoch": 0.1729957805907173,
"grad_norm": 2.4490108489990234,
"learning_rate": 3.5940246045694206e-05,
"loss": 0.9605053067207336,
"step": 410
},
{
"epoch": 0.17383966244725738,
"grad_norm": 1.8247230052947998,
"learning_rate": 3.6115992970123026e-05,
"loss": 0.8485683798789978,
"step": 412
},
{
"epoch": 0.17468354430379746,
"grad_norm": 2.4608843326568604,
"learning_rate": 3.6291739894551845e-05,
"loss": 0.9325968623161316,
"step": 414
},
{
"epoch": 0.17552742616033756,
"grad_norm": 1.8923161029815674,
"learning_rate": 3.646748681898067e-05,
"loss": 0.9125096201896667,
"step": 416
},
{
"epoch": 0.17637130801687764,
"grad_norm": 1.8502769470214844,
"learning_rate": 3.6643233743409497e-05,
"loss": 0.8852217197418213,
"step": 418
},
{
"epoch": 0.17721518987341772,
"grad_norm": 1.9155100584030151,
"learning_rate": 3.6818980667838316e-05,
"loss": 0.9192792773246765,
"step": 420
},
{
"epoch": 0.1780590717299578,
"grad_norm": 2.181476593017578,
"learning_rate": 3.6994727592267135e-05,
"loss": 0.8787404298782349,
"step": 422
},
{
"epoch": 0.17890295358649788,
"grad_norm": 2.2469847202301025,
"learning_rate": 3.717047451669596e-05,
"loss": 0.9109582901000977,
"step": 424
},
{
"epoch": 0.17974683544303796,
"grad_norm": 2.08145809173584,
"learning_rate": 3.734622144112479e-05,
"loss": 0.8560389280319214,
"step": 426
},
{
"epoch": 0.18059071729957807,
"grad_norm": 4.121932506561279,
"learning_rate": 3.7521968365553606e-05,
"loss": 0.9456104040145874,
"step": 428
},
{
"epoch": 0.18143459915611815,
"grad_norm": 2.177459478378296,
"learning_rate": 3.7697715289982425e-05,
"loss": 0.8421300649642944,
"step": 430
},
{
"epoch": 0.18227848101265823,
"grad_norm": 2.324970245361328,
"learning_rate": 3.787346221441125e-05,
"loss": 0.9199858903884888,
"step": 432
},
{
"epoch": 0.1831223628691983,
"grad_norm": 2.133718490600586,
"learning_rate": 3.804920913884007e-05,
"loss": 0.8953126668930054,
"step": 434
},
{
"epoch": 0.1839662447257384,
"grad_norm": 1.8527995347976685,
"learning_rate": 3.8224956063268896e-05,
"loss": 0.8732239007949829,
"step": 436
},
{
"epoch": 0.1848101265822785,
"grad_norm": 1.95817232131958,
"learning_rate": 3.8400702987697715e-05,
"loss": 0.8818746209144592,
"step": 438
},
{
"epoch": 0.18565400843881857,
"grad_norm": 2.2107293605804443,
"learning_rate": 3.857644991212654e-05,
"loss": 0.9153507947921753,
"step": 440
},
{
"epoch": 0.18649789029535865,
"grad_norm": 2.004754066467285,
"learning_rate": 3.875219683655536e-05,
"loss": 0.8960154056549072,
"step": 442
},
{
"epoch": 0.18734177215189873,
"grad_norm": 2.1851706504821777,
"learning_rate": 3.8927943760984186e-05,
"loss": 0.909011721611023,
"step": 444
},
{
"epoch": 0.1881856540084388,
"grad_norm": 2.4492485523223877,
"learning_rate": 3.9103690685413005e-05,
"loss": 0.8880158066749573,
"step": 446
},
{
"epoch": 0.1890295358649789,
"grad_norm": 2.745453119277954,
"learning_rate": 3.927943760984183e-05,
"loss": 0.8500842452049255,
"step": 448
},
{
"epoch": 0.189873417721519,
"grad_norm": 2.1924264430999756,
"learning_rate": 3.945518453427065e-05,
"loss": 0.9004045724868774,
"step": 450
},
{
"epoch": 0.19071729957805908,
"grad_norm": 2.4051687717437744,
"learning_rate": 3.9630931458699476e-05,
"loss": 0.9020664095878601,
"step": 452
},
{
"epoch": 0.19156118143459916,
"grad_norm": 1.8077667951583862,
"learning_rate": 3.9806678383128295e-05,
"loss": 0.8639500737190247,
"step": 454
},
{
"epoch": 0.19240506329113924,
"grad_norm": 2.089043378829956,
"learning_rate": 3.998242530755712e-05,
"loss": 0.8642048239707947,
"step": 456
},
{
"epoch": 0.19324894514767932,
"grad_norm": 2.029578447341919,
"learning_rate": 4.015817223198594e-05,
"loss": 0.9371927380561829,
"step": 458
},
{
"epoch": 0.1940928270042194,
"grad_norm": 2.26582407951355,
"learning_rate": 4.033391915641476e-05,
"loss": 0.9120588302612305,
"step": 460
},
{
"epoch": 0.1949367088607595,
"grad_norm": 1.8671411275863647,
"learning_rate": 4.050966608084359e-05,
"loss": 0.8758644461631775,
"step": 462
},
{
"epoch": 0.19578059071729959,
"grad_norm": 1.9403492212295532,
"learning_rate": 4.068541300527241e-05,
"loss": 0.914577305316925,
"step": 464
},
{
"epoch": 0.19662447257383966,
"grad_norm": 1.9939641952514648,
"learning_rate": 4.086115992970123e-05,
"loss": 0.8592531681060791,
"step": 466
},
{
"epoch": 0.19746835443037974,
"grad_norm": 2.1511380672454834,
"learning_rate": 4.103690685413005e-05,
"loss": 0.9251965880393982,
"step": 468
},
{
"epoch": 0.19831223628691982,
"grad_norm": 2.2260982990264893,
"learning_rate": 4.121265377855888e-05,
"loss": 0.8465172052383423,
"step": 470
},
{
"epoch": 0.1991561181434599,
"grad_norm": 2.0510010719299316,
"learning_rate": 4.13884007029877e-05,
"loss": 0.8943672180175781,
"step": 472
},
{
"epoch": 0.2,
"grad_norm": 2.2040133476257324,
"learning_rate": 4.156414762741652e-05,
"loss": 0.9594319462776184,
"step": 474
},
{
"epoch": 0.2008438818565401,
"grad_norm": 2.355181932449341,
"learning_rate": 4.173989455184534e-05,
"loss": 0.9031813144683838,
"step": 476
},
{
"epoch": 0.20168776371308017,
"grad_norm": 2.8434665203094482,
"learning_rate": 4.1915641476274166e-05,
"loss": 0.9225798845291138,
"step": 478
},
{
"epoch": 0.20253164556962025,
"grad_norm": 2.1715340614318848,
"learning_rate": 4.209138840070299e-05,
"loss": 0.894163966178894,
"step": 480
},
{
"epoch": 0.20337552742616033,
"grad_norm": 2.078916072845459,
"learning_rate": 4.226713532513181e-05,
"loss": 0.8424109816551208,
"step": 482
},
{
"epoch": 0.2042194092827004,
"grad_norm": 1.9760961532592773,
"learning_rate": 4.244288224956064e-05,
"loss": 0.9102715849876404,
"step": 484
},
{
"epoch": 0.20506329113924052,
"grad_norm": 1.9684507846832275,
"learning_rate": 4.2618629173989456e-05,
"loss": 0.8693854808807373,
"step": 486
},
{
"epoch": 0.2059071729957806,
"grad_norm": 2.1633450984954834,
"learning_rate": 4.279437609841828e-05,
"loss": 0.8617543578147888,
"step": 488
},
{
"epoch": 0.20675105485232068,
"grad_norm": 2.2695257663726807,
"learning_rate": 4.29701230228471e-05,
"loss": 0.9167086482048035,
"step": 490
},
{
"epoch": 0.20759493670886076,
"grad_norm": 2.4180049896240234,
"learning_rate": 4.314586994727593e-05,
"loss": 0.8333520889282227,
"step": 492
},
{
"epoch": 0.20843881856540084,
"grad_norm": 2.2942769527435303,
"learning_rate": 4.3321616871704746e-05,
"loss": 0.918351411819458,
"step": 494
},
{
"epoch": 0.20928270042194091,
"grad_norm": 1.826458215713501,
"learning_rate": 4.349736379613357e-05,
"loss": 0.8565171957015991,
"step": 496
},
{
"epoch": 0.21012658227848102,
"grad_norm": 1.9694055318832397,
"learning_rate": 4.367311072056239e-05,
"loss": 0.8684167861938477,
"step": 498
},
{
"epoch": 0.2109704641350211,
"grad_norm": 1.892659306526184,
"learning_rate": 4.384885764499122e-05,
"loss": 0.7752788662910461,
"step": 500
},
{
"epoch": 0.2109704641350211,
"eval_loss": 0.9080732464790344,
"eval_runtime": 857.0753,
"eval_samples_per_second": 2.458,
"eval_steps_per_second": 2.458,
"step": 500
},
{
"epoch": 0.21181434599156118,
"grad_norm": 1.9322253465652466,
"learning_rate": 4.4024604569420036e-05,
"loss": 0.948570728302002,
"step": 502
},
{
"epoch": 0.21265822784810126,
"grad_norm": 2.0456058979034424,
"learning_rate": 4.4200351493848855e-05,
"loss": 0.8741024732589722,
"step": 504
},
{
"epoch": 0.21350210970464134,
"grad_norm": 2.2406177520751953,
"learning_rate": 4.437609841827768e-05,
"loss": 0.9053841829299927,
"step": 506
},
{
"epoch": 0.21434599156118145,
"grad_norm": 2.013934850692749,
"learning_rate": 4.455184534270651e-05,
"loss": 0.8886576294898987,
"step": 508
},
{
"epoch": 0.21518987341772153,
"grad_norm": 1.9771125316619873,
"learning_rate": 4.4727592267135326e-05,
"loss": 0.8834167718887329,
"step": 510
},
{
"epoch": 0.2160337552742616,
"grad_norm": 1.785905361175537,
"learning_rate": 4.4903339191564146e-05,
"loss": 0.7938863039016724,
"step": 512
},
{
"epoch": 0.2168776371308017,
"grad_norm": 1.7946031093597412,
"learning_rate": 4.507908611599297e-05,
"loss": 0.8071596026420593,
"step": 514
},
{
"epoch": 0.21772151898734177,
"grad_norm": 2.2217721939086914,
"learning_rate": 4.52548330404218e-05,
"loss": 0.797417163848877,
"step": 516
},
{
"epoch": 0.21856540084388185,
"grad_norm": 1.9022471904754639,
"learning_rate": 4.5430579964850617e-05,
"loss": 0.8109536170959473,
"step": 518
},
{
"epoch": 0.21940928270042195,
"grad_norm": 1.8988343477249146,
"learning_rate": 4.5606326889279436e-05,
"loss": 0.8647034168243408,
"step": 520
},
{
"epoch": 0.22025316455696203,
"grad_norm": 2.6014881134033203,
"learning_rate": 4.578207381370827e-05,
"loss": 0.8763713240623474,
"step": 522
},
{
"epoch": 0.2210970464135021,
"grad_norm": 1.9512032270431519,
"learning_rate": 4.595782073813709e-05,
"loss": 0.9525764584541321,
"step": 524
},
{
"epoch": 0.2219409282700422,
"grad_norm": 1.9246160984039307,
"learning_rate": 4.613356766256591e-05,
"loss": 0.8839208483695984,
"step": 526
},
{
"epoch": 0.22278481012658227,
"grad_norm": 1.9713703393936157,
"learning_rate": 4.6309314586994726e-05,
"loss": 0.8888868093490601,
"step": 528
},
{
"epoch": 0.22362869198312235,
"grad_norm": 2.1175239086151123,
"learning_rate": 4.648506151142355e-05,
"loss": 0.8123540878295898,
"step": 530
},
{
"epoch": 0.22447257383966246,
"grad_norm": 1.7656135559082031,
"learning_rate": 4.666080843585238e-05,
"loss": 0.7447702884674072,
"step": 532
},
{
"epoch": 0.22531645569620254,
"grad_norm": 2.15748929977417,
"learning_rate": 4.68365553602812e-05,
"loss": 0.8778411746025085,
"step": 534
},
{
"epoch": 0.22616033755274262,
"grad_norm": 2.1733345985412598,
"learning_rate": 4.7012302284710016e-05,
"loss": 0.8985894918441772,
"step": 536
},
{
"epoch": 0.2270042194092827,
"grad_norm": 1.7182204723358154,
"learning_rate": 4.718804920913884e-05,
"loss": 0.8031114339828491,
"step": 538
},
{
"epoch": 0.22784810126582278,
"grad_norm": 1.8586329221725464,
"learning_rate": 4.736379613356767e-05,
"loss": 0.9399706721305847,
"step": 540
},
{
"epoch": 0.22869198312236286,
"grad_norm": 2.105637311935425,
"learning_rate": 4.753954305799649e-05,
"loss": 0.8672119975090027,
"step": 542
},
{
"epoch": 0.22953586497890296,
"grad_norm": 1.760584831237793,
"learning_rate": 4.771528998242531e-05,
"loss": 0.8663905262947083,
"step": 544
},
{
"epoch": 0.23037974683544304,
"grad_norm": 1.579990267753601,
"learning_rate": 4.789103690685413e-05,
"loss": 0.8575801849365234,
"step": 546
},
{
"epoch": 0.23122362869198312,
"grad_norm": 1.9242485761642456,
"learning_rate": 4.806678383128295e-05,
"loss": 0.828412652015686,
"step": 548
},
{
"epoch": 0.2320675105485232,
"grad_norm": 1.812137246131897,
"learning_rate": 4.824253075571178e-05,
"loss": 0.8183464407920837,
"step": 550
},
{
"epoch": 0.23291139240506328,
"grad_norm": 1.804733395576477,
"learning_rate": 4.84182776801406e-05,
"loss": 0.7822491526603699,
"step": 552
},
{
"epoch": 0.23375527426160336,
"grad_norm": 2.052257537841797,
"learning_rate": 4.859402460456942e-05,
"loss": 0.9050943851470947,
"step": 554
},
{
"epoch": 0.23459915611814347,
"grad_norm": 1.9803621768951416,
"learning_rate": 4.876977152899824e-05,
"loss": 0.8846852779388428,
"step": 556
},
{
"epoch": 0.23544303797468355,
"grad_norm": 1.820125937461853,
"learning_rate": 4.894551845342707e-05,
"loss": 0.8649531602859497,
"step": 558
},
{
"epoch": 0.23628691983122363,
"grad_norm": 2.0963921546936035,
"learning_rate": 4.912126537785589e-05,
"loss": 0.9307748079299927,
"step": 560
},
{
"epoch": 0.2371308016877637,
"grad_norm": 2.079697847366333,
"learning_rate": 4.929701230228471e-05,
"loss": 0.9092473387718201,
"step": 562
},
{
"epoch": 0.2379746835443038,
"grad_norm": 2.0291287899017334,
"learning_rate": 4.947275922671353e-05,
"loss": 0.8976567983627319,
"step": 564
},
{
"epoch": 0.23881856540084387,
"grad_norm": 1.9636707305908203,
"learning_rate": 4.964850615114236e-05,
"loss": 0.8931006193161011,
"step": 566
},
{
"epoch": 0.23966244725738398,
"grad_norm": 1.922049880027771,
"learning_rate": 4.982425307557118e-05,
"loss": 0.829562246799469,
"step": 568
},
{
"epoch": 0.24050632911392406,
"grad_norm": 2.150334596633911,
"learning_rate": 5e-05,
"loss": 0.8568030595779419,
"step": 570
},
{
"epoch": 0.24135021097046414,
"grad_norm": 2.024437427520752,
"learning_rate": 5.017574692442882e-05,
"loss": 0.8623508810997009,
"step": 572
},
{
"epoch": 0.24219409282700421,
"grad_norm": 1.8312673568725586,
"learning_rate": 5.035149384885765e-05,
"loss": 0.7853795886039734,
"step": 574
},
{
"epoch": 0.2430379746835443,
"grad_norm": 1.9271961450576782,
"learning_rate": 5.0527240773286467e-05,
"loss": 0.9727587103843689,
"step": 576
},
{
"epoch": 0.2438818565400844,
"grad_norm": 1.931249976158142,
"learning_rate": 5.0702987697715286e-05,
"loss": 0.8859632015228271,
"step": 578
},
{
"epoch": 0.24472573839662448,
"grad_norm": 1.8195210695266724,
"learning_rate": 5.087873462214412e-05,
"loss": 0.8959492444992065,
"step": 580
},
{
"epoch": 0.24556962025316456,
"grad_norm": 2.0018749237060547,
"learning_rate": 5.105448154657294e-05,
"loss": 0.8146185874938965,
"step": 582
},
{
"epoch": 0.24641350210970464,
"grad_norm": 2.09798526763916,
"learning_rate": 5.1230228471001764e-05,
"loss": 0.8545317053794861,
"step": 584
},
{
"epoch": 0.24725738396624472,
"grad_norm": 1.8063944578170776,
"learning_rate": 5.140597539543058e-05,
"loss": 0.8650105595588684,
"step": 586
},
{
"epoch": 0.2481012658227848,
"grad_norm": 1.8535740375518799,
"learning_rate": 5.15817223198594e-05,
"loss": 0.8395693302154541,
"step": 588
},
{
"epoch": 0.2489451476793249,
"grad_norm": 2.1443960666656494,
"learning_rate": 5.175746924428823e-05,
"loss": 0.8267397284507751,
"step": 590
},
{
"epoch": 0.249789029535865,
"grad_norm": 1.9637391567230225,
"learning_rate": 5.193321616871705e-05,
"loss": 0.8500015139579773,
"step": 592
},
{
"epoch": 0.25063291139240507,
"grad_norm": 1.9457582235336304,
"learning_rate": 5.2108963093145866e-05,
"loss": 0.887481153011322,
"step": 594
},
{
"epoch": 0.2514767932489452,
"grad_norm": 1.7458715438842773,
"learning_rate": 5.228471001757469e-05,
"loss": 0.8444154858589172,
"step": 596
},
{
"epoch": 0.2523206751054852,
"grad_norm": 1.8341439962387085,
"learning_rate": 5.2460456942003525e-05,
"loss": 0.8301781415939331,
"step": 598
},
{
"epoch": 0.25316455696202533,
"grad_norm": 2.127747058868408,
"learning_rate": 5.2636203866432344e-05,
"loss": 0.8921551704406738,
"step": 600
},
{
"epoch": 0.25316455696202533,
"eval_loss": 0.8903881311416626,
"eval_runtime": 845.9969,
"eval_samples_per_second": 2.491,
"eval_steps_per_second": 2.491,
"step": 600
},
{
"epoch": 0.2540084388185654,
"grad_norm": 2.421459674835205,
"learning_rate": 5.281195079086116e-05,
"loss": 0.8678019642829895,
"step": 602
},
{
"epoch": 0.2548523206751055,
"grad_norm": 1.7736057043075562,
"learning_rate": 5.298769771528999e-05,
"loss": 0.8564275503158569,
"step": 604
},
{
"epoch": 0.25569620253164554,
"grad_norm": 2.28430438041687,
"learning_rate": 5.316344463971881e-05,
"loss": 0.8529049158096313,
"step": 606
},
{
"epoch": 0.25654008438818565,
"grad_norm": 1.8892366886138916,
"learning_rate": 5.333919156414763e-05,
"loss": 0.8672881126403809,
"step": 608
},
{
"epoch": 0.25738396624472576,
"grad_norm": 1.9059702157974243,
"learning_rate": 5.3514938488576446e-05,
"loss": 0.9094445109367371,
"step": 610
},
{
"epoch": 0.2582278481012658,
"grad_norm": 2.0657339096069336,
"learning_rate": 5.369068541300527e-05,
"loss": 0.8361946940422058,
"step": 612
},
{
"epoch": 0.2590717299578059,
"grad_norm": 1.8987553119659424,
"learning_rate": 5.3866432337434105e-05,
"loss": 0.8319925665855408,
"step": 614
},
{
"epoch": 0.25991561181434597,
"grad_norm": 2.1176226139068604,
"learning_rate": 5.4042179261862924e-05,
"loss": 0.9818069934844971,
"step": 616
},
{
"epoch": 0.2607594936708861,
"grad_norm": 2.142096519470215,
"learning_rate": 5.421792618629174e-05,
"loss": 0.8675919771194458,
"step": 618
},
{
"epoch": 0.2616033755274262,
"grad_norm": 1.9527089595794678,
"learning_rate": 5.439367311072057e-05,
"loss": 0.8845479488372803,
"step": 620
},
{
"epoch": 0.26244725738396624,
"grad_norm": 1.7071453332901,
"learning_rate": 5.456942003514939e-05,
"loss": 0.809393048286438,
"step": 622
},
{
"epoch": 0.26329113924050634,
"grad_norm": 1.9133527278900146,
"learning_rate": 5.474516695957821e-05,
"loss": 0.8262377977371216,
"step": 624
},
{
"epoch": 0.2641350210970464,
"grad_norm": 2.0217554569244385,
"learning_rate": 5.492091388400703e-05,
"loss": 0.9006736278533936,
"step": 626
},
{
"epoch": 0.2649789029535865,
"grad_norm": 1.773273229598999,
"learning_rate": 5.509666080843585e-05,
"loss": 0.8243603110313416,
"step": 628
},
{
"epoch": 0.26582278481012656,
"grad_norm": 1.6580880880355835,
"learning_rate": 5.527240773286467e-05,
"loss": 0.8112778663635254,
"step": 630
},
{
"epoch": 0.26666666666666666,
"grad_norm": 1.8342082500457764,
"learning_rate": 5.5448154657293504e-05,
"loss": 0.8390820622444153,
"step": 632
},
{
"epoch": 0.26751054852320677,
"grad_norm": 1.863695502281189,
"learning_rate": 5.5623901581722323e-05,
"loss": 0.8264521360397339,
"step": 634
},
{
"epoch": 0.2683544303797468,
"grad_norm": 1.9462928771972656,
"learning_rate": 5.579964850615115e-05,
"loss": 0.9512701630592346,
"step": 636
},
{
"epoch": 0.26919831223628693,
"grad_norm": 1.7776058912277222,
"learning_rate": 5.597539543057997e-05,
"loss": 0.9422703981399536,
"step": 638
},
{
"epoch": 0.270042194092827,
"grad_norm": 2.9457077980041504,
"learning_rate": 5.615114235500879e-05,
"loss": 0.7991042137145996,
"step": 640
},
{
"epoch": 0.2708860759493671,
"grad_norm": 1.445265531539917,
"learning_rate": 5.6326889279437614e-05,
"loss": 0.8188099265098572,
"step": 642
},
{
"epoch": 0.2717299578059072,
"grad_norm": 2.063850164413452,
"learning_rate": 5.650263620386643e-05,
"loss": 0.9799772500991821,
"step": 644
},
{
"epoch": 0.27257383966244725,
"grad_norm": 2.0488009452819824,
"learning_rate": 5.667838312829525e-05,
"loss": 0.8462742567062378,
"step": 646
},
{
"epoch": 0.27341772151898736,
"grad_norm": 1.8747851848602295,
"learning_rate": 5.685413005272408e-05,
"loss": 0.8226412534713745,
"step": 648
},
{
"epoch": 0.2742616033755274,
"grad_norm": 1.849074125289917,
"learning_rate": 5.702987697715291e-05,
"loss": 0.9146338105201721,
"step": 650
},
{
"epoch": 0.2751054852320675,
"grad_norm": 1.7738500833511353,
"learning_rate": 5.720562390158173e-05,
"loss": 0.7574424147605896,
"step": 652
},
{
"epoch": 0.2759493670886076,
"grad_norm": 1.911102294921875,
"learning_rate": 5.738137082601055e-05,
"loss": 0.8930003046989441,
"step": 654
},
{
"epoch": 0.2767932489451477,
"grad_norm": 1.5716617107391357,
"learning_rate": 5.755711775043937e-05,
"loss": 0.7578965425491333,
"step": 656
},
{
"epoch": 0.2776371308016878,
"grad_norm": 1.789036512374878,
"learning_rate": 5.7732864674868194e-05,
"loss": 0.8149038553237915,
"step": 658
},
{
"epoch": 0.27848101265822783,
"grad_norm": 1.68622624874115,
"learning_rate": 5.790861159929701e-05,
"loss": 0.8265765905380249,
"step": 660
},
{
"epoch": 0.27932489451476794,
"grad_norm": 2.078423261642456,
"learning_rate": 5.808435852372583e-05,
"loss": 0.9651970267295837,
"step": 662
},
{
"epoch": 0.280168776371308,
"grad_norm": 1.7878645658493042,
"learning_rate": 5.826010544815466e-05,
"loss": 0.8295148015022278,
"step": 664
},
{
"epoch": 0.2810126582278481,
"grad_norm": 1.970838189125061,
"learning_rate": 5.843585237258348e-05,
"loss": 0.7778491377830505,
"step": 666
},
{
"epoch": 0.2818565400843882,
"grad_norm": 1.943596363067627,
"learning_rate": 5.861159929701231e-05,
"loss": 0.9818071722984314,
"step": 668
},
{
"epoch": 0.28270042194092826,
"grad_norm": 1.8793812990188599,
"learning_rate": 5.878734622144113e-05,
"loss": 0.9297797083854675,
"step": 670
},
{
"epoch": 0.28354430379746837,
"grad_norm": 1.8813483715057373,
"learning_rate": 5.8963093145869955e-05,
"loss": 0.8748109936714172,
"step": 672
},
{
"epoch": 0.2843881856540084,
"grad_norm": 1.7658562660217285,
"learning_rate": 5.9138840070298774e-05,
"loss": 0.8505244851112366,
"step": 674
},
{
"epoch": 0.2852320675105485,
"grad_norm": 1.6767617464065552,
"learning_rate": 5.931458699472759e-05,
"loss": 0.8476597666740417,
"step": 676
},
{
"epoch": 0.28607594936708863,
"grad_norm": 2.703104257583618,
"learning_rate": 5.949033391915641e-05,
"loss": 0.8775192499160767,
"step": 678
},
{
"epoch": 0.2869198312236287,
"grad_norm": 1.9959728717803955,
"learning_rate": 5.966608084358524e-05,
"loss": 0.855262279510498,
"step": 680
},
{
"epoch": 0.2877637130801688,
"grad_norm": 1.9093716144561768,
"learning_rate": 5.984182776801406e-05,
"loss": 0.7574936151504517,
"step": 682
},
{
"epoch": 0.28860759493670884,
"grad_norm": 1.9829599857330322,
"learning_rate": 6.001757469244289e-05,
"loss": 0.8630690574645996,
"step": 684
},
{
"epoch": 0.28945147679324895,
"grad_norm": 1.8777490854263306,
"learning_rate": 6.019332161687171e-05,
"loss": 0.8513249158859253,
"step": 686
},
{
"epoch": 0.290295358649789,
"grad_norm": 1.9453173875808716,
"learning_rate": 6.0369068541300535e-05,
"loss": 0.9097008109092712,
"step": 688
},
{
"epoch": 0.2911392405063291,
"grad_norm": 1.8527908325195312,
"learning_rate": 6.0544815465729354e-05,
"loss": 0.8291722536087036,
"step": 690
},
{
"epoch": 0.2919831223628692,
"grad_norm": 1.9255812168121338,
"learning_rate": 6.0720562390158174e-05,
"loss": 0.880009651184082,
"step": 692
},
{
"epoch": 0.29282700421940927,
"grad_norm": 1.6637977361679077,
"learning_rate": 6.0896309314587e-05,
"loss": 0.8791794180870056,
"step": 694
},
{
"epoch": 0.2936708860759494,
"grad_norm": 1.825940728187561,
"learning_rate": 6.107205623901582e-05,
"loss": 0.8662407398223877,
"step": 696
},
{
"epoch": 0.29451476793248943,
"grad_norm": 1.9348198175430298,
"learning_rate": 6.124780316344464e-05,
"loss": 0.8984515070915222,
"step": 698
},
{
"epoch": 0.29535864978902954,
"grad_norm": 1.659345030784607,
"learning_rate": 6.142355008787346e-05,
"loss": 0.827385663986206,
"step": 700
},
{
"epoch": 0.29535864978902954,
"eval_loss": 0.8730722069740295,
"eval_runtime": 858.184,
"eval_samples_per_second": 2.455,
"eval_steps_per_second": 2.455,
"step": 700
},
{
"epoch": 0.29620253164556964,
"grad_norm": 1.6531789302825928,
"learning_rate": 6.159929701230229e-05,
"loss": 0.9337764382362366,
"step": 702
},
{
"epoch": 0.2970464135021097,
"grad_norm": 1.8269121646881104,
"learning_rate": 6.177504393673111e-05,
"loss": 0.8250943422317505,
"step": 704
},
{
"epoch": 0.2978902953586498,
"grad_norm": 1.692808747291565,
"learning_rate": 6.195079086115994e-05,
"loss": 0.8657428026199341,
"step": 706
},
{
"epoch": 0.29873417721518986,
"grad_norm": 1.6736913919448853,
"learning_rate": 6.212653778558876e-05,
"loss": 0.8889590501785278,
"step": 708
},
{
"epoch": 0.29957805907172996,
"grad_norm": 1.6841140985488892,
"learning_rate": 6.230228471001758e-05,
"loss": 0.7822914123535156,
"step": 710
},
{
"epoch": 0.30042194092827,
"grad_norm": 1.6644599437713623,
"learning_rate": 6.24780316344464e-05,
"loss": 0.8747053742408752,
"step": 712
},
{
"epoch": 0.3012658227848101,
"grad_norm": 1.8187819719314575,
"learning_rate": 6.265377855887522e-05,
"loss": 0.8976446390151978,
"step": 714
},
{
"epoch": 0.30210970464135023,
"grad_norm": 1.7845178842544556,
"learning_rate": 6.282952548330404e-05,
"loss": 0.9401160478591919,
"step": 716
},
{
"epoch": 0.3029535864978903,
"grad_norm": 1.559773564338684,
"learning_rate": 6.300527240773286e-05,
"loss": 0.8754280209541321,
"step": 718
},
{
"epoch": 0.3037974683544304,
"grad_norm": 1.5919631719589233,
"learning_rate": 6.318101933216169e-05,
"loss": 0.8278581500053406,
"step": 720
},
{
"epoch": 0.30464135021097044,
"grad_norm": 1.8551076650619507,
"learning_rate": 6.335676625659052e-05,
"loss": 0.8868640065193176,
"step": 722
},
{
"epoch": 0.30548523206751055,
"grad_norm": 1.6907769441604614,
"learning_rate": 6.353251318101934e-05,
"loss": 0.8631605505943298,
"step": 724
},
{
"epoch": 0.30632911392405066,
"grad_norm": 1.820867657661438,
"learning_rate": 6.370826010544816e-05,
"loss": 0.9142873883247375,
"step": 726
},
{
"epoch": 0.3071729957805907,
"grad_norm": 1.685154676437378,
"learning_rate": 6.388400702987698e-05,
"loss": 0.8258634805679321,
"step": 728
},
{
"epoch": 0.3080168776371308,
"grad_norm": 1.9294627904891968,
"learning_rate": 6.40597539543058e-05,
"loss": 0.9545516967773438,
"step": 730
},
{
"epoch": 0.30886075949367087,
"grad_norm": 1.6075409650802612,
"learning_rate": 6.423550087873462e-05,
"loss": 0.8370757699012756,
"step": 732
},
{
"epoch": 0.309704641350211,
"grad_norm": 1.635750651359558,
"learning_rate": 6.441124780316345e-05,
"loss": 0.8356084823608398,
"step": 734
},
{
"epoch": 0.3105485232067511,
"grad_norm": 1.6376131772994995,
"learning_rate": 6.458699472759227e-05,
"loss": 0.7579531669616699,
"step": 736
},
{
"epoch": 0.31139240506329113,
"grad_norm": 1.7135766744613647,
"learning_rate": 6.47627416520211e-05,
"loss": 0.8436318039894104,
"step": 738
},
{
"epoch": 0.31223628691983124,
"grad_norm": 1.7095093727111816,
"learning_rate": 6.493848857644992e-05,
"loss": 0.7998805046081543,
"step": 740
},
{
"epoch": 0.3130801687763713,
"grad_norm": 1.782615303993225,
"learning_rate": 6.511423550087874e-05,
"loss": 0.915776789188385,
"step": 742
},
{
"epoch": 0.3139240506329114,
"grad_norm": 1.8461172580718994,
"learning_rate": 6.528998242530756e-05,
"loss": 0.8300962448120117,
"step": 744
},
{
"epoch": 0.31476793248945145,
"grad_norm": 1.5659871101379395,
"learning_rate": 6.546572934973638e-05,
"loss": 0.8239848017692566,
"step": 746
},
{
"epoch": 0.31561181434599156,
"grad_norm": 1.9997349977493286,
"learning_rate": 6.56414762741652e-05,
"loss": 0.8236988186836243,
"step": 748
},
{
"epoch": 0.31645569620253167,
"grad_norm": 1.9811526536941528,
"learning_rate": 6.581722319859403e-05,
"loss": 0.8516603112220764,
"step": 750
},
{
"epoch": 0.3172995780590717,
"grad_norm": 1.9877923727035522,
"learning_rate": 6.599297012302285e-05,
"loss": 0.9037567973136902,
"step": 752
},
{
"epoch": 0.3181434599156118,
"grad_norm": 1.6729352474212646,
"learning_rate": 6.616871704745168e-05,
"loss": 0.8350864052772522,
"step": 754
},
{
"epoch": 0.3189873417721519,
"grad_norm": 1.9055802822113037,
"learning_rate": 6.63444639718805e-05,
"loss": 0.8246616125106812,
"step": 756
},
{
"epoch": 0.319831223628692,
"grad_norm": 1.597999930381775,
"learning_rate": 6.652021089630932e-05,
"loss": 0.8014416098594666,
"step": 758
},
{
"epoch": 0.3206751054852321,
"grad_norm": 1.7432531118392944,
"learning_rate": 6.669595782073814e-05,
"loss": 0.9199523329734802,
"step": 760
},
{
"epoch": 0.32151898734177214,
"grad_norm": 1.820164442062378,
"learning_rate": 6.687170474516696e-05,
"loss": 0.7764829397201538,
"step": 762
},
{
"epoch": 0.32236286919831225,
"grad_norm": 1.6408652067184448,
"learning_rate": 6.704745166959578e-05,
"loss": 0.8072620630264282,
"step": 764
},
{
"epoch": 0.3232067510548523,
"grad_norm": 1.8894155025482178,
"learning_rate": 6.722319859402461e-05,
"loss": 0.9006885886192322,
"step": 766
},
{
"epoch": 0.3240506329113924,
"grad_norm": 1.6903613805770874,
"learning_rate": 6.739894551845343e-05,
"loss": 0.7772189378738403,
"step": 768
},
{
"epoch": 0.32489451476793246,
"grad_norm": 1.7540696859359741,
"learning_rate": 6.757469244288225e-05,
"loss": 0.8825590014457703,
"step": 770
},
{
"epoch": 0.32573839662447257,
"grad_norm": 1.603008508682251,
"learning_rate": 6.775043936731108e-05,
"loss": 0.8376453518867493,
"step": 772
},
{
"epoch": 0.3265822784810127,
"grad_norm": 1.5381462574005127,
"learning_rate": 6.79261862917399e-05,
"loss": 0.92608243227005,
"step": 774
},
{
"epoch": 0.32742616033755273,
"grad_norm": 1.4815537929534912,
"learning_rate": 6.810193321616872e-05,
"loss": 0.6842183470726013,
"step": 776
},
{
"epoch": 0.32827004219409284,
"grad_norm": 1.8543411493301392,
"learning_rate": 6.827768014059754e-05,
"loss": 0.8868235349655151,
"step": 778
},
{
"epoch": 0.3291139240506329,
"grad_norm": 1.8895748853683472,
"learning_rate": 6.845342706502637e-05,
"loss": 0.8148112297058105,
"step": 780
},
{
"epoch": 0.329957805907173,
"grad_norm": 1.8150591850280762,
"learning_rate": 6.862917398945519e-05,
"loss": 0.8760337829589844,
"step": 782
},
{
"epoch": 0.3308016877637131,
"grad_norm": 1.6661378145217896,
"learning_rate": 6.880492091388401e-05,
"loss": 0.8266322612762451,
"step": 784
},
{
"epoch": 0.33164556962025316,
"grad_norm": 2.2849128246307373,
"learning_rate": 6.898066783831283e-05,
"loss": 0.8599053025245667,
"step": 786
},
{
"epoch": 0.33248945147679326,
"grad_norm": 1.7233171463012695,
"learning_rate": 6.915641476274165e-05,
"loss": 0.8312317132949829,
"step": 788
},
{
"epoch": 0.3333333333333333,
"grad_norm": 1.7637618780136108,
"learning_rate": 6.933216168717048e-05,
"loss": 0.8379700779914856,
"step": 790
},
{
"epoch": 0.3341772151898734,
"grad_norm": 1.7780474424362183,
"learning_rate": 6.95079086115993e-05,
"loss": 0.8994934558868408,
"step": 792
},
{
"epoch": 0.33502109704641353,
"grad_norm": 1.5798883438110352,
"learning_rate": 6.968365553602812e-05,
"loss": 0.8021857738494873,
"step": 794
},
{
"epoch": 0.3358649789029536,
"grad_norm": 1.7316070795059204,
"learning_rate": 6.985940246045695e-05,
"loss": 0.8814419507980347,
"step": 796
},
{
"epoch": 0.3367088607594937,
"grad_norm": 1.711315631866455,
"learning_rate": 7.003514938488577e-05,
"loss": 0.8545029163360596,
"step": 798
},
{
"epoch": 0.33755274261603374,
"grad_norm": 1.5023137331008911,
"learning_rate": 7.021089630931459e-05,
"loss": 0.8006189465522766,
"step": 800
},
{
"epoch": 0.33755274261603374,
"eval_loss": 0.8635594248771667,
"eval_runtime": 865.9348,
"eval_samples_per_second": 2.433,
"eval_steps_per_second": 2.433,
"step": 800
},
{
"epoch": 0.33839662447257385,
"grad_norm": 1.8377124071121216,
"learning_rate": 7.038664323374341e-05,
"loss": 0.7625874280929565,
"step": 802
},
{
"epoch": 0.3392405063291139,
"grad_norm": 1.5361332893371582,
"learning_rate": 7.056239015817223e-05,
"loss": 0.8490484356880188,
"step": 804
},
{
"epoch": 0.340084388185654,
"grad_norm": 1.8727388381958008,
"learning_rate": 7.073813708260105e-05,
"loss": 0.8915753364562988,
"step": 806
},
{
"epoch": 0.3409282700421941,
"grad_norm": 1.567700743675232,
"learning_rate": 7.091388400702988e-05,
"loss": 0.8902620077133179,
"step": 808
},
{
"epoch": 0.34177215189873417,
"grad_norm": 1.5302914381027222,
"learning_rate": 7.10896309314587e-05,
"loss": 0.7897103428840637,
"step": 810
},
{
"epoch": 0.3426160337552743,
"grad_norm": 1.8819153308868408,
"learning_rate": 7.126537785588753e-05,
"loss": 0.8648831248283386,
"step": 812
},
{
"epoch": 0.3434599156118143,
"grad_norm": 1.5671379566192627,
"learning_rate": 7.144112478031635e-05,
"loss": 0.8449499607086182,
"step": 814
},
{
"epoch": 0.34430379746835443,
"grad_norm": 1.6570971012115479,
"learning_rate": 7.161687170474517e-05,
"loss": 0.848559558391571,
"step": 816
},
{
"epoch": 0.34514767932489454,
"grad_norm": 1.9108437299728394,
"learning_rate": 7.179261862917399e-05,
"loss": 0.8847543597221375,
"step": 818
},
{
"epoch": 0.3459915611814346,
"grad_norm": 1.4909496307373047,
"learning_rate": 7.196836555360281e-05,
"loss": 0.7642563581466675,
"step": 820
},
{
"epoch": 0.3468354430379747,
"grad_norm": 1.768518328666687,
"learning_rate": 7.214411247803163e-05,
"loss": 0.8714305758476257,
"step": 822
},
{
"epoch": 0.34767932489451475,
"grad_norm": 1.715343952178955,
"learning_rate": 7.231985940246046e-05,
"loss": 0.7712987661361694,
"step": 824
},
{
"epoch": 0.34852320675105486,
"grad_norm": 1.6687803268432617,
"learning_rate": 7.24956063268893e-05,
"loss": 0.8122798204421997,
"step": 826
},
{
"epoch": 0.3493670886075949,
"grad_norm": 1.5160514116287231,
"learning_rate": 7.267135325131811e-05,
"loss": 0.793245792388916,
"step": 828
},
{
"epoch": 0.350210970464135,
"grad_norm": 1.6449401378631592,
"learning_rate": 7.284710017574693e-05,
"loss": 0.8747497200965881,
"step": 830
},
{
"epoch": 0.3510548523206751,
"grad_norm": 1.3907722234725952,
"learning_rate": 7.302284710017575e-05,
"loss": 0.6743978261947632,
"step": 832
},
{
"epoch": 0.3518987341772152,
"grad_norm": 1.633555293083191,
"learning_rate": 7.319859402460457e-05,
"loss": 0.8524789214134216,
"step": 834
},
{
"epoch": 0.3527426160337553,
"grad_norm": 1.5414257049560547,
"learning_rate": 7.337434094903339e-05,
"loss": 0.8045110702514648,
"step": 836
},
{
"epoch": 0.35358649789029534,
"grad_norm": 1.8520616292953491,
"learning_rate": 7.355008787346221e-05,
"loss": 0.8319593071937561,
"step": 838
},
{
"epoch": 0.35443037974683544,
"grad_norm": 1.6629763841629028,
"learning_rate": 7.372583479789104e-05,
"loss": 0.8188939094543457,
"step": 840
},
{
"epoch": 0.35527426160337555,
"grad_norm": 1.804087519645691,
"learning_rate": 7.390158172231987e-05,
"loss": 0.8875360488891602,
"step": 842
},
{
"epoch": 0.3561181434599156,
"grad_norm": 1.6031663417816162,
"learning_rate": 7.407732864674869e-05,
"loss": 0.8159612417221069,
"step": 844
},
{
"epoch": 0.3569620253164557,
"grad_norm": 1.7413033246994019,
"learning_rate": 7.425307557117751e-05,
"loss": 0.8422684669494629,
"step": 846
},
{
"epoch": 0.35780590717299576,
"grad_norm": 1.7699719667434692,
"learning_rate": 7.442882249560633e-05,
"loss": 0.9343502521514893,
"step": 848
},
{
"epoch": 0.35864978902953587,
"grad_norm": 1.4613301753997803,
"learning_rate": 7.460456942003515e-05,
"loss": 0.8168979287147522,
"step": 850
},
{
"epoch": 0.3594936708860759,
"grad_norm": 1.542431354522705,
"learning_rate": 7.478031634446397e-05,
"loss": 0.9014382362365723,
"step": 852
},
{
"epoch": 0.36033755274261603,
"grad_norm": 1.6070159673690796,
"learning_rate": 7.49560632688928e-05,
"loss": 0.8162738084793091,
"step": 854
},
{
"epoch": 0.36118143459915614,
"grad_norm": 1.7979451417922974,
"learning_rate": 7.513181019332162e-05,
"loss": 0.8354527950286865,
"step": 856
},
{
"epoch": 0.3620253164556962,
"grad_norm": 2.327045202255249,
"learning_rate": 7.530755711775044e-05,
"loss": 0.8214042782783508,
"step": 858
},
{
"epoch": 0.3628691983122363,
"grad_norm": 1.5085111856460571,
"learning_rate": 7.548330404217927e-05,
"loss": 0.7472147941589355,
"step": 860
},
{
"epoch": 0.36371308016877635,
"grad_norm": 1.6006290912628174,
"learning_rate": 7.565905096660809e-05,
"loss": 0.7586950063705444,
"step": 862
},
{
"epoch": 0.36455696202531646,
"grad_norm": 1.5170620679855347,
"learning_rate": 7.583479789103691e-05,
"loss": 0.8169914484024048,
"step": 864
},
{
"epoch": 0.36540084388185656,
"grad_norm": 1.5848352909088135,
"learning_rate": 7.601054481546573e-05,
"loss": 0.8263922929763794,
"step": 866
},
{
"epoch": 0.3662447257383966,
"grad_norm": 1.8502342700958252,
"learning_rate": 7.618629173989455e-05,
"loss": 0.8726240992546082,
"step": 868
},
{
"epoch": 0.3670886075949367,
"grad_norm": 1.506847620010376,
"learning_rate": 7.636203866432338e-05,
"loss": 0.7220374941825867,
"step": 870
},
{
"epoch": 0.3679324894514768,
"grad_norm": 1.5350452661514282,
"learning_rate": 7.65377855887522e-05,
"loss": 0.8028547167778015,
"step": 872
},
{
"epoch": 0.3687763713080169,
"grad_norm": 1.5011043548583984,
"learning_rate": 7.671353251318102e-05,
"loss": 0.7659649848937988,
"step": 874
},
{
"epoch": 0.369620253164557,
"grad_norm": 1.7019832134246826,
"learning_rate": 7.688927943760984e-05,
"loss": 0.8773653507232666,
"step": 876
},
{
"epoch": 0.37046413502109704,
"grad_norm": 1.4918498992919922,
"learning_rate": 7.706502636203867e-05,
"loss": 0.7977569103240967,
"step": 878
},
{
"epoch": 0.37130801687763715,
"grad_norm": 1.6422638893127441,
"learning_rate": 7.724077328646749e-05,
"loss": 0.7491976022720337,
"step": 880
},
{
"epoch": 0.3721518987341772,
"grad_norm": 1.7590434551239014,
"learning_rate": 7.741652021089631e-05,
"loss": 0.8754181265830994,
"step": 882
},
{
"epoch": 0.3729957805907173,
"grad_norm": 3.868894100189209,
"learning_rate": 7.759226713532513e-05,
"loss": 0.8482301235198975,
"step": 884
},
{
"epoch": 0.37383966244725736,
"grad_norm": 2.111875534057617,
"learning_rate": 7.776801405975396e-05,
"loss": 0.8109031915664673,
"step": 886
},
{
"epoch": 0.37468354430379747,
"grad_norm": 2.0838418006896973,
"learning_rate": 7.794376098418278e-05,
"loss": 0.8660775423049927,
"step": 888
},
{
"epoch": 0.3755274261603376,
"grad_norm": 1.553022027015686,
"learning_rate": 7.81195079086116e-05,
"loss": 0.8418024778366089,
"step": 890
},
{
"epoch": 0.3763713080168776,
"grad_norm": 1.334747314453125,
"learning_rate": 7.829525483304042e-05,
"loss": 0.7764869928359985,
"step": 892
},
{
"epoch": 0.37721518987341773,
"grad_norm": 1.4692286252975464,
"learning_rate": 7.847100175746925e-05,
"loss": 0.7460401654243469,
"step": 894
},
{
"epoch": 0.3780590717299578,
"grad_norm": 1.5374023914337158,
"learning_rate": 7.864674868189807e-05,
"loss": 0.7662873268127441,
"step": 896
},
{
"epoch": 0.3789029535864979,
"grad_norm": 1.5662524700164795,
"learning_rate": 7.882249560632689e-05,
"loss": 0.8165306448936462,
"step": 898
},
{
"epoch": 0.379746835443038,
"grad_norm": 4.498590469360352,
"learning_rate": 7.899824253075572e-05,
"loss": 0.7913232445716858,
"step": 900
},
{
"epoch": 0.379746835443038,
"eval_loss": 0.8491304516792297,
"eval_runtime": 852.6211,
"eval_samples_per_second": 2.471,
"eval_steps_per_second": 2.471,
"step": 900
},
{
"epoch": 0.38059071729957805,
"grad_norm": 1.6320613622665405,
"learning_rate": 7.917398945518454e-05,
"loss": 0.8097161054611206,
"step": 902
},
{
"epoch": 0.38143459915611816,
"grad_norm": 1.2562934160232544,
"learning_rate": 7.934973637961336e-05,
"loss": 0.786399781703949,
"step": 904
},
{
"epoch": 0.3822784810126582,
"grad_norm": 1.6957594156265259,
"learning_rate": 7.952548330404218e-05,
"loss": 0.8385500311851501,
"step": 906
},
{
"epoch": 0.3831223628691983,
"grad_norm": 1.6662386655807495,
"learning_rate": 7.9701230228471e-05,
"loss": 0.8157848715782166,
"step": 908
},
{
"epoch": 0.38396624472573837,
"grad_norm": 1.6717777252197266,
"learning_rate": 7.987697715289982e-05,
"loss": 0.7937968373298645,
"step": 910
},
{
"epoch": 0.3848101265822785,
"grad_norm": 1.399484395980835,
"learning_rate": 8.005272407732865e-05,
"loss": 0.7800109386444092,
"step": 912
},
{
"epoch": 0.3856540084388186,
"grad_norm": 1.5671080350875854,
"learning_rate": 8.022847100175747e-05,
"loss": 0.8135939240455627,
"step": 914
},
{
"epoch": 0.38649789029535864,
"grad_norm": 1.4427763223648071,
"learning_rate": 8.04042179261863e-05,
"loss": 0.7482035160064697,
"step": 916
},
{
"epoch": 0.38734177215189874,
"grad_norm": 1.3314121961593628,
"learning_rate": 8.057996485061512e-05,
"loss": 0.7201873064041138,
"step": 918
},
{
"epoch": 0.3881856540084388,
"grad_norm": 1.5695286989212036,
"learning_rate": 8.075571177504394e-05,
"loss": 0.7933040857315063,
"step": 920
},
{
"epoch": 0.3890295358649789,
"grad_norm": 1.5091747045516968,
"learning_rate": 8.093145869947276e-05,
"loss": 0.8058338165283203,
"step": 922
},
{
"epoch": 0.389873417721519,
"grad_norm": 1.6287630796432495,
"learning_rate": 8.110720562390158e-05,
"loss": 0.7617828249931335,
"step": 924
},
{
"epoch": 0.39071729957805906,
"grad_norm": 1.6129482984542847,
"learning_rate": 8.12829525483304e-05,
"loss": 0.8710150122642517,
"step": 926
},
{
"epoch": 0.39156118143459917,
"grad_norm": 1.6457173824310303,
"learning_rate": 8.145869947275922e-05,
"loss": 0.9122233390808105,
"step": 928
},
{
"epoch": 0.3924050632911392,
"grad_norm": 1.6768827438354492,
"learning_rate": 8.163444639718805e-05,
"loss": 0.8339303731918335,
"step": 930
},
{
"epoch": 0.39324894514767933,
"grad_norm": 1.5419740676879883,
"learning_rate": 8.181019332161688e-05,
"loss": 0.8220396041870117,
"step": 932
},
{
"epoch": 0.39409282700421944,
"grad_norm": 1.4563747644424438,
"learning_rate": 8.19859402460457e-05,
"loss": 0.8531478047370911,
"step": 934
},
{
"epoch": 0.3949367088607595,
"grad_norm": 1.6208328008651733,
"learning_rate": 8.216168717047452e-05,
"loss": 0.8330869078636169,
"step": 936
},
{
"epoch": 0.3957805907172996,
"grad_norm": 1.6492482423782349,
"learning_rate": 8.233743409490334e-05,
"loss": 0.8011296987533569,
"step": 938
},
{
"epoch": 0.39662447257383965,
"grad_norm": 2.1611905097961426,
"learning_rate": 8.251318101933216e-05,
"loss": 0.8111353516578674,
"step": 940
},
{
"epoch": 0.39746835443037976,
"grad_norm": 1.7108231782913208,
"learning_rate": 8.268892794376098e-05,
"loss": 0.8282017111778259,
"step": 942
},
{
"epoch": 0.3983122362869198,
"grad_norm": 1.543465495109558,
"learning_rate": 8.286467486818981e-05,
"loss": 0.7770059704780579,
"step": 944
},
{
"epoch": 0.3991561181434599,
"grad_norm": 1.419969081878662,
"learning_rate": 8.304042179261863e-05,
"loss": 0.8646430373191833,
"step": 946
},
{
"epoch": 0.4,
"grad_norm": 1.5002100467681885,
"learning_rate": 8.321616871704746e-05,
"loss": 0.7949403524398804,
"step": 948
},
{
"epoch": 0.4008438818565401,
"grad_norm": 1.38933265209198,
"learning_rate": 8.339191564147628e-05,
"loss": 0.8124079704284668,
"step": 950
},
{
"epoch": 0.4016877637130802,
"grad_norm": 1.5948443412780762,
"learning_rate": 8.35676625659051e-05,
"loss": 0.8634148836135864,
"step": 952
},
{
"epoch": 0.40253164556962023,
"grad_norm": 1.4437624216079712,
"learning_rate": 8.374340949033392e-05,
"loss": 0.7410681247711182,
"step": 954
},
{
"epoch": 0.40337552742616034,
"grad_norm": 1.3457095623016357,
"learning_rate": 8.391915641476274e-05,
"loss": 0.7680280208587646,
"step": 956
},
{
"epoch": 0.40421940928270045,
"grad_norm": 1.610288143157959,
"learning_rate": 8.409490333919156e-05,
"loss": 0.7921904921531677,
"step": 958
},
{
"epoch": 0.4050632911392405,
"grad_norm": 1.5321530103683472,
"learning_rate": 8.427065026362039e-05,
"loss": 0.8320037126541138,
"step": 960
},
{
"epoch": 0.4059071729957806,
"grad_norm": 1.699881672859192,
"learning_rate": 8.444639718804921e-05,
"loss": 0.8303092122077942,
"step": 962
},
{
"epoch": 0.40675105485232066,
"grad_norm": 1.591515064239502,
"learning_rate": 8.462214411247804e-05,
"loss": 0.9029796719551086,
"step": 964
},
{
"epoch": 0.40759493670886077,
"grad_norm": 1.5930429697036743,
"learning_rate": 8.479789103690686e-05,
"loss": 0.8165359497070312,
"step": 966
},
{
"epoch": 0.4084388185654008,
"grad_norm": 1.509774923324585,
"learning_rate": 8.497363796133568e-05,
"loss": 0.8276026248931885,
"step": 968
},
{
"epoch": 0.4092827004219409,
"grad_norm": 1.3617016077041626,
"learning_rate": 8.51493848857645e-05,
"loss": 0.8159419894218445,
"step": 970
},
{
"epoch": 0.41012658227848103,
"grad_norm": 1.3580708503723145,
"learning_rate": 8.532513181019332e-05,
"loss": 0.7882336378097534,
"step": 972
},
{
"epoch": 0.4109704641350211,
"grad_norm": 1.3337358236312866,
"learning_rate": 8.550087873462214e-05,
"loss": 0.7462319731712341,
"step": 974
},
{
"epoch": 0.4118143459915612,
"grad_norm": 1.450363278388977,
"learning_rate": 8.567662565905097e-05,
"loss": 0.7500866651535034,
"step": 976
},
{
"epoch": 0.41265822784810124,
"grad_norm": 1.5305321216583252,
"learning_rate": 8.585237258347979e-05,
"loss": 0.8432503342628479,
"step": 978
},
{
"epoch": 0.41350210970464135,
"grad_norm": 1.2097326517105103,
"learning_rate": 8.602811950790861e-05,
"loss": 0.8330482840538025,
"step": 980
},
{
"epoch": 0.41434599156118146,
"grad_norm": 1.3916101455688477,
"learning_rate": 8.620386643233744e-05,
"loss": 0.8137149810791016,
"step": 982
},
{
"epoch": 0.4151898734177215,
"grad_norm": 1.6411453485488892,
"learning_rate": 8.637961335676626e-05,
"loss": 0.8273854851722717,
"step": 984
},
{
"epoch": 0.4160337552742616,
"grad_norm": 1.6734566688537598,
"learning_rate": 8.655536028119508e-05,
"loss": 0.794026255607605,
"step": 986
},
{
"epoch": 0.41687763713080167,
"grad_norm": 1.352325677871704,
"learning_rate": 8.67311072056239e-05,
"loss": 0.7721655368804932,
"step": 988
},
{
"epoch": 0.4177215189873418,
"grad_norm": 1.5368729829788208,
"learning_rate": 8.690685413005273e-05,
"loss": 0.8123438954353333,
"step": 990
},
{
"epoch": 0.41856540084388183,
"grad_norm": 1.4903568029403687,
"learning_rate": 8.708260105448155e-05,
"loss": 0.8370974659919739,
"step": 992
},
{
"epoch": 0.41940928270042194,
"grad_norm": 1.3405622243881226,
"learning_rate": 8.725834797891037e-05,
"loss": 0.780426561832428,
"step": 994
},
{
"epoch": 0.42025316455696204,
"grad_norm": 1.4761021137237549,
"learning_rate": 8.743409490333919e-05,
"loss": 0.8304934501647949,
"step": 996
},
{
"epoch": 0.4210970464135021,
"grad_norm": 1.520033359527588,
"learning_rate": 8.760984182776801e-05,
"loss": 0.7960568070411682,
"step": 998
},
{
"epoch": 0.4219409282700422,
"grad_norm": 1.6916255950927734,
"learning_rate": 8.778558875219684e-05,
"loss": 0.7884663939476013,
"step": 1000
},
{
"epoch": 0.4219409282700422,
"eval_loss": 0.8388314247131348,
"eval_runtime": 847.4828,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 1000
},
{
"epoch": 0.42278481012658226,
"grad_norm": 1.6796396970748901,
"learning_rate": 8.796133567662566e-05,
"loss": 0.7930826544761658,
"step": 1002
},
{
"epoch": 0.42362869198312236,
"grad_norm": 1.4480048418045044,
"learning_rate": 8.813708260105448e-05,
"loss": 0.7138194441795349,
"step": 1004
},
{
"epoch": 0.42447257383966247,
"grad_norm": 1.2499021291732788,
"learning_rate": 8.831282952548331e-05,
"loss": 0.7367453575134277,
"step": 1006
},
{
"epoch": 0.4253164556962025,
"grad_norm": 1.6906769275665283,
"learning_rate": 8.848857644991213e-05,
"loss": 0.9051005244255066,
"step": 1008
},
{
"epoch": 0.42616033755274263,
"grad_norm": 1.4196792840957642,
"learning_rate": 8.866432337434095e-05,
"loss": 0.7469457387924194,
"step": 1010
},
{
"epoch": 0.4270042194092827,
"grad_norm": 1.5132776498794556,
"learning_rate": 8.884007029876977e-05,
"loss": 0.7443049550056458,
"step": 1012
},
{
"epoch": 0.4278481012658228,
"grad_norm": 1.335705280303955,
"learning_rate": 8.901581722319859e-05,
"loss": 0.784084677696228,
"step": 1014
},
{
"epoch": 0.4286919831223629,
"grad_norm": 1.6510252952575684,
"learning_rate": 8.919156414762741e-05,
"loss": 0.8603647947311401,
"step": 1016
},
{
"epoch": 0.42953586497890295,
"grad_norm": 1.35535728931427,
"learning_rate": 8.936731107205624e-05,
"loss": 0.7921645641326904,
"step": 1018
},
{
"epoch": 0.43037974683544306,
"grad_norm": 1.4952049255371094,
"learning_rate": 8.954305799648506e-05,
"loss": 0.799993634223938,
"step": 1020
},
{
"epoch": 0.4312236286919831,
"grad_norm": 1.5026042461395264,
"learning_rate": 8.97188049209139e-05,
"loss": 0.7697094082832336,
"step": 1022
},
{
"epoch": 0.4320675105485232,
"grad_norm": 1.5424275398254395,
"learning_rate": 8.989455184534271e-05,
"loss": 0.7988215684890747,
"step": 1024
},
{
"epoch": 0.43291139240506327,
"grad_norm": 1.438716173171997,
"learning_rate": 9.007029876977153e-05,
"loss": 0.7841635942459106,
"step": 1026
},
{
"epoch": 0.4337552742616034,
"grad_norm": 1.5040369033813477,
"learning_rate": 9.024604569420035e-05,
"loss": 0.7485025525093079,
"step": 1028
},
{
"epoch": 0.4345991561181435,
"grad_norm": 1.4354394674301147,
"learning_rate": 9.042179261862917e-05,
"loss": 0.7735623121261597,
"step": 1030
},
{
"epoch": 0.43544303797468353,
"grad_norm": 1.4841680526733398,
"learning_rate": 9.059753954305799e-05,
"loss": 0.8918828964233398,
"step": 1032
},
{
"epoch": 0.43628691983122364,
"grad_norm": 1.428813099861145,
"learning_rate": 9.077328646748682e-05,
"loss": 0.835110068321228,
"step": 1034
},
{
"epoch": 0.4371308016877637,
"grad_norm": 1.559020757675171,
"learning_rate": 9.094903339191566e-05,
"loss": 0.746295690536499,
"step": 1036
},
{
"epoch": 0.4379746835443038,
"grad_norm": 1.6996115446090698,
"learning_rate": 9.112478031634448e-05,
"loss": 0.8089123368263245,
"step": 1038
},
{
"epoch": 0.4388185654008439,
"grad_norm": 1.6615465879440308,
"learning_rate": 9.13005272407733e-05,
"loss": 0.8807073831558228,
"step": 1040
},
{
"epoch": 0.43966244725738396,
"grad_norm": 1.239142894744873,
"learning_rate": 9.147627416520211e-05,
"loss": 0.7638427019119263,
"step": 1042
},
{
"epoch": 0.44050632911392407,
"grad_norm": 1.1915178298950195,
"learning_rate": 9.165202108963093e-05,
"loss": 0.7817409634590149,
"step": 1044
},
{
"epoch": 0.4413502109704641,
"grad_norm": 1.6276934146881104,
"learning_rate": 9.182776801405975e-05,
"loss": 0.8586427569389343,
"step": 1046
},
{
"epoch": 0.4421940928270042,
"grad_norm": 1.480345606803894,
"learning_rate": 9.200351493848857e-05,
"loss": 0.7481811046600342,
"step": 1048
},
{
"epoch": 0.4430379746835443,
"grad_norm": 1.308419108390808,
"learning_rate": 9.21792618629174e-05,
"loss": 0.8074686527252197,
"step": 1050
},
{
"epoch": 0.4438818565400844,
"grad_norm": 1.6167182922363281,
"learning_rate": 9.235500878734624e-05,
"loss": 0.8455166816711426,
"step": 1052
},
{
"epoch": 0.4447257383966245,
"grad_norm": 1.6058826446533203,
"learning_rate": 9.253075571177506e-05,
"loss": 0.7255295515060425,
"step": 1054
},
{
"epoch": 0.44556962025316454,
"grad_norm": 1.6745728254318237,
"learning_rate": 9.270650263620387e-05,
"loss": 0.8329368233680725,
"step": 1056
},
{
"epoch": 0.44641350210970465,
"grad_norm": 1.5657380819320679,
"learning_rate": 9.28822495606327e-05,
"loss": 0.8583613634109497,
"step": 1058
},
{
"epoch": 0.4472573839662447,
"grad_norm": 1.5052601099014282,
"learning_rate": 9.305799648506151e-05,
"loss": 0.8546127080917358,
"step": 1060
},
{
"epoch": 0.4481012658227848,
"grad_norm": 1.510636806488037,
"learning_rate": 9.323374340949033e-05,
"loss": 0.8416863679885864,
"step": 1062
},
{
"epoch": 0.4489451476793249,
"grad_norm": 1.4446617364883423,
"learning_rate": 9.340949033391916e-05,
"loss": 0.830390453338623,
"step": 1064
},
{
"epoch": 0.44978902953586497,
"grad_norm": 1.6032582521438599,
"learning_rate": 9.358523725834798e-05,
"loss": 0.8000447154045105,
"step": 1066
},
{
"epoch": 0.4506329113924051,
"grad_norm": 1.5295692682266235,
"learning_rate": 9.37609841827768e-05,
"loss": 0.8310818672180176,
"step": 1068
},
{
"epoch": 0.45147679324894513,
"grad_norm": 1.3161942958831787,
"learning_rate": 9.393673110720564e-05,
"loss": 0.8377846479415894,
"step": 1070
},
{
"epoch": 0.45232067510548524,
"grad_norm": 1.4101601839065552,
"learning_rate": 9.411247803163445e-05,
"loss": 0.7852389216423035,
"step": 1072
},
{
"epoch": 0.4531645569620253,
"grad_norm": 1.4352775812149048,
"learning_rate": 9.428822495606327e-05,
"loss": 0.8763723969459534,
"step": 1074
},
{
"epoch": 0.4540084388185654,
"grad_norm": 1.4584673643112183,
"learning_rate": 9.44639718804921e-05,
"loss": 0.8177199363708496,
"step": 1076
},
{
"epoch": 0.4548523206751055,
"grad_norm": 1.6470575332641602,
"learning_rate": 9.463971880492091e-05,
"loss": 0.8333053588867188,
"step": 1078
},
{
"epoch": 0.45569620253164556,
"grad_norm": 1.4429512023925781,
"learning_rate": 9.481546572934975e-05,
"loss": 0.8546649217605591,
"step": 1080
},
{
"epoch": 0.45654008438818566,
"grad_norm": 1.4885371923446655,
"learning_rate": 9.499121265377856e-05,
"loss": 0.838036298751831,
"step": 1082
},
{
"epoch": 0.4573839662447257,
"grad_norm": 1.4601678848266602,
"learning_rate": 9.516695957820738e-05,
"loss": 0.7295010089874268,
"step": 1084
},
{
"epoch": 0.4582278481012658,
"grad_norm": 1.2399365901947021,
"learning_rate": 9.53427065026362e-05,
"loss": 0.6990782618522644,
"step": 1086
},
{
"epoch": 0.45907172995780593,
"grad_norm": 1.2936921119689941,
"learning_rate": 9.551845342706504e-05,
"loss": 0.7790928483009338,
"step": 1088
},
{
"epoch": 0.459915611814346,
"grad_norm": 1.3408331871032715,
"learning_rate": 9.569420035149385e-05,
"loss": 0.8061056733131409,
"step": 1090
},
{
"epoch": 0.4607594936708861,
"grad_norm": 1.5525178909301758,
"learning_rate": 9.586994727592267e-05,
"loss": 0.856796383857727,
"step": 1092
},
{
"epoch": 0.46160337552742614,
"grad_norm": 1.2944618463516235,
"learning_rate": 9.604569420035149e-05,
"loss": 0.7626663446426392,
"step": 1094
},
{
"epoch": 0.46244725738396625,
"grad_norm": 1.412204623222351,
"learning_rate": 9.622144112478033e-05,
"loss": 0.7524681091308594,
"step": 1096
},
{
"epoch": 0.46329113924050636,
"grad_norm": 1.4851596355438232,
"learning_rate": 9.639718804920914e-05,
"loss": 0.8430375456809998,
"step": 1098
},
{
"epoch": 0.4641350210970464,
"grad_norm": 1.831943154335022,
"learning_rate": 9.657293497363796e-05,
"loss": 0.8374918103218079,
"step": 1100
},
{
"epoch": 0.4641350210970464,
"eval_loss": 0.8283821940422058,
"eval_runtime": 861.0464,
"eval_samples_per_second": 2.447,
"eval_steps_per_second": 2.447,
"step": 1100
},
{
"epoch": 0.4649789029535865,
"grad_norm": 1.4989945888519287,
"learning_rate": 9.674868189806678e-05,
"loss": 0.8063139915466309,
"step": 1102
},
{
"epoch": 0.46582278481012657,
"grad_norm": 1.3772722482681274,
"learning_rate": 9.692442882249562e-05,
"loss": 0.8109207153320312,
"step": 1104
},
{
"epoch": 0.4666666666666667,
"grad_norm": 1.4963124990463257,
"learning_rate": 9.710017574692443e-05,
"loss": 0.8667853474617004,
"step": 1106
},
{
"epoch": 0.4675105485232067,
"grad_norm": 1.4250836372375488,
"learning_rate": 9.727592267135325e-05,
"loss": 0.8020523190498352,
"step": 1108
},
{
"epoch": 0.46835443037974683,
"grad_norm": 1.475599765777588,
"learning_rate": 9.745166959578209e-05,
"loss": 0.8271048069000244,
"step": 1110
},
{
"epoch": 0.46919831223628694,
"grad_norm": 1.3727436065673828,
"learning_rate": 9.76274165202109e-05,
"loss": 0.7615619897842407,
"step": 1112
},
{
"epoch": 0.470042194092827,
"grad_norm": 1.2233914136886597,
"learning_rate": 9.780316344463972e-05,
"loss": 0.7843242883682251,
"step": 1114
},
{
"epoch": 0.4708860759493671,
"grad_norm": 1.5734832286834717,
"learning_rate": 9.797891036906854e-05,
"loss": 0.834839940071106,
"step": 1116
},
{
"epoch": 0.47172995780590715,
"grad_norm": 1.3778531551361084,
"learning_rate": 9.815465729349736e-05,
"loss": 0.7584373950958252,
"step": 1118
},
{
"epoch": 0.47257383966244726,
"grad_norm": 1.5535035133361816,
"learning_rate": 9.833040421792618e-05,
"loss": 0.8204697370529175,
"step": 1120
},
{
"epoch": 0.47341772151898737,
"grad_norm": 1.4743636846542358,
"learning_rate": 9.850615114235501e-05,
"loss": 0.9012852311134338,
"step": 1122
},
{
"epoch": 0.4742616033755274,
"grad_norm": 1.4134864807128906,
"learning_rate": 9.868189806678383e-05,
"loss": 0.8392805457115173,
"step": 1124
},
{
"epoch": 0.4751054852320675,
"grad_norm": 1.3308019638061523,
"learning_rate": 9.885764499121267e-05,
"loss": 0.7135441303253174,
"step": 1126
},
{
"epoch": 0.4759493670886076,
"grad_norm": 1.5354844331741333,
"learning_rate": 9.903339191564149e-05,
"loss": 0.8464727401733398,
"step": 1128
},
{
"epoch": 0.4767932489451477,
"grad_norm": 1.2730523347854614,
"learning_rate": 9.92091388400703e-05,
"loss": 0.7691597938537598,
"step": 1130
},
{
"epoch": 0.47763713080168774,
"grad_norm": 1.5459758043289185,
"learning_rate": 9.938488576449912e-05,
"loss": 0.8068788647651672,
"step": 1132
},
{
"epoch": 0.47848101265822784,
"grad_norm": 1.345678687095642,
"learning_rate": 9.956063268892794e-05,
"loss": 0.8091006278991699,
"step": 1134
},
{
"epoch": 0.47932489451476795,
"grad_norm": 1.317076563835144,
"learning_rate": 9.973637961335676e-05,
"loss": 0.735533595085144,
"step": 1136
},
{
"epoch": 0.480168776371308,
"grad_norm": 1.5011168718338013,
"learning_rate": 9.99121265377856e-05,
"loss": 0.7935182452201843,
"step": 1138
},
{
"epoch": 0.4810126582278481,
"grad_norm": 1.673899531364441,
"learning_rate": 9.999999855824502e-05,
"loss": 0.8203520774841309,
"step": 1140
},
{
"epoch": 0.48185654008438816,
"grad_norm": 1.344337821006775,
"learning_rate": 9.999998702420562e-05,
"loss": 0.7233241200447083,
"step": 1142
},
{
"epoch": 0.48270042194092827,
"grad_norm": 1.5819076299667358,
"learning_rate": 9.999996395612948e-05,
"loss": 0.8795552849769592,
"step": 1144
},
{
"epoch": 0.4835443037974684,
"grad_norm": 1.7427241802215576,
"learning_rate": 9.999992935402192e-05,
"loss": 0.8482733964920044,
"step": 1146
},
{
"epoch": 0.48438818565400843,
"grad_norm": 1.2877503633499146,
"learning_rate": 9.999988321789093e-05,
"loss": 0.7905706167221069,
"step": 1148
},
{
"epoch": 0.48523206751054854,
"grad_norm": 1.4887222051620483,
"learning_rate": 9.999982554774715e-05,
"loss": 0.8609708547592163,
"step": 1150
},
{
"epoch": 0.4860759493670886,
"grad_norm": 1.3625136613845825,
"learning_rate": 9.999975634360388e-05,
"loss": 0.7890065908432007,
"step": 1152
},
{
"epoch": 0.4869198312236287,
"grad_norm": 1.3631492853164673,
"learning_rate": 9.999967560547708e-05,
"loss": 0.7908958196640015,
"step": 1154
},
{
"epoch": 0.4877637130801688,
"grad_norm": 1.5244156122207642,
"learning_rate": 9.99995833333854e-05,
"loss": 0.8509655594825745,
"step": 1156
},
{
"epoch": 0.48860759493670886,
"grad_norm": 1.2513200044631958,
"learning_rate": 9.999947952735007e-05,
"loss": 0.7329106330871582,
"step": 1158
},
{
"epoch": 0.48945147679324896,
"grad_norm": 1.1539413928985596,
"learning_rate": 9.99993641873951e-05,
"loss": 0.7237489223480225,
"step": 1160
},
{
"epoch": 0.490295358649789,
"grad_norm": 1.3859314918518066,
"learning_rate": 9.999923731354706e-05,
"loss": 0.8650591373443604,
"step": 1162
},
{
"epoch": 0.4911392405063291,
"grad_norm": 1.2910805940628052,
"learning_rate": 9.999909890583521e-05,
"loss": 0.7516807913780212,
"step": 1164
},
{
"epoch": 0.4919831223628692,
"grad_norm": 1.6100077629089355,
"learning_rate": 9.999894896429152e-05,
"loss": 0.7082475423812866,
"step": 1166
},
{
"epoch": 0.4928270042194093,
"grad_norm": 1.2313556671142578,
"learning_rate": 9.999878748895053e-05,
"loss": 0.8403750658035278,
"step": 1168
},
{
"epoch": 0.4936708860759494,
"grad_norm": 1.3402830362319946,
"learning_rate": 9.999861447984952e-05,
"loss": 0.8083041906356812,
"step": 1170
},
{
"epoch": 0.49451476793248944,
"grad_norm": 1.516775131225586,
"learning_rate": 9.999842993702839e-05,
"loss": 0.8339354991912842,
"step": 1172
},
{
"epoch": 0.49535864978902955,
"grad_norm": 1.2698423862457275,
"learning_rate": 9.999823386052971e-05,
"loss": 0.7708724141120911,
"step": 1174
},
{
"epoch": 0.4962025316455696,
"grad_norm": 1.339390516281128,
"learning_rate": 9.999802625039872e-05,
"loss": 0.7589715719223022,
"step": 1176
},
{
"epoch": 0.4970464135021097,
"grad_norm": 1.4618452787399292,
"learning_rate": 9.99978071066833e-05,
"loss": 0.8523206114768982,
"step": 1178
},
{
"epoch": 0.4978902953586498,
"grad_norm": 1.4812564849853516,
"learning_rate": 9.9997576429434e-05,
"loss": 0.8143196105957031,
"step": 1180
},
{
"epoch": 0.49873417721518987,
"grad_norm": 1.5720716714859009,
"learning_rate": 9.999733421870405e-05,
"loss": 0.800125002861023,
"step": 1182
},
{
"epoch": 0.49957805907173,
"grad_norm": 1.4421230554580688,
"learning_rate": 9.99970804745493e-05,
"loss": 0.7618259191513062,
"step": 1184
},
{
"epoch": 0.5004219409282701,
"grad_norm": 1.5794934034347534,
"learning_rate": 9.99968151970283e-05,
"loss": 0.7162163853645325,
"step": 1186
},
{
"epoch": 0.5012658227848101,
"grad_norm": 1.8590432405471802,
"learning_rate": 9.999653838620225e-05,
"loss": 0.8089820146560669,
"step": 1188
},
{
"epoch": 0.5021097046413502,
"grad_norm": 1.5194507837295532,
"learning_rate": 9.999625004213498e-05,
"loss": 0.8011203408241272,
"step": 1190
},
{
"epoch": 0.5029535864978903,
"grad_norm": 1.6986470222473145,
"learning_rate": 9.999595016489303e-05,
"loss": 0.761158287525177,
"step": 1192
},
{
"epoch": 0.5037974683544304,
"grad_norm": 1.4413946866989136,
"learning_rate": 9.999563875454559e-05,
"loss": 0.7898027300834656,
"step": 1194
},
{
"epoch": 0.5046413502109705,
"grad_norm": 1.4509994983673096,
"learning_rate": 9.999531581116443e-05,
"loss": 0.8018442392349243,
"step": 1196
},
{
"epoch": 0.5054852320675105,
"grad_norm": 1.400659441947937,
"learning_rate": 9.999498133482412e-05,
"loss": 0.7804076075553894,
"step": 1198
},
{
"epoch": 0.5063291139240507,
"grad_norm": 1.486840009689331,
"learning_rate": 9.999463532560178e-05,
"loss": 0.82496178150177,
"step": 1200
},
{
"epoch": 0.5063291139240507,
"eval_loss": 0.8186545968055725,
"eval_runtime": 862.1638,
"eval_samples_per_second": 2.444,
"eval_steps_per_second": 2.444,
"step": 1200
},
{
"epoch": 0.5071729957805907,
"grad_norm": 1.2770357131958008,
"learning_rate": 9.999427778357723e-05,
"loss": 0.8037722706794739,
"step": 1202
},
{
"epoch": 0.5080168776371308,
"grad_norm": 1.4540977478027344,
"learning_rate": 9.999390870883297e-05,
"loss": 0.7329373359680176,
"step": 1204
},
{
"epoch": 0.5088607594936709,
"grad_norm": 1.4469913244247437,
"learning_rate": 9.999352810145412e-05,
"loss": 0.8224589824676514,
"step": 1206
},
{
"epoch": 0.509704641350211,
"grad_norm": 1.46500563621521,
"learning_rate": 9.999313596152847e-05,
"loss": 0.8106292486190796,
"step": 1208
},
{
"epoch": 0.510548523206751,
"grad_norm": 1.3526637554168701,
"learning_rate": 9.999273228914649e-05,
"loss": 0.747698187828064,
"step": 1210
},
{
"epoch": 0.5113924050632911,
"grad_norm": 1.28840172290802,
"learning_rate": 9.999231708440131e-05,
"loss": 0.7612425684928894,
"step": 1212
},
{
"epoch": 0.5122362869198313,
"grad_norm": 1.0283230543136597,
"learning_rate": 9.99918903473887e-05,
"loss": 0.6839463710784912,
"step": 1214
},
{
"epoch": 0.5130801687763713,
"grad_norm": 1.5231431722640991,
"learning_rate": 9.999145207820708e-05,
"loss": 0.8539203405380249,
"step": 1216
},
{
"epoch": 0.5139240506329114,
"grad_norm": 1.3289231061935425,
"learning_rate": 9.999100227695758e-05,
"loss": 0.7960102558135986,
"step": 1218
},
{
"epoch": 0.5147679324894515,
"grad_norm": 1.3770930767059326,
"learning_rate": 9.999054094374396e-05,
"loss": 0.7639255523681641,
"step": 1220
},
{
"epoch": 0.5156118143459916,
"grad_norm": 1.3028030395507812,
"learning_rate": 9.999006807867262e-05,
"loss": 0.7743061780929565,
"step": 1222
},
{
"epoch": 0.5164556962025316,
"grad_norm": 1.1827034950256348,
"learning_rate": 9.998958368185265e-05,
"loss": 0.7922407984733582,
"step": 1224
},
{
"epoch": 0.5172995780590718,
"grad_norm": 1.2973705530166626,
"learning_rate": 9.99890877533958e-05,
"loss": 0.7671286463737488,
"step": 1226
},
{
"epoch": 0.5181434599156118,
"grad_norm": 1.5820153951644897,
"learning_rate": 9.998858029341646e-05,
"loss": 0.7546951174736023,
"step": 1228
},
{
"epoch": 0.5189873417721519,
"grad_norm": 1.6140317916870117,
"learning_rate": 9.99880613020317e-05,
"loss": 0.8734183311462402,
"step": 1230
},
{
"epoch": 0.5198312236286919,
"grad_norm": 1.1190184354782104,
"learning_rate": 9.998753077936122e-05,
"loss": 0.8410643339157104,
"step": 1232
},
{
"epoch": 0.5206751054852321,
"grad_norm": 1.3876196146011353,
"learning_rate": 9.998698872552744e-05,
"loss": 0.7769841551780701,
"step": 1234
},
{
"epoch": 0.5215189873417722,
"grad_norm": 1.699522852897644,
"learning_rate": 9.998643514065535e-05,
"loss": 0.8846109509468079,
"step": 1236
},
{
"epoch": 0.5223628691983122,
"grad_norm": 1.3805134296417236,
"learning_rate": 9.998587002487271e-05,
"loss": 0.7664945125579834,
"step": 1238
},
{
"epoch": 0.5232067510548524,
"grad_norm": 1.3679476976394653,
"learning_rate": 9.998529337830984e-05,
"loss": 0.7243514060974121,
"step": 1240
},
{
"epoch": 0.5240506329113924,
"grad_norm": 1.399200677871704,
"learning_rate": 9.998470520109977e-05,
"loss": 0.8061941862106323,
"step": 1242
},
{
"epoch": 0.5248945147679325,
"grad_norm": 1.3441044092178345,
"learning_rate": 9.99841054933782e-05,
"loss": 0.7741840481758118,
"step": 1244
},
{
"epoch": 0.5257383966244725,
"grad_norm": 1.3375325202941895,
"learning_rate": 9.998349425528344e-05,
"loss": 0.7619491815567017,
"step": 1246
},
{
"epoch": 0.5265822784810127,
"grad_norm": 1.5517847537994385,
"learning_rate": 9.998287148695651e-05,
"loss": 0.8315094113349915,
"step": 1248
},
{
"epoch": 0.5274261603375527,
"grad_norm": 1.244997501373291,
"learning_rate": 9.998223718854107e-05,
"loss": 0.7536082863807678,
"step": 1250
},
{
"epoch": 0.5282700421940928,
"grad_norm": 1.3190033435821533,
"learning_rate": 9.998159136018344e-05,
"loss": 0.826419472694397,
"step": 1252
},
{
"epoch": 0.529113924050633,
"grad_norm": 1.2750061750411987,
"learning_rate": 9.998093400203259e-05,
"loss": 0.7866435647010803,
"step": 1254
},
{
"epoch": 0.529957805907173,
"grad_norm": 1.422908067703247,
"learning_rate": 9.998026511424017e-05,
"loss": 0.7796626687049866,
"step": 1256
},
{
"epoch": 0.5308016877637131,
"grad_norm": 1.435552954673767,
"learning_rate": 9.997958469696048e-05,
"loss": 0.815027117729187,
"step": 1258
},
{
"epoch": 0.5316455696202531,
"grad_norm": 1.1950994729995728,
"learning_rate": 9.997889275035049e-05,
"loss": 0.6925795674324036,
"step": 1260
},
{
"epoch": 0.5324894514767933,
"grad_norm": 1.3049622774124146,
"learning_rate": 9.997818927456978e-05,
"loss": 0.822464108467102,
"step": 1262
},
{
"epoch": 0.5333333333333333,
"grad_norm": 1.2197340726852417,
"learning_rate": 9.997747426978066e-05,
"loss": 0.7955381274223328,
"step": 1264
},
{
"epoch": 0.5341772151898734,
"grad_norm": 1.2463661432266235,
"learning_rate": 9.997674773614807e-05,
"loss": 0.8642181754112244,
"step": 1266
},
{
"epoch": 0.5350210970464135,
"grad_norm": 1.421393871307373,
"learning_rate": 9.99760096738396e-05,
"loss": 0.8776891827583313,
"step": 1268
},
{
"epoch": 0.5358649789029536,
"grad_norm": 1.4347561597824097,
"learning_rate": 9.997526008302549e-05,
"loss": 0.7446491122245789,
"step": 1270
},
{
"epoch": 0.5367088607594936,
"grad_norm": 1.2056710720062256,
"learning_rate": 9.99744989638787e-05,
"loss": 0.8581281304359436,
"step": 1272
},
{
"epoch": 0.5375527426160338,
"grad_norm": 1.1672608852386475,
"learning_rate": 9.997372631657475e-05,
"loss": 0.7386330366134644,
"step": 1274
},
{
"epoch": 0.5383966244725739,
"grad_norm": 1.4313966035842896,
"learning_rate": 9.997294214129191e-05,
"loss": 0.7806804776191711,
"step": 1276
},
{
"epoch": 0.5392405063291139,
"grad_norm": 1.1666971445083618,
"learning_rate": 9.997214643821107e-05,
"loss": 0.6830351948738098,
"step": 1278
},
{
"epoch": 0.540084388185654,
"grad_norm": 1.491783857345581,
"learning_rate": 9.997133920751578e-05,
"loss": 0.8570694327354431,
"step": 1280
},
{
"epoch": 0.5409282700421941,
"grad_norm": 1.1879212856292725,
"learning_rate": 9.997052044939226e-05,
"loss": 0.7016772031784058,
"step": 1282
},
{
"epoch": 0.5417721518987342,
"grad_norm": 1.2692012786865234,
"learning_rate": 9.996969016402935e-05,
"loss": 0.7711107134819031,
"step": 1284
},
{
"epoch": 0.5426160337552742,
"grad_norm": 1.3318448066711426,
"learning_rate": 9.996884835161863e-05,
"loss": 0.7807164788246155,
"step": 1286
},
{
"epoch": 0.5434599156118144,
"grad_norm": 1.1786744594573975,
"learning_rate": 9.996799501235425e-05,
"loss": 0.7331319451332092,
"step": 1288
},
{
"epoch": 0.5443037974683544,
"grad_norm": 1.4092369079589844,
"learning_rate": 9.996713014643309e-05,
"loss": 0.7191547155380249,
"step": 1290
},
{
"epoch": 0.5451476793248945,
"grad_norm": 1.377099633216858,
"learning_rate": 9.996625375405463e-05,
"loss": 0.7233871221542358,
"step": 1292
},
{
"epoch": 0.5459915611814345,
"grad_norm": 1.404945969581604,
"learning_rate": 9.996536583542105e-05,
"loss": 0.7925472855567932,
"step": 1294
},
{
"epoch": 0.5468354430379747,
"grad_norm": 1.2555286884307861,
"learning_rate": 9.996446639073718e-05,
"loss": 0.7749786376953125,
"step": 1296
},
{
"epoch": 0.5476793248945148,
"grad_norm": 1.2577459812164307,
"learning_rate": 9.996355542021048e-05,
"loss": 0.7647517919540405,
"step": 1298
},
{
"epoch": 0.5485232067510548,
"grad_norm": 1.3587758541107178,
"learning_rate": 9.996263292405113e-05,
"loss": 0.8621891140937805,
"step": 1300
},
{
"epoch": 0.5485232067510548,
"eval_loss": 0.808323085308075,
"eval_runtime": 853.577,
"eval_samples_per_second": 2.468,
"eval_steps_per_second": 2.468,
"step": 1300
},
{
"epoch": 0.549367088607595,
"grad_norm": 1.327125906944275,
"learning_rate": 9.996169890247191e-05,
"loss": 0.749254584312439,
"step": 1302
},
{
"epoch": 0.550210970464135,
"grad_norm": 1.4620670080184937,
"learning_rate": 9.99607533556883e-05,
"loss": 0.7362856268882751,
"step": 1304
},
{
"epoch": 0.5510548523206751,
"grad_norm": 1.4119454622268677,
"learning_rate": 9.99597962839184e-05,
"loss": 0.7918445467948914,
"step": 1306
},
{
"epoch": 0.5518987341772152,
"grad_norm": 1.497522234916687,
"learning_rate": 9.995882768738298e-05,
"loss": 0.7348005175590515,
"step": 1308
},
{
"epoch": 0.5527426160337553,
"grad_norm": 1.535741925239563,
"learning_rate": 9.99578475663055e-05,
"loss": 0.8310725688934326,
"step": 1310
},
{
"epoch": 0.5535864978902953,
"grad_norm": 1.4606215953826904,
"learning_rate": 9.995685592091204e-05,
"loss": 0.8232766389846802,
"step": 1312
},
{
"epoch": 0.5544303797468354,
"grad_norm": 1.2442357540130615,
"learning_rate": 9.995585275143136e-05,
"loss": 0.8273071050643921,
"step": 1314
},
{
"epoch": 0.5552742616033756,
"grad_norm": 1.5128520727157593,
"learning_rate": 9.995483805809487e-05,
"loss": 0.7518656253814697,
"step": 1316
},
{
"epoch": 0.5561181434599156,
"grad_norm": 1.340149998664856,
"learning_rate": 9.995381184113664e-05,
"loss": 0.8261662721633911,
"step": 1318
},
{
"epoch": 0.5569620253164557,
"grad_norm": 1.1409451961517334,
"learning_rate": 9.99527741007934e-05,
"loss": 0.5775256156921387,
"step": 1320
},
{
"epoch": 0.5578059071729958,
"grad_norm": 1.3489247560501099,
"learning_rate": 9.995172483730455e-05,
"loss": 0.7698423862457275,
"step": 1322
},
{
"epoch": 0.5586497890295359,
"grad_norm": 1.4950530529022217,
"learning_rate": 9.995066405091211e-05,
"loss": 0.8053334355354309,
"step": 1324
},
{
"epoch": 0.5594936708860759,
"grad_norm": 1.3814653158187866,
"learning_rate": 9.994959174186078e-05,
"loss": 0.7826266288757324,
"step": 1326
},
{
"epoch": 0.560337552742616,
"grad_norm": 1.3383625745773315,
"learning_rate": 9.994850791039796e-05,
"loss": 0.7862131595611572,
"step": 1328
},
{
"epoch": 0.5611814345991561,
"grad_norm": 1.3529670238494873,
"learning_rate": 9.994741255677363e-05,
"loss": 0.8428501486778259,
"step": 1330
},
{
"epoch": 0.5620253164556962,
"grad_norm": 1.254215121269226,
"learning_rate": 9.994630568124049e-05,
"loss": 0.7340869307518005,
"step": 1332
},
{
"epoch": 0.5628691983122363,
"grad_norm": 1.2869828939437866,
"learning_rate": 9.994518728405386e-05,
"loss": 0.7052226662635803,
"step": 1334
},
{
"epoch": 0.5637130801687764,
"grad_norm": 1.4321808815002441,
"learning_rate": 9.994405736547174e-05,
"loss": 0.8297074437141418,
"step": 1336
},
{
"epoch": 0.5645569620253165,
"grad_norm": 1.4638891220092773,
"learning_rate": 9.994291592575478e-05,
"loss": 0.7183220982551575,
"step": 1338
},
{
"epoch": 0.5654008438818565,
"grad_norm": 1.4947413206100464,
"learning_rate": 9.994176296516628e-05,
"loss": 0.8146093487739563,
"step": 1340
},
{
"epoch": 0.5662447257383966,
"grad_norm": 1.343862533569336,
"learning_rate": 9.994059848397221e-05,
"loss": 0.7583593130111694,
"step": 1342
},
{
"epoch": 0.5670886075949367,
"grad_norm": 1.203550100326538,
"learning_rate": 9.993942248244121e-05,
"loss": 0.7682924270629883,
"step": 1344
},
{
"epoch": 0.5679324894514768,
"grad_norm": 1.287660002708435,
"learning_rate": 9.993823496084455e-05,
"loss": 0.8139828443527222,
"step": 1346
},
{
"epoch": 0.5687763713080168,
"grad_norm": 1.3326014280319214,
"learning_rate": 9.993703591945616e-05,
"loss": 0.7529099583625793,
"step": 1348
},
{
"epoch": 0.569620253164557,
"grad_norm": 1.2441487312316895,
"learning_rate": 9.993582535855263e-05,
"loss": 0.6997471451759338,
"step": 1350
},
{
"epoch": 0.570464135021097,
"grad_norm": 1.2647649049758911,
"learning_rate": 9.993460327841325e-05,
"loss": 0.7421218752861023,
"step": 1352
},
{
"epoch": 0.5713080168776371,
"grad_norm": 1.146399974822998,
"learning_rate": 9.99333696793199e-05,
"loss": 0.7342398166656494,
"step": 1354
},
{
"epoch": 0.5721518987341773,
"grad_norm": 1.3346691131591797,
"learning_rate": 9.993212456155715e-05,
"loss": 0.7175891399383545,
"step": 1356
},
{
"epoch": 0.5729957805907173,
"grad_norm": 1.3950672149658203,
"learning_rate": 9.993086792541222e-05,
"loss": 0.8108891248703003,
"step": 1358
},
{
"epoch": 0.5738396624472574,
"grad_norm": 1.339931845664978,
"learning_rate": 9.992959977117502e-05,
"loss": 0.6979889273643494,
"step": 1360
},
{
"epoch": 0.5746835443037974,
"grad_norm": 1.3276840448379517,
"learning_rate": 9.992832009913806e-05,
"loss": 0.7635799050331116,
"step": 1362
},
{
"epoch": 0.5755274261603376,
"grad_norm": 1.5015610456466675,
"learning_rate": 9.992702890959653e-05,
"loss": 0.7575043439865112,
"step": 1364
},
{
"epoch": 0.5763713080168776,
"grad_norm": 1.4755414724349976,
"learning_rate": 9.99257262028483e-05,
"loss": 0.8134847283363342,
"step": 1366
},
{
"epoch": 0.5772151898734177,
"grad_norm": 1.3788783550262451,
"learning_rate": 9.992441197919388e-05,
"loss": 0.7663828134536743,
"step": 1368
},
{
"epoch": 0.5780590717299579,
"grad_norm": 1.2814711332321167,
"learning_rate": 9.992308623893644e-05,
"loss": 0.6711251735687256,
"step": 1370
},
{
"epoch": 0.5789029535864979,
"grad_norm": 1.5343635082244873,
"learning_rate": 9.99217489823818e-05,
"loss": 0.8097200393676758,
"step": 1372
},
{
"epoch": 0.579746835443038,
"grad_norm": 1.3029557466506958,
"learning_rate": 9.992040020983843e-05,
"loss": 0.8274240493774414,
"step": 1374
},
{
"epoch": 0.580590717299578,
"grad_norm": 1.4034144878387451,
"learning_rate": 9.991903992161746e-05,
"loss": 0.7758964896202087,
"step": 1376
},
{
"epoch": 0.5814345991561182,
"grad_norm": 1.2340021133422852,
"learning_rate": 9.991766811803271e-05,
"loss": 0.6571930050849915,
"step": 1378
},
{
"epoch": 0.5822784810126582,
"grad_norm": 1.3082842826843262,
"learning_rate": 9.991628479940061e-05,
"loss": 0.7381542921066284,
"step": 1380
},
{
"epoch": 0.5831223628691983,
"grad_norm": 1.8134801387786865,
"learning_rate": 9.991488996604025e-05,
"loss": 0.8081237077713013,
"step": 1382
},
{
"epoch": 0.5839662447257384,
"grad_norm": 1.4598309993743896,
"learning_rate": 9.991348361827343e-05,
"loss": 0.7761610746383667,
"step": 1384
},
{
"epoch": 0.5848101265822785,
"grad_norm": 1.2974225282669067,
"learning_rate": 9.991206575642453e-05,
"loss": 0.6872953176498413,
"step": 1386
},
{
"epoch": 0.5856540084388185,
"grad_norm": 1.24009370803833,
"learning_rate": 9.991063638082065e-05,
"loss": 0.7601345777511597,
"step": 1388
},
{
"epoch": 0.5864978902953587,
"grad_norm": 1.176713228225708,
"learning_rate": 9.99091954917915e-05,
"loss": 0.7138593792915344,
"step": 1390
},
{
"epoch": 0.5873417721518988,
"grad_norm": 1.1056525707244873,
"learning_rate": 9.990774308966949e-05,
"loss": 0.7730305194854736,
"step": 1392
},
{
"epoch": 0.5881856540084388,
"grad_norm": 1.382847547531128,
"learning_rate": 9.990627917478962e-05,
"loss": 0.7076689600944519,
"step": 1394
},
{
"epoch": 0.5890295358649789,
"grad_norm": 1.2507930994033813,
"learning_rate": 9.990480374748964e-05,
"loss": 0.7970513105392456,
"step": 1396
},
{
"epoch": 0.589873417721519,
"grad_norm": 1.2266724109649658,
"learning_rate": 9.990331680810987e-05,
"loss": 0.7906717658042908,
"step": 1398
},
{
"epoch": 0.5907172995780591,
"grad_norm": 1.299920916557312,
"learning_rate": 9.99018183569933e-05,
"loss": 0.853204607963562,
"step": 1400
},
{
"epoch": 0.5907172995780591,
"eval_loss": 0.8009664416313171,
"eval_runtime": 851.9417,
"eval_samples_per_second": 2.473,
"eval_steps_per_second": 2.473,
"step": 1400
},
{
"epoch": 0.5915611814345991,
"grad_norm": 1.2114863395690918,
"learning_rate": 9.990030839448564e-05,
"loss": 0.8140703439712524,
"step": 1402
},
{
"epoch": 0.5924050632911393,
"grad_norm": 1.3301794528961182,
"learning_rate": 9.989878692093518e-05,
"loss": 0.7471320629119873,
"step": 1404
},
{
"epoch": 0.5932489451476793,
"grad_norm": 1.2611899375915527,
"learning_rate": 9.98972539366929e-05,
"loss": 0.7307024002075195,
"step": 1406
},
{
"epoch": 0.5940928270042194,
"grad_norm": 1.1717802286148071,
"learning_rate": 9.989570944211244e-05,
"loss": 0.6843112111091614,
"step": 1408
},
{
"epoch": 0.5949367088607594,
"grad_norm": 1.3323513269424438,
"learning_rate": 9.989415343755006e-05,
"loss": 0.7025372385978699,
"step": 1410
},
{
"epoch": 0.5957805907172996,
"grad_norm": 1.4225109815597534,
"learning_rate": 9.989258592336473e-05,
"loss": 0.7792683839797974,
"step": 1412
},
{
"epoch": 0.5966244725738397,
"grad_norm": 1.2878522872924805,
"learning_rate": 9.989100689991804e-05,
"loss": 0.8328315019607544,
"step": 1414
},
{
"epoch": 0.5974683544303797,
"grad_norm": 1.2067214250564575,
"learning_rate": 9.988941636757421e-05,
"loss": 0.7700617909431458,
"step": 1416
},
{
"epoch": 0.5983122362869199,
"grad_norm": 1.1213195323944092,
"learning_rate": 9.988781432670019e-05,
"loss": 0.6872363090515137,
"step": 1418
},
{
"epoch": 0.5991561181434599,
"grad_norm": 1.3211694955825806,
"learning_rate": 9.98862007776655e-05,
"loss": 0.7184111475944519,
"step": 1420
},
{
"epoch": 0.6,
"grad_norm": 1.1916998624801636,
"learning_rate": 9.98845757208424e-05,
"loss": 0.8120859265327454,
"step": 1422
},
{
"epoch": 0.60084388185654,
"grad_norm": 1.2772804498672485,
"learning_rate": 9.988293915660572e-05,
"loss": 0.7586462497711182,
"step": 1424
},
{
"epoch": 0.6016877637130802,
"grad_norm": 1.4139106273651123,
"learning_rate": 9.988129108533299e-05,
"loss": 0.8175994157791138,
"step": 1426
},
{
"epoch": 0.6025316455696202,
"grad_norm": 1.4481157064437866,
"learning_rate": 9.987963150740439e-05,
"loss": 0.7662636041641235,
"step": 1428
},
{
"epoch": 0.6033755274261603,
"grad_norm": 1.6000999212265015,
"learning_rate": 9.987796042320277e-05,
"loss": 0.7477837800979614,
"step": 1430
},
{
"epoch": 0.6042194092827005,
"grad_norm": 1.26194429397583,
"learning_rate": 9.98762778331136e-05,
"loss": 0.7392798662185669,
"step": 1432
},
{
"epoch": 0.6050632911392405,
"grad_norm": 1.2370645999908447,
"learning_rate": 9.987458373752503e-05,
"loss": 0.7795998454093933,
"step": 1434
},
{
"epoch": 0.6059071729957806,
"grad_norm": 1.4908311367034912,
"learning_rate": 9.987287813682784e-05,
"loss": 0.7833777070045471,
"step": 1436
},
{
"epoch": 0.6067510548523207,
"grad_norm": 1.2918652296066284,
"learning_rate": 9.987116103141549e-05,
"loss": 0.7269768118858337,
"step": 1438
},
{
"epoch": 0.6075949367088608,
"grad_norm": 1.2170461416244507,
"learning_rate": 9.98694324216841e-05,
"loss": 0.7599279284477234,
"step": 1440
},
{
"epoch": 0.6084388185654008,
"grad_norm": 1.4373505115509033,
"learning_rate": 9.98676923080324e-05,
"loss": 0.8256514668464661,
"step": 1442
},
{
"epoch": 0.6092827004219409,
"grad_norm": 1.3523614406585693,
"learning_rate": 9.986594069086181e-05,
"loss": 0.8462428450584412,
"step": 1444
},
{
"epoch": 0.610126582278481,
"grad_norm": 1.5131851434707642,
"learning_rate": 9.98641775705764e-05,
"loss": 0.8402239084243774,
"step": 1446
},
{
"epoch": 0.6109704641350211,
"grad_norm": 1.3518229722976685,
"learning_rate": 9.98624029475829e-05,
"loss": 0.7585759162902832,
"step": 1448
},
{
"epoch": 0.6118143459915611,
"grad_norm": 1.3403998613357544,
"learning_rate": 9.986061682229064e-05,
"loss": 0.773881733417511,
"step": 1450
},
{
"epoch": 0.6126582278481013,
"grad_norm": 1.1835366487503052,
"learning_rate": 9.985881919511168e-05,
"loss": 0.6770316958427429,
"step": 1452
},
{
"epoch": 0.6135021097046414,
"grad_norm": 1.1825730800628662,
"learning_rate": 9.985701006646069e-05,
"loss": 0.7081645727157593,
"step": 1454
},
{
"epoch": 0.6143459915611814,
"grad_norm": 1.378994345664978,
"learning_rate": 9.9855189436755e-05,
"loss": 0.7750917673110962,
"step": 1456
},
{
"epoch": 0.6151898734177215,
"grad_norm": 1.4208749532699585,
"learning_rate": 9.985335730641458e-05,
"loss": 0.7517801523208618,
"step": 1458
},
{
"epoch": 0.6160337552742616,
"grad_norm": 1.1413639783859253,
"learning_rate": 9.98515136758621e-05,
"loss": 0.712832510471344,
"step": 1460
},
{
"epoch": 0.6168776371308017,
"grad_norm": 1.3949562311172485,
"learning_rate": 9.984965854552283e-05,
"loss": 0.7884142994880676,
"step": 1462
},
{
"epoch": 0.6177215189873417,
"grad_norm": 1.4057096242904663,
"learning_rate": 9.984779191582471e-05,
"loss": 0.796623706817627,
"step": 1464
},
{
"epoch": 0.6185654008438819,
"grad_norm": 1.1681689023971558,
"learning_rate": 9.984591378719834e-05,
"loss": 0.7862933874130249,
"step": 1466
},
{
"epoch": 0.619409282700422,
"grad_norm": 1.2585291862487793,
"learning_rate": 9.984402416007696e-05,
"loss": 0.7889828681945801,
"step": 1468
},
{
"epoch": 0.620253164556962,
"grad_norm": 1.2598098516464233,
"learning_rate": 9.984212303489649e-05,
"loss": 0.7375997304916382,
"step": 1470
},
{
"epoch": 0.6210970464135022,
"grad_norm": 1.4628467559814453,
"learning_rate": 9.984021041209547e-05,
"loss": 0.7839564085006714,
"step": 1472
},
{
"epoch": 0.6219409282700422,
"grad_norm": 1.3606770038604736,
"learning_rate": 9.983828629211511e-05,
"loss": 0.7566051483154297,
"step": 1474
},
{
"epoch": 0.6227848101265823,
"grad_norm": 1.182644248008728,
"learning_rate": 9.983635067539927e-05,
"loss": 0.6638457179069519,
"step": 1476
},
{
"epoch": 0.6236286919831223,
"grad_norm": 1.5617793798446655,
"learning_rate": 9.983440356239445e-05,
"loss": 0.8227225542068481,
"step": 1478
},
{
"epoch": 0.6244725738396625,
"grad_norm": 1.2290058135986328,
"learning_rate": 9.98324449535498e-05,
"loss": 0.7086431980133057,
"step": 1480
},
{
"epoch": 0.6253164556962025,
"grad_norm": 1.3822678327560425,
"learning_rate": 9.983047484931716e-05,
"loss": 0.8076596856117249,
"step": 1482
},
{
"epoch": 0.6261603375527426,
"grad_norm": 1.163699746131897,
"learning_rate": 9.982849325015098e-05,
"loss": 0.7514539361000061,
"step": 1484
},
{
"epoch": 0.6270042194092827,
"grad_norm": 1.2635631561279297,
"learning_rate": 9.982650015650839e-05,
"loss": 0.7298142910003662,
"step": 1486
},
{
"epoch": 0.6278481012658228,
"grad_norm": 1.3135387897491455,
"learning_rate": 9.982449556884914e-05,
"loss": 0.8092831373214722,
"step": 1488
},
{
"epoch": 0.6286919831223629,
"grad_norm": 1.3577877283096313,
"learning_rate": 9.982247948763567e-05,
"loss": 0.7934147715568542,
"step": 1490
},
{
"epoch": 0.6295358649789029,
"grad_norm": 1.1482092142105103,
"learning_rate": 9.982045191333304e-05,
"loss": 0.789363443851471,
"step": 1492
},
{
"epoch": 0.6303797468354431,
"grad_norm": 1.189771056175232,
"learning_rate": 9.981841284640895e-05,
"loss": 0.7458413243293762,
"step": 1494
},
{
"epoch": 0.6312236286919831,
"grad_norm": 1.2815836668014526,
"learning_rate": 9.981636228733383e-05,
"loss": 0.7299918532371521,
"step": 1496
},
{
"epoch": 0.6320675105485232,
"grad_norm": 1.36761474609375,
"learning_rate": 9.981430023658068e-05,
"loss": 0.7545169591903687,
"step": 1498
},
{
"epoch": 0.6329113924050633,
"grad_norm": 1.2594345808029175,
"learning_rate": 9.981222669462513e-05,
"loss": 0.7358481884002686,
"step": 1500
},
{
"epoch": 0.6329113924050633,
"eval_loss": 0.7896141409873962,
"eval_runtime": 865.9069,
"eval_samples_per_second": 2.433,
"eval_steps_per_second": 2.433,
"step": 1500
},
{
"epoch": 0.6337552742616034,
"grad_norm": 3.6419246196746826,
"learning_rate": 9.981014166194556e-05,
"loss": 0.8253764510154724,
"step": 1502
},
{
"epoch": 0.6345991561181434,
"grad_norm": 1.7333487272262573,
"learning_rate": 9.980804513902294e-05,
"loss": 0.8254884481430054,
"step": 1504
},
{
"epoch": 0.6354430379746835,
"grad_norm": 1.1998231410980225,
"learning_rate": 9.980593712634088e-05,
"loss": 0.7833738327026367,
"step": 1506
},
{
"epoch": 0.6362869198312237,
"grad_norm": 1.347011685371399,
"learning_rate": 9.980381762438566e-05,
"loss": 0.753408670425415,
"step": 1508
},
{
"epoch": 0.6371308016877637,
"grad_norm": 1.1759053468704224,
"learning_rate": 9.980168663364622e-05,
"loss": 0.7867791652679443,
"step": 1510
},
{
"epoch": 0.6379746835443038,
"grad_norm": 1.3113552331924438,
"learning_rate": 9.979954415461412e-05,
"loss": 0.6753612160682678,
"step": 1512
},
{
"epoch": 0.6388185654008439,
"grad_norm": 1.3258320093154907,
"learning_rate": 9.979739018778362e-05,
"loss": 0.750367283821106,
"step": 1514
},
{
"epoch": 0.639662447257384,
"grad_norm": 1.175145149230957,
"learning_rate": 9.979522473365157e-05,
"loss": 0.7505861520767212,
"step": 1516
},
{
"epoch": 0.640506329113924,
"grad_norm": 1.2276148796081543,
"learning_rate": 9.979304779271752e-05,
"loss": 0.7429317831993103,
"step": 1518
},
{
"epoch": 0.6413502109704642,
"grad_norm": 1.3262875080108643,
"learning_rate": 9.979085936548362e-05,
"loss": 0.786217212677002,
"step": 1520
},
{
"epoch": 0.6421940928270042,
"grad_norm": 1.3067121505737305,
"learning_rate": 9.978865945245473e-05,
"loss": 0.6942036151885986,
"step": 1522
},
{
"epoch": 0.6430379746835443,
"grad_norm": 1.5352400541305542,
"learning_rate": 9.978644805413832e-05,
"loss": 0.8281817436218262,
"step": 1524
},
{
"epoch": 0.6438818565400843,
"grad_norm": 1.2848507165908813,
"learning_rate": 9.97842251710445e-05,
"loss": 0.8110972046852112,
"step": 1526
},
{
"epoch": 0.6447257383966245,
"grad_norm": 1.352196216583252,
"learning_rate": 9.978199080368607e-05,
"loss": 0.7354730367660522,
"step": 1528
},
{
"epoch": 0.6455696202531646,
"grad_norm": 1.2427687644958496,
"learning_rate": 9.977974495257842e-05,
"loss": 0.7915583848953247,
"step": 1530
},
{
"epoch": 0.6464135021097046,
"grad_norm": 1.3163504600524902,
"learning_rate": 9.977748761823967e-05,
"loss": 0.7400109171867371,
"step": 1532
},
{
"epoch": 0.6472573839662448,
"grad_norm": 1.2496893405914307,
"learning_rate": 9.977521880119049e-05,
"loss": 0.7104899287223816,
"step": 1534
},
{
"epoch": 0.6481012658227848,
"grad_norm": 1.0907179117202759,
"learning_rate": 9.97729385019543e-05,
"loss": 0.8074463605880737,
"step": 1536
},
{
"epoch": 0.6489451476793249,
"grad_norm": 1.2323429584503174,
"learning_rate": 9.977064672105712e-05,
"loss": 0.7770540714263916,
"step": 1538
},
{
"epoch": 0.6497890295358649,
"grad_norm": 1.224428415298462,
"learning_rate": 9.976834345902759e-05,
"loss": 0.806465208530426,
"step": 1540
},
{
"epoch": 0.6506329113924051,
"grad_norm": 1.3529564142227173,
"learning_rate": 9.976602871639705e-05,
"loss": 0.7306749224662781,
"step": 1542
},
{
"epoch": 0.6514767932489451,
"grad_norm": 1.1770031452178955,
"learning_rate": 9.976370249369946e-05,
"loss": 0.783933699131012,
"step": 1544
},
{
"epoch": 0.6523206751054852,
"grad_norm": 1.205283522605896,
"learning_rate": 9.976136479147144e-05,
"loss": 0.6937689185142517,
"step": 1546
},
{
"epoch": 0.6531645569620254,
"grad_norm": 1.2329360246658325,
"learning_rate": 9.975901561025223e-05,
"loss": 0.8041763305664062,
"step": 1548
},
{
"epoch": 0.6540084388185654,
"grad_norm": 1.499973177909851,
"learning_rate": 9.975665495058377e-05,
"loss": 0.750390887260437,
"step": 1550
},
{
"epoch": 0.6548523206751055,
"grad_norm": 1.31832754611969,
"learning_rate": 9.975428281301061e-05,
"loss": 0.7658298015594482,
"step": 1552
},
{
"epoch": 0.6556962025316456,
"grad_norm": 1.3998414278030396,
"learning_rate": 9.975189919807994e-05,
"loss": 0.8651264905929565,
"step": 1554
},
{
"epoch": 0.6565400843881857,
"grad_norm": 1.2002551555633545,
"learning_rate": 9.974950410634164e-05,
"loss": 0.6776561141014099,
"step": 1556
},
{
"epoch": 0.6573839662447257,
"grad_norm": 1.1986602544784546,
"learning_rate": 9.97470975383482e-05,
"loss": 0.8159130811691284,
"step": 1558
},
{
"epoch": 0.6582278481012658,
"grad_norm": 1.3583602905273438,
"learning_rate": 9.974467949465477e-05,
"loss": 0.7528039216995239,
"step": 1560
},
{
"epoch": 0.6590717299578059,
"grad_norm": 1.4176239967346191,
"learning_rate": 9.974224997581913e-05,
"loss": 0.6970920562744141,
"step": 1562
},
{
"epoch": 0.659915611814346,
"grad_norm": 1.3899401426315308,
"learning_rate": 9.973980898240177e-05,
"loss": 0.7718377113342285,
"step": 1564
},
{
"epoch": 0.660759493670886,
"grad_norm": 1.222413182258606,
"learning_rate": 9.973735651496571e-05,
"loss": 0.7346280217170715,
"step": 1566
},
{
"epoch": 0.6616033755274262,
"grad_norm": 1.3750087022781372,
"learning_rate": 9.973489257407676e-05,
"loss": 0.7923588156700134,
"step": 1568
},
{
"epoch": 0.6624472573839663,
"grad_norm": 1.24547278881073,
"learning_rate": 9.973241716030325e-05,
"loss": 0.8258910179138184,
"step": 1570
},
{
"epoch": 0.6632911392405063,
"grad_norm": 1.2464141845703125,
"learning_rate": 9.972993027421624e-05,
"loss": 0.7869232296943665,
"step": 1572
},
{
"epoch": 0.6641350210970464,
"grad_norm": 1.3088903427124023,
"learning_rate": 9.972743191638939e-05,
"loss": 0.8144775629043579,
"step": 1574
},
{
"epoch": 0.6649789029535865,
"grad_norm": 1.2252418994903564,
"learning_rate": 9.972492208739903e-05,
"loss": 0.7432073950767517,
"step": 1576
},
{
"epoch": 0.6658227848101266,
"grad_norm": 1.2303717136383057,
"learning_rate": 9.972240078782413e-05,
"loss": 0.7386854887008667,
"step": 1578
},
{
"epoch": 0.6666666666666666,
"grad_norm": 1.0226294994354248,
"learning_rate": 9.971986801824631e-05,
"loss": 0.7127882838249207,
"step": 1580
},
{
"epoch": 0.6675105485232068,
"grad_norm": 1.362332820892334,
"learning_rate": 9.971732377924982e-05,
"loss": 0.7557716369628906,
"step": 1582
},
{
"epoch": 0.6683544303797468,
"grad_norm": 1.4436695575714111,
"learning_rate": 9.971476807142158e-05,
"loss": 0.7832611203193665,
"step": 1584
},
{
"epoch": 0.6691983122362869,
"grad_norm": 1.276695966720581,
"learning_rate": 9.971220089535113e-05,
"loss": 0.8190197944641113,
"step": 1586
},
{
"epoch": 0.6700421940928271,
"grad_norm": 1.2413527965545654,
"learning_rate": 9.970962225163069e-05,
"loss": 0.747222363948822,
"step": 1588
},
{
"epoch": 0.6708860759493671,
"grad_norm": 1.3395767211914062,
"learning_rate": 9.970703214085507e-05,
"loss": 0.7846449017524719,
"step": 1590
},
{
"epoch": 0.6717299578059072,
"grad_norm": 1.291327953338623,
"learning_rate": 9.970443056362178e-05,
"loss": 0.8160232901573181,
"step": 1592
},
{
"epoch": 0.6725738396624472,
"grad_norm": 1.3139684200286865,
"learning_rate": 9.970181752053097e-05,
"loss": 0.7413806915283203,
"step": 1594
},
{
"epoch": 0.6734177215189874,
"grad_norm": 1.3170921802520752,
"learning_rate": 9.969919301218537e-05,
"loss": 0.7637304067611694,
"step": 1596
},
{
"epoch": 0.6742616033755274,
"grad_norm": 1.3349758386611938,
"learning_rate": 9.969655703919044e-05,
"loss": 0.7823366522789001,
"step": 1598
},
{
"epoch": 0.6751054852320675,
"grad_norm": 1.2151578664779663,
"learning_rate": 9.969390960215425e-05,
"loss": 0.6587790846824646,
"step": 1600
},
{
"epoch": 0.6751054852320675,
"eval_loss": 0.7836604714393616,
"eval_runtime": 861.5352,
"eval_samples_per_second": 2.446,
"eval_steps_per_second": 2.446,
"step": 1600
},
{
"epoch": 0.6759493670886076,
"grad_norm": 1.2541478872299194,
"learning_rate": 9.96912507016875e-05,
"loss": 0.7314544320106506,
"step": 1602
},
{
"epoch": 0.6767932489451477,
"grad_norm": 1.091790795326233,
"learning_rate": 9.968858033840357e-05,
"loss": 0.702468752861023,
"step": 1604
},
{
"epoch": 0.6776371308016877,
"grad_norm": 1.36745285987854,
"learning_rate": 9.968589851291841e-05,
"loss": 0.7691897749900818,
"step": 1606
},
{
"epoch": 0.6784810126582278,
"grad_norm": 1.1325993537902832,
"learning_rate": 9.968320522585072e-05,
"loss": 0.7422228455543518,
"step": 1608
},
{
"epoch": 0.679324894514768,
"grad_norm": 1.1015450954437256,
"learning_rate": 9.968050047782176e-05,
"loss": 0.677532434463501,
"step": 1610
},
{
"epoch": 0.680168776371308,
"grad_norm": 1.2216695547103882,
"learning_rate": 9.967778426945548e-05,
"loss": 0.7973438501358032,
"step": 1612
},
{
"epoch": 0.6810126582278481,
"grad_norm": 1.159395456314087,
"learning_rate": 9.967505660137843e-05,
"loss": 0.6742876172065735,
"step": 1614
},
{
"epoch": 0.6818565400843882,
"grad_norm": 1.404433250427246,
"learning_rate": 9.967231747421988e-05,
"loss": 0.7592008709907532,
"step": 1616
},
{
"epoch": 0.6827004219409283,
"grad_norm": 1.2489168643951416,
"learning_rate": 9.966956688861164e-05,
"loss": 0.7565826177597046,
"step": 1618
},
{
"epoch": 0.6835443037974683,
"grad_norm": 1.2960615158081055,
"learning_rate": 9.966680484518825e-05,
"loss": 0.7694597840309143,
"step": 1620
},
{
"epoch": 0.6843881856540084,
"grad_norm": 1.3598436117172241,
"learning_rate": 9.966403134458685e-05,
"loss": 0.8392959833145142,
"step": 1622
},
{
"epoch": 0.6852320675105485,
"grad_norm": 1.258065938949585,
"learning_rate": 9.966124638744722e-05,
"loss": 0.8014217019081116,
"step": 1624
},
{
"epoch": 0.6860759493670886,
"grad_norm": 1.3132309913635254,
"learning_rate": 9.965844997441184e-05,
"loss": 0.7029755711555481,
"step": 1626
},
{
"epoch": 0.6869198312236287,
"grad_norm": 1.1204946041107178,
"learning_rate": 9.965564210612575e-05,
"loss": 0.7213528752326965,
"step": 1628
},
{
"epoch": 0.6877637130801688,
"grad_norm": 1.037251591682434,
"learning_rate": 9.965282278323667e-05,
"loss": 0.6895437240600586,
"step": 1630
},
{
"epoch": 0.6886075949367089,
"grad_norm": 1.093807578086853,
"learning_rate": 9.964999200639498e-05,
"loss": 0.8035063743591309,
"step": 1632
},
{
"epoch": 0.6894514767932489,
"grad_norm": 1.367386817932129,
"learning_rate": 9.964714977625367e-05,
"loss": 0.6191847920417786,
"step": 1634
},
{
"epoch": 0.6902953586497891,
"grad_norm": 1.3160961866378784,
"learning_rate": 9.964429609346841e-05,
"loss": 0.7469727993011475,
"step": 1636
},
{
"epoch": 0.6911392405063291,
"grad_norm": 1.3736863136291504,
"learning_rate": 9.964143095869748e-05,
"loss": 0.7987836599349976,
"step": 1638
},
{
"epoch": 0.6919831223628692,
"grad_norm": 1.323209524154663,
"learning_rate": 9.963855437260182e-05,
"loss": 0.7901709675788879,
"step": 1640
},
{
"epoch": 0.6928270042194092,
"grad_norm": 1.3943440914154053,
"learning_rate": 9.963566633584496e-05,
"loss": 0.7889530658721924,
"step": 1642
},
{
"epoch": 0.6936708860759494,
"grad_norm": 1.3699116706848145,
"learning_rate": 9.963276684909317e-05,
"loss": 0.756829559803009,
"step": 1644
},
{
"epoch": 0.6945147679324895,
"grad_norm": 1.4216378927230835,
"learning_rate": 9.962985591301529e-05,
"loss": 0.7840303182601929,
"step": 1646
},
{
"epoch": 0.6953586497890295,
"grad_norm": 1.2231985330581665,
"learning_rate": 9.962693352828279e-05,
"loss": 0.700393557548523,
"step": 1648
},
{
"epoch": 0.6962025316455697,
"grad_norm": 1.3568313121795654,
"learning_rate": 9.962399969556983e-05,
"loss": 0.7010306715965271,
"step": 1650
},
{
"epoch": 0.6970464135021097,
"grad_norm": 1.1662907600402832,
"learning_rate": 9.96210544155532e-05,
"loss": 0.6935506463050842,
"step": 1652
},
{
"epoch": 0.6978902953586498,
"grad_norm": 1.3066680431365967,
"learning_rate": 9.96180976889123e-05,
"loss": 0.7913851141929626,
"step": 1654
},
{
"epoch": 0.6987341772151898,
"grad_norm": 1.2268375158309937,
"learning_rate": 9.961512951632918e-05,
"loss": 0.764849066734314,
"step": 1656
},
{
"epoch": 0.69957805907173,
"grad_norm": 1.4509469270706177,
"learning_rate": 9.96121498984886e-05,
"loss": 0.7544103860855103,
"step": 1658
},
{
"epoch": 0.70042194092827,
"grad_norm": 1.200772762298584,
"learning_rate": 9.960915883607782e-05,
"loss": 0.7766591310501099,
"step": 1660
},
{
"epoch": 0.7012658227848101,
"grad_norm": 1.3825311660766602,
"learning_rate": 9.960615632978687e-05,
"loss": 0.7433559894561768,
"step": 1662
},
{
"epoch": 0.7021097046413503,
"grad_norm": 1.3197243213653564,
"learning_rate": 9.960314238030836e-05,
"loss": 0.7770103812217712,
"step": 1664
},
{
"epoch": 0.7029535864978903,
"grad_norm": 1.515163779258728,
"learning_rate": 9.960011698833755e-05,
"loss": 0.8597216606140137,
"step": 1666
},
{
"epoch": 0.7037974683544304,
"grad_norm": 1.2329891920089722,
"learning_rate": 9.959708015457234e-05,
"loss": 0.7630532383918762,
"step": 1668
},
{
"epoch": 0.7046413502109705,
"grad_norm": 1.0592037439346313,
"learning_rate": 9.959403187971327e-05,
"loss": 0.7299806475639343,
"step": 1670
},
{
"epoch": 0.7054852320675106,
"grad_norm": 2.2717394828796387,
"learning_rate": 9.959097216446351e-05,
"loss": 0.6999854445457458,
"step": 1672
},
{
"epoch": 0.7063291139240506,
"grad_norm": 1.1552131175994873,
"learning_rate": 9.958790100952889e-05,
"loss": 0.8403060436248779,
"step": 1674
},
{
"epoch": 0.7071729957805907,
"grad_norm": 1.290488839149475,
"learning_rate": 9.958481841561787e-05,
"loss": 0.7729134559631348,
"step": 1676
},
{
"epoch": 0.7080168776371308,
"grad_norm": 1.1913278102874756,
"learning_rate": 9.958172438344152e-05,
"loss": 0.7100697755813599,
"step": 1678
},
{
"epoch": 0.7088607594936709,
"grad_norm": 1.2355852127075195,
"learning_rate": 9.957861891371359e-05,
"loss": 0.7014795541763306,
"step": 1680
},
{
"epoch": 0.7097046413502109,
"grad_norm": 1.258705496788025,
"learning_rate": 9.957550200715044e-05,
"loss": 0.8131424784660339,
"step": 1682
},
{
"epoch": 0.7105485232067511,
"grad_norm": 1.1102997064590454,
"learning_rate": 9.957237366447112e-05,
"loss": 0.6842480301856995,
"step": 1684
},
{
"epoch": 0.7113924050632912,
"grad_norm": 1.4466290473937988,
"learning_rate": 9.956923388639724e-05,
"loss": 0.6730120182037354,
"step": 1686
},
{
"epoch": 0.7122362869198312,
"grad_norm": 1.261152982711792,
"learning_rate": 9.956608267365311e-05,
"loss": 0.7109374403953552,
"step": 1688
},
{
"epoch": 0.7130801687763713,
"grad_norm": 1.4070630073547363,
"learning_rate": 9.956292002696562e-05,
"loss": 0.7545008063316345,
"step": 1690
},
{
"epoch": 0.7139240506329114,
"grad_norm": 1.2532793283462524,
"learning_rate": 9.955974594706436e-05,
"loss": 0.7892587184906006,
"step": 1692
},
{
"epoch": 0.7147679324894515,
"grad_norm": 1.1180293560028076,
"learning_rate": 9.955656043468153e-05,
"loss": 0.7348554134368896,
"step": 1694
},
{
"epoch": 0.7156118143459915,
"grad_norm": 1.333054542541504,
"learning_rate": 9.955336349055195e-05,
"loss": 0.8207674026489258,
"step": 1696
},
{
"epoch": 0.7164556962025317,
"grad_norm": 1.1373547315597534,
"learning_rate": 9.95501551154131e-05,
"loss": 0.7226691842079163,
"step": 1698
},
{
"epoch": 0.7172995780590717,
"grad_norm": 1.2342052459716797,
"learning_rate": 9.95469353100051e-05,
"loss": 0.726982831954956,
"step": 1700
},
{
"epoch": 0.7172995780590717,
"eval_loss": 0.7783148884773254,
"eval_runtime": 846.1986,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 1700
},
{
"epoch": 0.7181434599156118,
"grad_norm": 1.3781483173370361,
"learning_rate": 9.95437040750707e-05,
"loss": 0.7623077034950256,
"step": 1702
},
{
"epoch": 0.7189873417721518,
"grad_norm": 1.301440715789795,
"learning_rate": 9.954046141135526e-05,
"loss": 0.7421616315841675,
"step": 1704
},
{
"epoch": 0.719831223628692,
"grad_norm": 1.1375854015350342,
"learning_rate": 9.953720731960683e-05,
"loss": 0.685523509979248,
"step": 1706
},
{
"epoch": 0.7206751054852321,
"grad_norm": 1.2014397382736206,
"learning_rate": 9.953394180057604e-05,
"loss": 0.756073534488678,
"step": 1708
},
{
"epoch": 0.7215189873417721,
"grad_norm": 1.232802152633667,
"learning_rate": 9.95306648550162e-05,
"loss": 0.7364522814750671,
"step": 1710
},
{
"epoch": 0.7223628691983123,
"grad_norm": 1.4462472200393677,
"learning_rate": 9.952737648368323e-05,
"loss": 0.7073688507080078,
"step": 1712
},
{
"epoch": 0.7232067510548523,
"grad_norm": 1.123523473739624,
"learning_rate": 9.95240766873357e-05,
"loss": 0.7147064805030823,
"step": 1714
},
{
"epoch": 0.7240506329113924,
"grad_norm": 1.4111510515213013,
"learning_rate": 9.95207654667348e-05,
"loss": 0.7108398079872131,
"step": 1716
},
{
"epoch": 0.7248945147679325,
"grad_norm": 1.2785903215408325,
"learning_rate": 9.951744282264437e-05,
"loss": 0.7080079317092896,
"step": 1718
},
{
"epoch": 0.7257383966244726,
"grad_norm": 1.1361653804779053,
"learning_rate": 9.951410875583089e-05,
"loss": 0.7396624684333801,
"step": 1720
},
{
"epoch": 0.7265822784810126,
"grad_norm": 1.0762585401535034,
"learning_rate": 9.951076326706346e-05,
"loss": 0.7724334597587585,
"step": 1722
},
{
"epoch": 0.7274261603375527,
"grad_norm": 1.3104428052902222,
"learning_rate": 9.950740635711379e-05,
"loss": 0.7311923503875732,
"step": 1724
},
{
"epoch": 0.7282700421940929,
"grad_norm": 1.1291942596435547,
"learning_rate": 9.95040380267563e-05,
"loss": 0.6878296732902527,
"step": 1726
},
{
"epoch": 0.7291139240506329,
"grad_norm": 1.5171746015548706,
"learning_rate": 9.9500658276768e-05,
"loss": 0.7410538196563721,
"step": 1728
},
{
"epoch": 0.729957805907173,
"grad_norm": 1.0966423749923706,
"learning_rate": 9.949726710792848e-05,
"loss": 0.6953532695770264,
"step": 1730
},
{
"epoch": 0.7308016877637131,
"grad_norm": 1.2436997890472412,
"learning_rate": 9.949386452102007e-05,
"loss": 0.6679023504257202,
"step": 1732
},
{
"epoch": 0.7316455696202532,
"grad_norm": 1.1364835500717163,
"learning_rate": 9.949045051682766e-05,
"loss": 0.8046789765357971,
"step": 1734
},
{
"epoch": 0.7324894514767932,
"grad_norm": 1.296648383140564,
"learning_rate": 9.948702509613878e-05,
"loss": 0.7322937846183777,
"step": 1736
},
{
"epoch": 0.7333333333333333,
"grad_norm": 1.2355525493621826,
"learning_rate": 9.948358825974365e-05,
"loss": 0.7442626357078552,
"step": 1738
},
{
"epoch": 0.7341772151898734,
"grad_norm": 1.1634451150894165,
"learning_rate": 9.948014000843504e-05,
"loss": 0.7231078743934631,
"step": 1740
},
{
"epoch": 0.7350210970464135,
"grad_norm": 1.1500129699707031,
"learning_rate": 9.947668034300843e-05,
"loss": 0.6436833143234253,
"step": 1742
},
{
"epoch": 0.7358649789029535,
"grad_norm": 1.3881278038024902,
"learning_rate": 9.947320926426189e-05,
"loss": 0.8170580863952637,
"step": 1744
},
{
"epoch": 0.7367088607594937,
"grad_norm": 1.3479492664337158,
"learning_rate": 9.94697267729961e-05,
"loss": 0.7830947041511536,
"step": 1746
},
{
"epoch": 0.7375527426160338,
"grad_norm": 1.0187158584594727,
"learning_rate": 9.946623287001444e-05,
"loss": 0.7358533143997192,
"step": 1748
},
{
"epoch": 0.7383966244725738,
"grad_norm": 1.2575689554214478,
"learning_rate": 9.946272755612287e-05,
"loss": 0.7279790639877319,
"step": 1750
},
{
"epoch": 0.739240506329114,
"grad_norm": 1.2045027017593384,
"learning_rate": 9.945921083213002e-05,
"loss": 0.6953092217445374,
"step": 1752
},
{
"epoch": 0.740084388185654,
"grad_norm": 1.3994466066360474,
"learning_rate": 9.945568269884708e-05,
"loss": 0.8094141483306885,
"step": 1754
},
{
"epoch": 0.7409282700421941,
"grad_norm": 1.2892286777496338,
"learning_rate": 9.945214315708797e-05,
"loss": 0.6979201436042786,
"step": 1756
},
{
"epoch": 0.7417721518987341,
"grad_norm": 1.2006971836090088,
"learning_rate": 9.944859220766919e-05,
"loss": 0.6810774803161621,
"step": 1758
},
{
"epoch": 0.7426160337552743,
"grad_norm": 1.055793285369873,
"learning_rate": 9.944502985140986e-05,
"loss": 0.6796762347221375,
"step": 1760
},
{
"epoch": 0.7434599156118143,
"grad_norm": 1.174714207649231,
"learning_rate": 9.944145608913175e-05,
"loss": 0.7954121828079224,
"step": 1762
},
{
"epoch": 0.7443037974683544,
"grad_norm": 1.1638222932815552,
"learning_rate": 9.943787092165926e-05,
"loss": 0.6939491629600525,
"step": 1764
},
{
"epoch": 0.7451476793248946,
"grad_norm": 1.1861820220947266,
"learning_rate": 9.943427434981942e-05,
"loss": 0.8112956285476685,
"step": 1766
},
{
"epoch": 0.7459915611814346,
"grad_norm": 0.9667421579360962,
"learning_rate": 9.943066637444189e-05,
"loss": 0.6812481880187988,
"step": 1768
},
{
"epoch": 0.7468354430379747,
"grad_norm": 1.2826191186904907,
"learning_rate": 9.942704699635898e-05,
"loss": 0.7598370313644409,
"step": 1770
},
{
"epoch": 0.7476793248945147,
"grad_norm": 1.2257909774780273,
"learning_rate": 9.942341621640558e-05,
"loss": 0.7118877172470093,
"step": 1772
},
{
"epoch": 0.7485232067510549,
"grad_norm": 1.5224615335464478,
"learning_rate": 9.941977403541925e-05,
"loss": 0.8037024736404419,
"step": 1774
},
{
"epoch": 0.7493670886075949,
"grad_norm": 1.188689947128296,
"learning_rate": 9.941612045424018e-05,
"loss": 0.6795828938484192,
"step": 1776
},
{
"epoch": 0.750210970464135,
"grad_norm": 1.0685369968414307,
"learning_rate": 9.941245547371116e-05,
"loss": 0.6934568881988525,
"step": 1778
},
{
"epoch": 0.7510548523206751,
"grad_norm": 1.1643654108047485,
"learning_rate": 9.940877909467767e-05,
"loss": 0.6883851289749146,
"step": 1780
},
{
"epoch": 0.7518987341772152,
"grad_norm": 1.15621018409729,
"learning_rate": 9.940509131798775e-05,
"loss": 0.8284637928009033,
"step": 1782
},
{
"epoch": 0.7527426160337553,
"grad_norm": 1.1946302652359009,
"learning_rate": 9.94013921444921e-05,
"loss": 0.7108310461044312,
"step": 1784
},
{
"epoch": 0.7535864978902953,
"grad_norm": 1.1536555290222168,
"learning_rate": 9.939768157504404e-05,
"loss": 0.7166154384613037,
"step": 1786
},
{
"epoch": 0.7544303797468355,
"grad_norm": 1.3184611797332764,
"learning_rate": 9.939395961049956e-05,
"loss": 0.7774572372436523,
"step": 1788
},
{
"epoch": 0.7552742616033755,
"grad_norm": 1.0782374143600464,
"learning_rate": 9.939022625171723e-05,
"loss": 0.7386471033096313,
"step": 1790
},
{
"epoch": 0.7561181434599156,
"grad_norm": 1.1616696119308472,
"learning_rate": 9.938648149955824e-05,
"loss": 0.6495215892791748,
"step": 1792
},
{
"epoch": 0.7569620253164557,
"grad_norm": 1.1715892553329468,
"learning_rate": 9.938272535488647e-05,
"loss": 0.7733646631240845,
"step": 1794
},
{
"epoch": 0.7578059071729958,
"grad_norm": 1.203466773033142,
"learning_rate": 9.937895781856838e-05,
"loss": 0.7354782223701477,
"step": 1796
},
{
"epoch": 0.7586497890295358,
"grad_norm": 1.246559977531433,
"learning_rate": 9.937517889147305e-05,
"loss": 0.823226273059845,
"step": 1798
},
{
"epoch": 0.759493670886076,
"grad_norm": 0.9968833923339844,
"learning_rate": 9.937138857447221e-05,
"loss": 0.6221681833267212,
"step": 1800
},
{
"epoch": 0.759493670886076,
"eval_loss": 0.7719914317131042,
"eval_runtime": 853.1943,
"eval_samples_per_second": 2.47,
"eval_steps_per_second": 2.47,
"step": 1800
},
{
"epoch": 0.760337552742616,
"grad_norm": 1.5454338788986206,
"learning_rate": 9.936758686844024e-05,
"loss": 0.7799059152603149,
"step": 1802
},
{
"epoch": 0.7611814345991561,
"grad_norm": 1.1954455375671387,
"learning_rate": 9.936377377425409e-05,
"loss": 0.653838038444519,
"step": 1804
},
{
"epoch": 0.7620253164556962,
"grad_norm": 1.2538350820541382,
"learning_rate": 9.935994929279339e-05,
"loss": 0.7046942710876465,
"step": 1806
},
{
"epoch": 0.7628691983122363,
"grad_norm": 1.2358729839324951,
"learning_rate": 9.935611342494035e-05,
"loss": 0.7821131348609924,
"step": 1808
},
{
"epoch": 0.7637130801687764,
"grad_norm": 1.2401310205459595,
"learning_rate": 9.935226617157986e-05,
"loss": 0.7594596147537231,
"step": 1810
},
{
"epoch": 0.7645569620253164,
"grad_norm": 1.3197205066680908,
"learning_rate": 9.934840753359938e-05,
"loss": 0.7512493133544922,
"step": 1812
},
{
"epoch": 0.7654008438818566,
"grad_norm": 1.2482305765151978,
"learning_rate": 9.934453751188903e-05,
"loss": 0.6953311562538147,
"step": 1814
},
{
"epoch": 0.7662447257383966,
"grad_norm": 1.5995157957077026,
"learning_rate": 9.934065610734157e-05,
"loss": 0.7699819803237915,
"step": 1816
},
{
"epoch": 0.7670886075949367,
"grad_norm": 1.2414922714233398,
"learning_rate": 9.933676332085235e-05,
"loss": 0.6532001495361328,
"step": 1818
},
{
"epoch": 0.7679324894514767,
"grad_norm": 1.2274713516235352,
"learning_rate": 9.933285915331937e-05,
"loss": 0.7716373801231384,
"step": 1820
},
{
"epoch": 0.7687763713080169,
"grad_norm": 1.2894618511199951,
"learning_rate": 9.932894360564322e-05,
"loss": 0.7002654671669006,
"step": 1822
},
{
"epoch": 0.769620253164557,
"grad_norm": 1.10796320438385,
"learning_rate": 9.932501667872718e-05,
"loss": 0.7970587015151978,
"step": 1824
},
{
"epoch": 0.770464135021097,
"grad_norm": 1.2393653392791748,
"learning_rate": 9.932107837347708e-05,
"loss": 0.8071644306182861,
"step": 1826
},
{
"epoch": 0.7713080168776372,
"grad_norm": 1.1999030113220215,
"learning_rate": 9.931712869080144e-05,
"loss": 0.7376157641410828,
"step": 1828
},
{
"epoch": 0.7721518987341772,
"grad_norm": 1.1166026592254639,
"learning_rate": 9.931316763161135e-05,
"loss": 0.7487053275108337,
"step": 1830
},
{
"epoch": 0.7729957805907173,
"grad_norm": 1.1788052320480347,
"learning_rate": 9.930919519682059e-05,
"loss": 0.733161985874176,
"step": 1832
},
{
"epoch": 0.7738396624472574,
"grad_norm": 1.309968113899231,
"learning_rate": 9.930521138734548e-05,
"loss": 0.7907692790031433,
"step": 1834
},
{
"epoch": 0.7746835443037975,
"grad_norm": 1.1685889959335327,
"learning_rate": 9.930121620410502e-05,
"loss": 0.7192210555076599,
"step": 1836
},
{
"epoch": 0.7755274261603375,
"grad_norm": 1.2243701219558716,
"learning_rate": 9.929720964802085e-05,
"loss": 0.7394438982009888,
"step": 1838
},
{
"epoch": 0.7763713080168776,
"grad_norm": 1.2940958738327026,
"learning_rate": 9.929319172001717e-05,
"loss": 0.7885041832923889,
"step": 1840
},
{
"epoch": 0.7772151898734178,
"grad_norm": 1.0952763557434082,
"learning_rate": 9.928916242102086e-05,
"loss": 0.6822885274887085,
"step": 1842
},
{
"epoch": 0.7780590717299578,
"grad_norm": 1.0333503484725952,
"learning_rate": 9.928512175196139e-05,
"loss": 0.7070927619934082,
"step": 1844
},
{
"epoch": 0.7789029535864979,
"grad_norm": 1.201359510421753,
"learning_rate": 9.928106971377088e-05,
"loss": 0.7041296362876892,
"step": 1846
},
{
"epoch": 0.779746835443038,
"grad_norm": 1.5381278991699219,
"learning_rate": 9.927700630738404e-05,
"loss": 0.6630192995071411,
"step": 1848
},
{
"epoch": 0.7805907172995781,
"grad_norm": 1.2858322858810425,
"learning_rate": 9.927293153373823e-05,
"loss": 0.7628101110458374,
"step": 1850
},
{
"epoch": 0.7814345991561181,
"grad_norm": 1.3730580806732178,
"learning_rate": 9.926884539377343e-05,
"loss": 0.7557390928268433,
"step": 1852
},
{
"epoch": 0.7822784810126582,
"grad_norm": 1.4954931735992432,
"learning_rate": 9.92647478884322e-05,
"loss": 0.8217329978942871,
"step": 1854
},
{
"epoch": 0.7831223628691983,
"grad_norm": 1.1092652082443237,
"learning_rate": 9.92606390186598e-05,
"loss": 0.672879695892334,
"step": 1856
},
{
"epoch": 0.7839662447257384,
"grad_norm": 1.2077893018722534,
"learning_rate": 9.925651878540404e-05,
"loss": 0.7380653619766235,
"step": 1858
},
{
"epoch": 0.7848101265822784,
"grad_norm": 1.0789313316345215,
"learning_rate": 9.925238718961538e-05,
"loss": 0.6648160219192505,
"step": 1860
},
{
"epoch": 0.7856540084388186,
"grad_norm": 1.3950812816619873,
"learning_rate": 9.924824423224692e-05,
"loss": 0.8316769003868103,
"step": 1862
},
{
"epoch": 0.7864978902953587,
"grad_norm": 1.3934763669967651,
"learning_rate": 9.924408991425433e-05,
"loss": 0.7901778817176819,
"step": 1864
},
{
"epoch": 0.7873417721518987,
"grad_norm": 1.2191659212112427,
"learning_rate": 9.923992423659596e-05,
"loss": 0.7643826007843018,
"step": 1866
},
{
"epoch": 0.7881856540084389,
"grad_norm": 0.986673891544342,
"learning_rate": 9.923574720023274e-05,
"loss": 0.6314064860343933,
"step": 1868
},
{
"epoch": 0.7890295358649789,
"grad_norm": 1.003552794456482,
"learning_rate": 9.923155880612823e-05,
"loss": 0.8244763016700745,
"step": 1870
},
{
"epoch": 0.789873417721519,
"grad_norm": 1.0831382274627686,
"learning_rate": 9.92273590552486e-05,
"loss": 0.7398403882980347,
"step": 1872
},
{
"epoch": 0.790717299578059,
"grad_norm": 1.1782667636871338,
"learning_rate": 9.922314794856267e-05,
"loss": 0.735211968421936,
"step": 1874
},
{
"epoch": 0.7915611814345992,
"grad_norm": 2.230534076690674,
"learning_rate": 9.921892548704186e-05,
"loss": 0.7550510764122009,
"step": 1876
},
{
"epoch": 0.7924050632911392,
"grad_norm": 1.0191401243209839,
"learning_rate": 9.92146916716602e-05,
"loss": 0.7676286697387695,
"step": 1878
},
{
"epoch": 0.7932489451476793,
"grad_norm": 1.1347072124481201,
"learning_rate": 9.921044650339438e-05,
"loss": 0.7409467697143555,
"step": 1880
},
{
"epoch": 0.7940928270042195,
"grad_norm": 1.107528567314148,
"learning_rate": 9.920618998322364e-05,
"loss": 0.7760165333747864,
"step": 1882
},
{
"epoch": 0.7949367088607595,
"grad_norm": 1.1110666990280151,
"learning_rate": 9.92019221121299e-05,
"loss": 0.7360131740570068,
"step": 1884
},
{
"epoch": 0.7957805907172996,
"grad_norm": 1.267580509185791,
"learning_rate": 9.919764289109765e-05,
"loss": 0.7784845232963562,
"step": 1886
},
{
"epoch": 0.7966244725738396,
"grad_norm": 1.5894557237625122,
"learning_rate": 9.919335232111407e-05,
"loss": 0.7880831360816956,
"step": 1888
},
{
"epoch": 0.7974683544303798,
"grad_norm": 1.1906384229660034,
"learning_rate": 9.918905040316886e-05,
"loss": 0.7315587997436523,
"step": 1890
},
{
"epoch": 0.7983122362869198,
"grad_norm": 1.3626811504364014,
"learning_rate": 9.918473713825445e-05,
"loss": 0.7808622121810913,
"step": 1892
},
{
"epoch": 0.7991561181434599,
"grad_norm": 1.1801300048828125,
"learning_rate": 9.918041252736577e-05,
"loss": 0.7055642604827881,
"step": 1894
},
{
"epoch": 0.8,
"grad_norm": 1.2669063806533813,
"learning_rate": 9.917607657150046e-05,
"loss": 0.7188893556594849,
"step": 1896
},
{
"epoch": 0.8008438818565401,
"grad_norm": 1.1746855974197388,
"learning_rate": 9.91717292716587e-05,
"loss": 0.7787454128265381,
"step": 1898
},
{
"epoch": 0.8016877637130801,
"grad_norm": 1.120012640953064,
"learning_rate": 9.916737062884338e-05,
"loss": 0.720715343952179,
"step": 1900
},
{
"epoch": 0.8016877637130801,
"eval_loss": 0.7648926973342896,
"eval_runtime": 865.9394,
"eval_samples_per_second": 2.433,
"eval_steps_per_second": 2.433,
"step": 1900
},
{
"epoch": 0.8025316455696202,
"grad_norm": 1.1745549440383911,
"learning_rate": 9.916300064405993e-05,
"loss": 0.7544789910316467,
"step": 1902
},
{
"epoch": 0.8033755274261604,
"grad_norm": 1.1439874172210693,
"learning_rate": 9.915861931831643e-05,
"loss": 0.7479203343391418,
"step": 1904
},
{
"epoch": 0.8042194092827004,
"grad_norm": 1.3508219718933105,
"learning_rate": 9.915422665262356e-05,
"loss": 0.6995842456817627,
"step": 1906
},
{
"epoch": 0.8050632911392405,
"grad_norm": 1.1519006490707397,
"learning_rate": 9.914982264799462e-05,
"loss": 0.7152725458145142,
"step": 1908
},
{
"epoch": 0.8059071729957806,
"grad_norm": 1.0818005800247192,
"learning_rate": 9.914540730544554e-05,
"loss": 0.7105516195297241,
"step": 1910
},
{
"epoch": 0.8067510548523207,
"grad_norm": 1.1611127853393555,
"learning_rate": 9.914098062599485e-05,
"loss": 0.6911059617996216,
"step": 1912
},
{
"epoch": 0.8075949367088607,
"grad_norm": 1.1964445114135742,
"learning_rate": 9.91365426106637e-05,
"loss": 0.6897286772727966,
"step": 1914
},
{
"epoch": 0.8084388185654009,
"grad_norm": 1.3873497247695923,
"learning_rate": 9.913209326047585e-05,
"loss": 0.7263250350952148,
"step": 1916
},
{
"epoch": 0.809282700421941,
"grad_norm": 1.1729894876480103,
"learning_rate": 9.91276325764577e-05,
"loss": 0.7045295238494873,
"step": 1918
},
{
"epoch": 0.810126582278481,
"grad_norm": 0.9089694619178772,
"learning_rate": 9.912316055963822e-05,
"loss": 0.587131142616272,
"step": 1920
},
{
"epoch": 0.810970464135021,
"grad_norm": 1.2051384449005127,
"learning_rate": 9.911867721104902e-05,
"loss": 0.7237880229949951,
"step": 1922
},
{
"epoch": 0.8118143459915612,
"grad_norm": 1.2152670621871948,
"learning_rate": 9.911418253172433e-05,
"loss": 0.6967294216156006,
"step": 1924
},
{
"epoch": 0.8126582278481013,
"grad_norm": 1.1193642616271973,
"learning_rate": 9.9109676522701e-05,
"loss": 0.7636315822601318,
"step": 1926
},
{
"epoch": 0.8135021097046413,
"grad_norm": 1.2457597255706787,
"learning_rate": 9.910515918501843e-05,
"loss": 0.7451969981193542,
"step": 1928
},
{
"epoch": 0.8143459915611815,
"grad_norm": 1.057009220123291,
"learning_rate": 9.910063051971876e-05,
"loss": 0.6320056319236755,
"step": 1930
},
{
"epoch": 0.8151898734177215,
"grad_norm": 1.2820258140563965,
"learning_rate": 9.909609052784661e-05,
"loss": 0.691004753112793,
"step": 1932
},
{
"epoch": 0.8160337552742616,
"grad_norm": 1.331312656402588,
"learning_rate": 9.909153921044927e-05,
"loss": 0.7741923332214355,
"step": 1934
},
{
"epoch": 0.8168776371308016,
"grad_norm": 1.2055360078811646,
"learning_rate": 9.908697656857668e-05,
"loss": 0.668049156665802,
"step": 1936
},
{
"epoch": 0.8177215189873418,
"grad_norm": 1.2124541997909546,
"learning_rate": 9.90824026032813e-05,
"loss": 0.6584748029708862,
"step": 1938
},
{
"epoch": 0.8185654008438819,
"grad_norm": 1.244288682937622,
"learning_rate": 9.90778173156183e-05,
"loss": 0.7081992626190186,
"step": 1940
},
{
"epoch": 0.8194092827004219,
"grad_norm": 1.250558853149414,
"learning_rate": 9.907322070664542e-05,
"loss": 0.7977840900421143,
"step": 1942
},
{
"epoch": 0.8202531645569621,
"grad_norm": 1.3892892599105835,
"learning_rate": 9.906861277742297e-05,
"loss": 0.7830103635787964,
"step": 1944
},
{
"epoch": 0.8210970464135021,
"grad_norm": 1.3152644634246826,
"learning_rate": 9.906399352901393e-05,
"loss": 0.8451479077339172,
"step": 1946
},
{
"epoch": 0.8219409282700422,
"grad_norm": 1.1102250814437866,
"learning_rate": 9.905936296248388e-05,
"loss": 0.7035528421401978,
"step": 1948
},
{
"epoch": 0.8227848101265823,
"grad_norm": 1.0271214246749878,
"learning_rate": 9.905472107890101e-05,
"loss": 0.764616847038269,
"step": 1950
},
{
"epoch": 0.8236286919831224,
"grad_norm": 1.1772255897521973,
"learning_rate": 9.905006787933609e-05,
"loss": 0.7699717283248901,
"step": 1952
},
{
"epoch": 0.8244725738396624,
"grad_norm": 1.2486404180526733,
"learning_rate": 9.904540336486252e-05,
"loss": 0.7755605578422546,
"step": 1954
},
{
"epoch": 0.8253164556962025,
"grad_norm": 1.070148229598999,
"learning_rate": 9.904072753655635e-05,
"loss": 0.688934326171875,
"step": 1956
},
{
"epoch": 0.8261603375527427,
"grad_norm": 1.118401288986206,
"learning_rate": 9.903604039549617e-05,
"loss": 0.7447791695594788,
"step": 1958
},
{
"epoch": 0.8270042194092827,
"grad_norm": 1.2209899425506592,
"learning_rate": 9.903134194276323e-05,
"loss": 0.7990683317184448,
"step": 1960
},
{
"epoch": 0.8278481012658228,
"grad_norm": 1.296093225479126,
"learning_rate": 9.902663217944137e-05,
"loss": 0.7290873527526855,
"step": 1962
},
{
"epoch": 0.8286919831223629,
"grad_norm": 1.2594937086105347,
"learning_rate": 9.902191110661704e-05,
"loss": 0.7971217036247253,
"step": 1964
},
{
"epoch": 0.829535864978903,
"grad_norm": 1.6016536951065063,
"learning_rate": 9.90171787253793e-05,
"loss": 0.6728768348693848,
"step": 1966
},
{
"epoch": 0.830379746835443,
"grad_norm": 3.3128950595855713,
"learning_rate": 9.901243503681983e-05,
"loss": 0.7684211730957031,
"step": 1968
},
{
"epoch": 0.8312236286919831,
"grad_norm": 1.2970373630523682,
"learning_rate": 9.90076800420329e-05,
"loss": 0.756637454032898,
"step": 1970
},
{
"epoch": 0.8320675105485232,
"grad_norm": 1.1388959884643555,
"learning_rate": 9.900291374211538e-05,
"loss": 0.6692084074020386,
"step": 1972
},
{
"epoch": 0.8329113924050633,
"grad_norm": 1.050641655921936,
"learning_rate": 9.899813613816677e-05,
"loss": 0.7298309803009033,
"step": 1974
},
{
"epoch": 0.8337552742616033,
"grad_norm": 1.2598577737808228,
"learning_rate": 9.899334723128922e-05,
"loss": 0.6886547803878784,
"step": 1976
},
{
"epoch": 0.8345991561181435,
"grad_norm": 1.2800767421722412,
"learning_rate": 9.898854702258735e-05,
"loss": 0.745341420173645,
"step": 1978
},
{
"epoch": 0.8354430379746836,
"grad_norm": 1.1923155784606934,
"learning_rate": 9.898373551316856e-05,
"loss": 0.7133575081825256,
"step": 1980
},
{
"epoch": 0.8362869198312236,
"grad_norm": 1.156121015548706,
"learning_rate": 9.897891270414272e-05,
"loss": 0.8117790818214417,
"step": 1982
},
{
"epoch": 0.8371308016877637,
"grad_norm": 1.0400618314743042,
"learning_rate": 9.897407859662238e-05,
"loss": 0.6094260215759277,
"step": 1984
},
{
"epoch": 0.8379746835443038,
"grad_norm": 1.451953411102295,
"learning_rate": 9.896923319172268e-05,
"loss": 0.7680332064628601,
"step": 1986
},
{
"epoch": 0.8388185654008439,
"grad_norm": 1.2560248374938965,
"learning_rate": 9.896437649056134e-05,
"loss": 0.6918784379959106,
"step": 1988
},
{
"epoch": 0.8396624472573839,
"grad_norm": 1.2744325399398804,
"learning_rate": 9.895950849425874e-05,
"loss": 0.7654696106910706,
"step": 1990
},
{
"epoch": 0.8405063291139241,
"grad_norm": 1.304439902305603,
"learning_rate": 9.895462920393781e-05,
"loss": 0.7585932612419128,
"step": 1992
},
{
"epoch": 0.8413502109704641,
"grad_norm": 1.578957200050354,
"learning_rate": 9.89497386207241e-05,
"loss": 0.7474164962768555,
"step": 1994
},
{
"epoch": 0.8421940928270042,
"grad_norm": 1.0358996391296387,
"learning_rate": 9.89448367457458e-05,
"loss": 0.663844883441925,
"step": 1996
},
{
"epoch": 0.8430379746835444,
"grad_norm": 1.2285103797912598,
"learning_rate": 9.893992358013366e-05,
"loss": 0.7578557729721069,
"step": 1998
},
{
"epoch": 0.8438818565400844,
"grad_norm": 1.2051875591278076,
"learning_rate": 9.893499912502108e-05,
"loss": 0.7795036435127258,
"step": 2000
},
{
"epoch": 0.8438818565400844,
"eval_loss": 0.7587011456489563,
"eval_runtime": 856.2276,
"eval_samples_per_second": 2.461,
"eval_steps_per_second": 2.461,
"step": 2000
}
],
"logging_steps": 2,
"max_steps": 14220,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.001
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.0761430952197837e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}