{
"best_global_step": 13500,
"best_metric": 0.6786578297615051,
"best_model_checkpoint": "outputs/full_finetune/checkpoint-13500",
"epoch": 1.8034199452274398,
"eval_steps": 250,
"global_step": 13500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013359161044686394,
"grad_norm": 6.53125,
"learning_rate": 4.9969943895271175e-06,
"loss": 1.6242,
"step": 10
},
{
"epoch": 0.002671832208937279,
"grad_norm": 5.625,
"learning_rate": 4.993654822335026e-06,
"loss": 1.185,
"step": 20
},
{
"epoch": 0.004007748313405918,
"grad_norm": 4.125,
"learning_rate": 4.990315255142934e-06,
"loss": 1.0613,
"step": 30
},
{
"epoch": 0.005343664417874558,
"grad_norm": 4.15625,
"learning_rate": 4.986975687950842e-06,
"loss": 1.0374,
"step": 40
},
{
"epoch": 0.0066795805223431965,
"grad_norm": 3.65625,
"learning_rate": 4.98363612075875e-06,
"loss": 0.9817,
"step": 50
},
{
"epoch": 0.008015496626811836,
"grad_norm": 3.828125,
"learning_rate": 4.980296553566658e-06,
"loss": 0.9144,
"step": 60
},
{
"epoch": 0.009351412731280475,
"grad_norm": 3.59375,
"learning_rate": 4.976956986374567e-06,
"loss": 0.9283,
"step": 70
},
{
"epoch": 0.010687328835749115,
"grad_norm": 3.671875,
"learning_rate": 4.9736174191824745e-06,
"loss": 0.922,
"step": 80
},
{
"epoch": 0.012023244940217755,
"grad_norm": 3.765625,
"learning_rate": 4.970277851990383e-06,
"loss": 0.9555,
"step": 90
},
{
"epoch": 0.013359161044686393,
"grad_norm": 3.6875,
"learning_rate": 4.966938284798291e-06,
"loss": 0.9253,
"step": 100
},
{
"epoch": 0.014695077149155033,
"grad_norm": 3.609375,
"learning_rate": 4.963598717606198e-06,
"loss": 0.908,
"step": 110
},
{
"epoch": 0.01603099325362367,
"grad_norm": 3.578125,
"learning_rate": 4.960259150414107e-06,
"loss": 0.8962,
"step": 120
},
{
"epoch": 0.017366909358092313,
"grad_norm": 3.515625,
"learning_rate": 4.9569195832220145e-06,
"loss": 0.8264,
"step": 130
},
{
"epoch": 0.01870282546256095,
"grad_norm": 3.671875,
"learning_rate": 4.953580016029923e-06,
"loss": 0.9133,
"step": 140
},
{
"epoch": 0.020038741567029592,
"grad_norm": 3.796875,
"learning_rate": 4.950240448837831e-06,
"loss": 0.8665,
"step": 150
},
{
"epoch": 0.02137465767149823,
"grad_norm": 3.671875,
"learning_rate": 4.946900881645739e-06,
"loss": 0.8992,
"step": 160
},
{
"epoch": 0.02271057377596687,
"grad_norm": 3.546875,
"learning_rate": 4.943561314453647e-06,
"loss": 0.8392,
"step": 170
},
{
"epoch": 0.02404648988043551,
"grad_norm": 3.78125,
"learning_rate": 4.940221747261555e-06,
"loss": 0.8548,
"step": 180
},
{
"epoch": 0.025382405984904148,
"grad_norm": 3.578125,
"learning_rate": 4.936882180069463e-06,
"loss": 0.834,
"step": 190
},
{
"epoch": 0.026718322089372786,
"grad_norm": 3.75,
"learning_rate": 4.933542612877371e-06,
"loss": 0.8561,
"step": 200
},
{
"epoch": 0.028054238193841428,
"grad_norm": 3.453125,
"learning_rate": 4.93020304568528e-06,
"loss": 0.8379,
"step": 210
},
{
"epoch": 0.029390154298310066,
"grad_norm": 3.6875,
"learning_rate": 4.9268634784931876e-06,
"loss": 0.86,
"step": 220
},
{
"epoch": 0.030726070402778704,
"grad_norm": 3.84375,
"learning_rate": 4.923523911301096e-06,
"loss": 0.8812,
"step": 230
},
{
"epoch": 0.03206198650724734,
"grad_norm": 3.765625,
"learning_rate": 4.9201843441090045e-06,
"loss": 0.8708,
"step": 240
},
{
"epoch": 0.033397902611715984,
"grad_norm": 3.734375,
"learning_rate": 4.916844776916912e-06,
"loss": 0.8282,
"step": 250
},
{
"epoch": 0.033397902611715984,
"eval_loss": 0.8481760621070862,
"eval_runtime": 259.2241,
"eval_samples_per_second": 25.669,
"eval_steps_per_second": 3.21,
"step": 250
},
{
"epoch": 0.034733818716184625,
"grad_norm": 3.640625,
"learning_rate": 4.91350520972482e-06,
"loss": 0.8325,
"step": 260
},
{
"epoch": 0.03606973482065326,
"grad_norm": 3.84375,
"learning_rate": 4.910165642532728e-06,
"loss": 0.8487,
"step": 270
},
{
"epoch": 0.0374056509251219,
"grad_norm": 3.75,
"learning_rate": 4.906826075340636e-06,
"loss": 0.798,
"step": 280
},
{
"epoch": 0.03874156702959054,
"grad_norm": 3.625,
"learning_rate": 4.9034865081485445e-06,
"loss": 0.8482,
"step": 290
},
{
"epoch": 0.040077483134059184,
"grad_norm": 3.703125,
"learning_rate": 4.900146940956452e-06,
"loss": 0.8455,
"step": 300
},
{
"epoch": 0.04141339923852782,
"grad_norm": 3.671875,
"learning_rate": 4.896807373764361e-06,
"loss": 0.813,
"step": 310
},
{
"epoch": 0.04274931534299646,
"grad_norm": 3.625,
"learning_rate": 4.893467806572268e-06,
"loss": 0.8418,
"step": 320
},
{
"epoch": 0.0440852314474651,
"grad_norm": 3.75,
"learning_rate": 4.890128239380177e-06,
"loss": 0.8379,
"step": 330
},
{
"epoch": 0.04542114755193374,
"grad_norm": 3.5625,
"learning_rate": 4.8867886721880845e-06,
"loss": 0.8689,
"step": 340
},
{
"epoch": 0.04675706365640238,
"grad_norm": 3.53125,
"learning_rate": 4.883449104995993e-06,
"loss": 0.8155,
"step": 350
},
{
"epoch": 0.04809297976087102,
"grad_norm": 3.546875,
"learning_rate": 4.880109537803901e-06,
"loss": 0.8216,
"step": 360
},
{
"epoch": 0.049428895865339655,
"grad_norm": 3.671875,
"learning_rate": 4.876769970611809e-06,
"loss": 0.8461,
"step": 370
},
{
"epoch": 0.050764811969808296,
"grad_norm": 3.703125,
"learning_rate": 4.873430403419718e-06,
"loss": 0.8159,
"step": 380
},
{
"epoch": 0.05210072807427694,
"grad_norm": 3.625,
"learning_rate": 4.870090836227625e-06,
"loss": 0.8186,
"step": 390
},
{
"epoch": 0.05343664417874557,
"grad_norm": 3.453125,
"learning_rate": 4.866751269035534e-06,
"loss": 0.7775,
"step": 400
},
{
"epoch": 0.054772560283214214,
"grad_norm": 3.5625,
"learning_rate": 4.863411701843441e-06,
"loss": 0.815,
"step": 410
},
{
"epoch": 0.056108476387682855,
"grad_norm": 3.515625,
"learning_rate": 4.86007213465135e-06,
"loss": 0.8099,
"step": 420
},
{
"epoch": 0.05744439249215149,
"grad_norm": 3.609375,
"learning_rate": 4.8567325674592576e-06,
"loss": 0.8349,
"step": 430
},
{
"epoch": 0.05878030859662013,
"grad_norm": 3.65625,
"learning_rate": 4.853393000267166e-06,
"loss": 0.7937,
"step": 440
},
{
"epoch": 0.06011622470108877,
"grad_norm": 3.875,
"learning_rate": 4.850053433075074e-06,
"loss": 0.8143,
"step": 450
},
{
"epoch": 0.06145214080555741,
"grad_norm": 3.609375,
"learning_rate": 4.846713865882982e-06,
"loss": 0.8386,
"step": 460
},
{
"epoch": 0.06278805691002605,
"grad_norm": 3.859375,
"learning_rate": 4.84337429869089e-06,
"loss": 0.8066,
"step": 470
},
{
"epoch": 0.06412397301449468,
"grad_norm": 3.640625,
"learning_rate": 4.840034731498798e-06,
"loss": 0.8033,
"step": 480
},
{
"epoch": 0.06545988911896333,
"grad_norm": 3.734375,
"learning_rate": 4.836695164306706e-06,
"loss": 0.7867,
"step": 490
},
{
"epoch": 0.06679580522343197,
"grad_norm": 3.8125,
"learning_rate": 4.8333555971146145e-06,
"loss": 0.8048,
"step": 500
},
{
"epoch": 0.06679580522343197,
"eval_loss": 0.8064413070678711,
"eval_runtime": 258.5665,
"eval_samples_per_second": 25.734,
"eval_steps_per_second": 3.218,
"step": 500
},
{
"epoch": 0.0681317213279006,
"grad_norm": 3.5,
"learning_rate": 4.830016029922522e-06,
"loss": 0.8091,
"step": 510
},
{
"epoch": 0.06946763743236925,
"grad_norm": 3.671875,
"learning_rate": 4.826676462730431e-06,
"loss": 0.7971,
"step": 520
},
{
"epoch": 0.07080355353683788,
"grad_norm": 3.546875,
"learning_rate": 4.823336895538338e-06,
"loss": 0.8103,
"step": 530
},
{
"epoch": 0.07213946964130652,
"grad_norm": 3.859375,
"learning_rate": 4.819997328346247e-06,
"loss": 0.81,
"step": 540
},
{
"epoch": 0.07347538574577517,
"grad_norm": 3.5,
"learning_rate": 4.8166577611541545e-06,
"loss": 0.7851,
"step": 550
},
{
"epoch": 0.0748113018502438,
"grad_norm": 3.734375,
"learning_rate": 4.813318193962063e-06,
"loss": 0.803,
"step": 560
},
{
"epoch": 0.07614721795471244,
"grad_norm": 3.546875,
"learning_rate": 4.8099786267699715e-06,
"loss": 0.7721,
"step": 570
},
{
"epoch": 0.07748313405918109,
"grad_norm": 3.6875,
"learning_rate": 4.806639059577879e-06,
"loss": 0.825,
"step": 580
},
{
"epoch": 0.07881905016364972,
"grad_norm": 3.78125,
"learning_rate": 4.803299492385788e-06,
"loss": 0.7933,
"step": 590
},
{
"epoch": 0.08015496626811837,
"grad_norm": 3.75,
"learning_rate": 4.799959925193695e-06,
"loss": 0.7706,
"step": 600
},
{
"epoch": 0.081490882372587,
"grad_norm": 3.859375,
"learning_rate": 4.796620358001604e-06,
"loss": 0.8063,
"step": 610
},
{
"epoch": 0.08282679847705564,
"grad_norm": 3.5,
"learning_rate": 4.793280790809511e-06,
"loss": 0.7837,
"step": 620
},
{
"epoch": 0.08416271458152429,
"grad_norm": 4.09375,
"learning_rate": 4.78994122361742e-06,
"loss": 0.7854,
"step": 630
},
{
"epoch": 0.08549863068599292,
"grad_norm": 3.65625,
"learning_rate": 4.7866016564253276e-06,
"loss": 0.7912,
"step": 640
},
{
"epoch": 0.08683454679046156,
"grad_norm": 3.71875,
"learning_rate": 4.783262089233236e-06,
"loss": 0.7996,
"step": 650
},
{
"epoch": 0.0881704628949302,
"grad_norm": 3.796875,
"learning_rate": 4.779922522041144e-06,
"loss": 0.7642,
"step": 660
},
{
"epoch": 0.08950637899939884,
"grad_norm": 3.765625,
"learning_rate": 4.776582954849052e-06,
"loss": 0.799,
"step": 670
},
{
"epoch": 0.09084229510386747,
"grad_norm": 3.921875,
"learning_rate": 4.77324338765696e-06,
"loss": 0.8081,
"step": 680
},
{
"epoch": 0.09217821120833612,
"grad_norm": 3.921875,
"learning_rate": 4.7699038204648675e-06,
"loss": 0.8277,
"step": 690
},
{
"epoch": 0.09351412731280476,
"grad_norm": 3.703125,
"learning_rate": 4.766564253272776e-06,
"loss": 0.7484,
"step": 700
},
{
"epoch": 0.09485004341727339,
"grad_norm": 3.53125,
"learning_rate": 4.7632246860806845e-06,
"loss": 0.7728,
"step": 710
},
{
"epoch": 0.09618595952174204,
"grad_norm": 3.671875,
"learning_rate": 4.759885118888592e-06,
"loss": 0.7648,
"step": 720
},
{
"epoch": 0.09752187562621067,
"grad_norm": 3.71875,
"learning_rate": 4.756545551696501e-06,
"loss": 0.7592,
"step": 730
},
{
"epoch": 0.09885779173067931,
"grad_norm": 3.625,
"learning_rate": 4.753205984504409e-06,
"loss": 0.7774,
"step": 740
},
{
"epoch": 0.10019370783514796,
"grad_norm": 3.59375,
"learning_rate": 4.749866417312317e-06,
"loss": 0.7606,
"step": 750
},
{
"epoch": 0.10019370783514796,
"eval_loss": 0.7836291193962097,
"eval_runtime": 258.4756,
"eval_samples_per_second": 25.743,
"eval_steps_per_second": 3.219,
"step": 750
},
{
"epoch": 0.10152962393961659,
"grad_norm": 3.734375,
"learning_rate": 4.746526850120225e-06,
"loss": 0.7555,
"step": 760
},
{
"epoch": 0.10286554004408523,
"grad_norm": 3.859375,
"learning_rate": 4.743187282928133e-06,
"loss": 0.7689,
"step": 770
},
{
"epoch": 0.10420145614855388,
"grad_norm": 3.78125,
"learning_rate": 4.7398477157360415e-06,
"loss": 0.787,
"step": 780
},
{
"epoch": 0.10553737225302251,
"grad_norm": 3.796875,
"learning_rate": 4.736508148543949e-06,
"loss": 0.7824,
"step": 790
},
{
"epoch": 0.10687328835749114,
"grad_norm": 3.546875,
"learning_rate": 4.733168581351858e-06,
"loss": 0.798,
"step": 800
},
{
"epoch": 0.1082092044619598,
"grad_norm": 3.765625,
"learning_rate": 4.729829014159765e-06,
"loss": 0.777,
"step": 810
},
{
"epoch": 0.10954512056642843,
"grad_norm": 3.859375,
"learning_rate": 4.726489446967674e-06,
"loss": 0.7436,
"step": 820
},
{
"epoch": 0.11088103667089706,
"grad_norm": 3.421875,
"learning_rate": 4.723149879775581e-06,
"loss": 0.7593,
"step": 830
},
{
"epoch": 0.11221695277536571,
"grad_norm": 3.71875,
"learning_rate": 4.719810312583489e-06,
"loss": 0.7766,
"step": 840
},
{
"epoch": 0.11355286887983435,
"grad_norm": 3.6875,
"learning_rate": 4.7164707453913976e-06,
"loss": 0.7829,
"step": 850
},
{
"epoch": 0.11488878498430298,
"grad_norm": 3.453125,
"learning_rate": 4.713131178199305e-06,
"loss": 0.7813,
"step": 860
},
{
"epoch": 0.11622470108877163,
"grad_norm": 3.53125,
"learning_rate": 4.709791611007214e-06,
"loss": 0.7833,
"step": 870
},
{
"epoch": 0.11756061719324026,
"grad_norm": 3.78125,
"learning_rate": 4.706452043815121e-06,
"loss": 0.7923,
"step": 880
},
{
"epoch": 0.1188965332977089,
"grad_norm": 3.90625,
"learning_rate": 4.70311247662303e-06,
"loss": 0.7869,
"step": 890
},
{
"epoch": 0.12023244940217755,
"grad_norm": 3.71875,
"learning_rate": 4.699772909430938e-06,
"loss": 0.7428,
"step": 900
},
{
"epoch": 0.12156836550664618,
"grad_norm": 3.546875,
"learning_rate": 4.696433342238846e-06,
"loss": 0.7675,
"step": 910
},
{
"epoch": 0.12290428161111482,
"grad_norm": 3.5625,
"learning_rate": 4.6930937750467545e-06,
"loss": 0.7593,
"step": 920
},
{
"epoch": 0.12424019771558346,
"grad_norm": 3.59375,
"learning_rate": 4.689754207854663e-06,
"loss": 0.7672,
"step": 930
},
{
"epoch": 0.1255761138200521,
"grad_norm": 3.75,
"learning_rate": 4.686414640662571e-06,
"loss": 0.7717,
"step": 940
},
{
"epoch": 0.12691202992452075,
"grad_norm": 3.859375,
"learning_rate": 4.683075073470479e-06,
"loss": 0.765,
"step": 950
},
{
"epoch": 0.12824794602898937,
"grad_norm": 3.625,
"learning_rate": 4.679735506278387e-06,
"loss": 0.758,
"step": 960
},
{
"epoch": 0.12958386213345802,
"grad_norm": 3.671875,
"learning_rate": 4.6763959390862945e-06,
"loss": 0.7718,
"step": 970
},
{
"epoch": 0.13091977823792667,
"grad_norm": 3.875,
"learning_rate": 4.673056371894203e-06,
"loss": 0.8161,
"step": 980
},
{
"epoch": 0.13225569434239529,
"grad_norm": 3.703125,
"learning_rate": 4.669716804702111e-06,
"loss": 0.7649,
"step": 990
},
{
"epoch": 0.13359161044686393,
"grad_norm": 3.59375,
"learning_rate": 4.666377237510019e-06,
"loss": 0.7762,
"step": 1000
},
{
"epoch": 0.13359161044686393,
"eval_loss": 0.767823338508606,
"eval_runtime": 253.9061,
"eval_samples_per_second": 26.207,
"eval_steps_per_second": 3.277,
"step": 1000
},
{
"epoch": 0.13492752655133258,
"grad_norm": 3.71875,
"learning_rate": 4.663037670317927e-06,
"loss": 0.7505,
"step": 1010
},
{
"epoch": 0.1362634426558012,
"grad_norm": 3.859375,
"learning_rate": 4.659698103125835e-06,
"loss": 0.7883,
"step": 1020
},
{
"epoch": 0.13759935876026985,
"grad_norm": 3.59375,
"learning_rate": 4.656358535933743e-06,
"loss": 0.7747,
"step": 1030
},
{
"epoch": 0.1389352748647385,
"grad_norm": 3.65625,
"learning_rate": 4.653018968741651e-06,
"loss": 0.7889,
"step": 1040
},
{
"epoch": 0.14027119096920712,
"grad_norm": 3.90625,
"learning_rate": 4.649679401549559e-06,
"loss": 0.8012,
"step": 1050
},
{
"epoch": 0.14160710707367577,
"grad_norm": 3.53125,
"learning_rate": 4.6463398343574676e-06,
"loss": 0.7613,
"step": 1060
},
{
"epoch": 0.14294302317814442,
"grad_norm": 3.640625,
"learning_rate": 4.643000267165376e-06,
"loss": 0.7801,
"step": 1070
},
{
"epoch": 0.14427893928261304,
"grad_norm": 3.875,
"learning_rate": 4.639660699973284e-06,
"loss": 0.754,
"step": 1080
},
{
"epoch": 0.1456148553870817,
"grad_norm": 3.84375,
"learning_rate": 4.636321132781192e-06,
"loss": 0.7581,
"step": 1090
},
{
"epoch": 0.14695077149155034,
"grad_norm": 3.65625,
"learning_rate": 4.632981565589101e-06,
"loss": 0.7728,
"step": 1100
},
{
"epoch": 0.14828668759601896,
"grad_norm": 3.625,
"learning_rate": 4.629641998397008e-06,
"loss": 0.7707,
"step": 1110
},
{
"epoch": 0.1496226037004876,
"grad_norm": 3.53125,
"learning_rate": 4.626302431204916e-06,
"loss": 0.7879,
"step": 1120
},
{
"epoch": 0.15095851980495625,
"grad_norm": 3.734375,
"learning_rate": 4.6229628640128245e-06,
"loss": 0.7963,
"step": 1130
},
{
"epoch": 0.15229443590942487,
"grad_norm": 3.953125,
"learning_rate": 4.619623296820732e-06,
"loss": 0.7546,
"step": 1140
},
{
"epoch": 0.15363035201389352,
"grad_norm": 3.71875,
"learning_rate": 4.616283729628641e-06,
"loss": 0.7502,
"step": 1150
},
{
"epoch": 0.15496626811836217,
"grad_norm": 3.765625,
"learning_rate": 4.612944162436548e-06,
"loss": 0.7404,
"step": 1160
},
{
"epoch": 0.15630218422283082,
"grad_norm": 3.75,
"learning_rate": 4.609604595244457e-06,
"loss": 0.7862,
"step": 1170
},
{
"epoch": 0.15763810032729944,
"grad_norm": 3.84375,
"learning_rate": 4.6062650280523645e-06,
"loss": 0.7705,
"step": 1180
},
{
"epoch": 0.1589740164317681,
"grad_norm": 3.515625,
"learning_rate": 4.602925460860273e-06,
"loss": 0.7449,
"step": 1190
},
{
"epoch": 0.16030993253623674,
"grad_norm": 3.8125,
"learning_rate": 4.599585893668181e-06,
"loss": 0.7275,
"step": 1200
},
{
"epoch": 0.16164584864070536,
"grad_norm": 3.578125,
"learning_rate": 4.596246326476089e-06,
"loss": 0.7348,
"step": 1210
},
{
"epoch": 0.162981764745174,
"grad_norm": 3.671875,
"learning_rate": 4.592906759283997e-06,
"loss": 0.7726,
"step": 1220
},
{
"epoch": 0.16431768084964266,
"grad_norm": 3.828125,
"learning_rate": 4.589567192091905e-06,
"loss": 0.7206,
"step": 1230
},
{
"epoch": 0.16565359695411128,
"grad_norm": 3.78125,
"learning_rate": 4.586227624899813e-06,
"loss": 0.7724,
"step": 1240
},
{
"epoch": 0.16698951305857992,
"grad_norm": 3.9375,
"learning_rate": 4.582888057707721e-06,
"loss": 0.777,
"step": 1250
},
{
"epoch": 0.16698951305857992,
"eval_loss": 0.7555676102638245,
"eval_runtime": 255.3606,
"eval_samples_per_second": 26.057,
"eval_steps_per_second": 3.258,
"step": 1250
},
{
"epoch": 0.16832542916304857,
"grad_norm": 3.796875,
"learning_rate": 4.57954849051563e-06,
"loss": 0.7687,
"step": 1260
},
{
"epoch": 0.1696613452675172,
"grad_norm": 3.796875,
"learning_rate": 4.5762089233235376e-06,
"loss": 0.776,
"step": 1270
},
{
"epoch": 0.17099726137198584,
"grad_norm": 3.90625,
"learning_rate": 4.572869356131446e-06,
"loss": 0.7775,
"step": 1280
},
{
"epoch": 0.1723331774764545,
"grad_norm": 3.609375,
"learning_rate": 4.569529788939354e-06,
"loss": 0.7296,
"step": 1290
},
{
"epoch": 0.1736690935809231,
"grad_norm": 3.6875,
"learning_rate": 4.566190221747262e-06,
"loss": 0.7567,
"step": 1300
},
{
"epoch": 0.17500500968539176,
"grad_norm": 3.8125,
"learning_rate": 4.56285065455517e-06,
"loss": 0.7352,
"step": 1310
},
{
"epoch": 0.1763409257898604,
"grad_norm": 3.921875,
"learning_rate": 4.559511087363078e-06,
"loss": 0.7627,
"step": 1320
},
{
"epoch": 0.17767684189432903,
"grad_norm": 3.796875,
"learning_rate": 4.556171520170986e-06,
"loss": 0.7557,
"step": 1330
},
{
"epoch": 0.17901275799879768,
"grad_norm": 3.5625,
"learning_rate": 4.5528319529788945e-06,
"loss": 0.741,
"step": 1340
},
{
"epoch": 0.18034867410326633,
"grad_norm": 3.796875,
"learning_rate": 4.549492385786802e-06,
"loss": 0.7223,
"step": 1350
},
{
"epoch": 0.18168459020773495,
"grad_norm": 3.609375,
"learning_rate": 4.546152818594711e-06,
"loss": 0.7282,
"step": 1360
},
{
"epoch": 0.1830205063122036,
"grad_norm": 3.609375,
"learning_rate": 4.542813251402618e-06,
"loss": 0.739,
"step": 1370
},
{
"epoch": 0.18435642241667224,
"grad_norm": 3.671875,
"learning_rate": 4.539473684210527e-06,
"loss": 0.7293,
"step": 1380
},
{
"epoch": 0.18569233852114087,
"grad_norm": 3.9375,
"learning_rate": 4.5361341170184345e-06,
"loss": 0.7887,
"step": 1390
},
{
"epoch": 0.1870282546256095,
"grad_norm": 3.65625,
"learning_rate": 4.532794549826343e-06,
"loss": 0.7493,
"step": 1400
},
{
"epoch": 0.18836417073007816,
"grad_norm": 3.96875,
"learning_rate": 4.529454982634251e-06,
"loss": 0.7376,
"step": 1410
},
{
"epoch": 0.18970008683454678,
"grad_norm": 3.5625,
"learning_rate": 4.526115415442159e-06,
"loss": 0.7642,
"step": 1420
},
{
"epoch": 0.19103600293901543,
"grad_norm": 3.796875,
"learning_rate": 4.522775848250068e-06,
"loss": 0.746,
"step": 1430
},
{
"epoch": 0.19237191904348408,
"grad_norm": 3.625,
"learning_rate": 4.519436281057975e-06,
"loss": 0.737,
"step": 1440
},
{
"epoch": 0.1937078351479527,
"grad_norm": 3.5625,
"learning_rate": 4.516096713865884e-06,
"loss": 0.7393,
"step": 1450
},
{
"epoch": 0.19504375125242135,
"grad_norm": 4.03125,
"learning_rate": 4.512757146673791e-06,
"loss": 0.7732,
"step": 1460
},
{
"epoch": 0.19637966735689,
"grad_norm": 3.828125,
"learning_rate": 4.5094175794817e-06,
"loss": 0.7792,
"step": 1470
},
{
"epoch": 0.19771558346135862,
"grad_norm": 3.5625,
"learning_rate": 4.5060780122896076e-06,
"loss": 0.7497,
"step": 1480
},
{
"epoch": 0.19905149956582727,
"grad_norm": 3.828125,
"learning_rate": 4.502738445097516e-06,
"loss": 0.7759,
"step": 1490
},
{
"epoch": 0.20038741567029592,
"grad_norm": 3.78125,
"learning_rate": 4.499398877905424e-06,
"loss": 0.7336,
"step": 1500
},
{
"epoch": 0.20038741567029592,
"eval_loss": 0.7456574440002441,
"eval_runtime": 252.7226,
"eval_samples_per_second": 26.329,
"eval_steps_per_second": 3.292,
"step": 1500
},
{
"epoch": 0.20172333177476454,
"grad_norm": 3.671875,
"learning_rate": 4.496059310713332e-06,
"loss": 0.7787,
"step": 1510
},
{
"epoch": 0.20305924787923318,
"grad_norm": 4.1875,
"learning_rate": 4.49271974352124e-06,
"loss": 0.7728,
"step": 1520
},
{
"epoch": 0.20439516398370183,
"grad_norm": 3.890625,
"learning_rate": 4.489380176329148e-06,
"loss": 0.7193,
"step": 1530
},
{
"epoch": 0.20573108008817045,
"grad_norm": 3.46875,
"learning_rate": 4.486040609137056e-06,
"loss": 0.7322,
"step": 1540
},
{
"epoch": 0.2070669961926391,
"grad_norm": 3.671875,
"learning_rate": 4.482701041944964e-06,
"loss": 0.7276,
"step": 1550
},
{
"epoch": 0.20840291229710775,
"grad_norm": 3.9375,
"learning_rate": 4.479361474752872e-06,
"loss": 0.7368,
"step": 1560
},
{
"epoch": 0.20973882840157637,
"grad_norm": 4.09375,
"learning_rate": 4.476021907560781e-06,
"loss": 0.7545,
"step": 1570
},
{
"epoch": 0.21107474450604502,
"grad_norm": 3.75,
"learning_rate": 4.472682340368688e-06,
"loss": 0.7389,
"step": 1580
},
{
"epoch": 0.21241066061051367,
"grad_norm": 3.78125,
"learning_rate": 4.469342773176597e-06,
"loss": 0.7743,
"step": 1590
},
{
"epoch": 0.2137465767149823,
"grad_norm": 3.734375,
"learning_rate": 4.4660032059845045e-06,
"loss": 0.7122,
"step": 1600
},
{
"epoch": 0.21508249281945094,
"grad_norm": 3.8125,
"learning_rate": 4.462663638792413e-06,
"loss": 0.7475,
"step": 1610
},
{
"epoch": 0.2164184089239196,
"grad_norm": 4.03125,
"learning_rate": 4.4593240716003215e-06,
"loss": 0.7549,
"step": 1620
},
{
"epoch": 0.2177543250283882,
"grad_norm": 3.765625,
"learning_rate": 4.455984504408229e-06,
"loss": 0.7438,
"step": 1630
},
{
"epoch": 0.21909024113285686,
"grad_norm": 3.75,
"learning_rate": 4.452644937216138e-06,
"loss": 0.7186,
"step": 1640
},
{
"epoch": 0.2204261572373255,
"grad_norm": 4.0,
"learning_rate": 4.449305370024045e-06,
"loss": 0.7433,
"step": 1650
},
{
"epoch": 0.22176207334179412,
"grad_norm": 3.75,
"learning_rate": 4.445965802831954e-06,
"loss": 0.7476,
"step": 1660
},
{
"epoch": 0.22309798944626277,
"grad_norm": 3.90625,
"learning_rate": 4.442626235639861e-06,
"loss": 0.7444,
"step": 1670
},
{
"epoch": 0.22443390555073142,
"grad_norm": 3.78125,
"learning_rate": 4.43928666844777e-06,
"loss": 0.7182,
"step": 1680
},
{
"epoch": 0.22576982165520004,
"grad_norm": 3.921875,
"learning_rate": 4.4359471012556776e-06,
"loss": 0.7158,
"step": 1690
},
{
"epoch": 0.2271057377596687,
"grad_norm": 3.75,
"learning_rate": 4.432607534063585e-06,
"loss": 0.7201,
"step": 1700
},
{
"epoch": 0.22844165386413734,
"grad_norm": 4.0625,
"learning_rate": 4.429267966871494e-06,
"loss": 0.7755,
"step": 1710
},
{
"epoch": 0.22977756996860596,
"grad_norm": 3.71875,
"learning_rate": 4.425928399679401e-06,
"loss": 0.7339,
"step": 1720
},
{
"epoch": 0.2311134860730746,
"grad_norm": 3.75,
"learning_rate": 4.42258883248731e-06,
"loss": 0.7431,
"step": 1730
},
{
"epoch": 0.23244940217754326,
"grad_norm": 3.78125,
"learning_rate": 4.4192492652952175e-06,
"loss": 0.7264,
"step": 1740
},
{
"epoch": 0.23378531828201188,
"grad_norm": 3.78125,
"learning_rate": 4.415909698103126e-06,
"loss": 0.7263,
"step": 1750
},
{
"epoch": 0.23378531828201188,
"eval_loss": 0.7381161451339722,
"eval_runtime": 253.0916,
"eval_samples_per_second": 26.291,
"eval_steps_per_second": 3.287,
"step": 1750
},
{
"epoch": 0.23512123438648053,
"grad_norm": 3.796875,
"learning_rate": 4.4125701309110345e-06,
"loss": 0.7342,
"step": 1760
},
{
"epoch": 0.23645715049094918,
"grad_norm": 3.609375,
"learning_rate": 4.409230563718942e-06,
"loss": 0.7475,
"step": 1770
},
{
"epoch": 0.2377930665954178,
"grad_norm": 4.0625,
"learning_rate": 4.405890996526851e-06,
"loss": 0.7487,
"step": 1780
},
{
"epoch": 0.23912898269988644,
"grad_norm": 3.859375,
"learning_rate": 4.402551429334759e-06,
"loss": 0.7549,
"step": 1790
},
{
"epoch": 0.2404648988043551,
"grad_norm": 4.15625,
"learning_rate": 4.399211862142667e-06,
"loss": 0.7362,
"step": 1800
},
{
"epoch": 0.2418008149088237,
"grad_norm": 3.8125,
"learning_rate": 4.395872294950575e-06,
"loss": 0.7343,
"step": 1810
},
{
"epoch": 0.24313673101329236,
"grad_norm": 3.84375,
"learning_rate": 4.392532727758483e-06,
"loss": 0.7101,
"step": 1820
},
{
"epoch": 0.244472647117761,
"grad_norm": 3.671875,
"learning_rate": 4.3891931605663915e-06,
"loss": 0.717,
"step": 1830
},
{
"epoch": 0.24580856322222963,
"grad_norm": 3.96875,
"learning_rate": 4.385853593374299e-06,
"loss": 0.7418,
"step": 1840
},
{
"epoch": 0.24714447932669828,
"grad_norm": 3.9375,
"learning_rate": 4.382514026182207e-06,
"loss": 0.7352,
"step": 1850
},
{
"epoch": 0.24848039543116693,
"grad_norm": 3.71875,
"learning_rate": 4.379174458990115e-06,
"loss": 0.7118,
"step": 1860
},
{
"epoch": 0.24981631153563555,
"grad_norm": 4.03125,
"learning_rate": 4.375834891798023e-06,
"loss": 0.7315,
"step": 1870
},
{
"epoch": 0.2511522276401042,
"grad_norm": 3.71875,
"learning_rate": 4.372495324605931e-06,
"loss": 0.7371,
"step": 1880
},
{
"epoch": 0.25248814374457285,
"grad_norm": 3.90625,
"learning_rate": 4.369155757413839e-06,
"loss": 0.7252,
"step": 1890
},
{
"epoch": 0.2538240598490415,
"grad_norm": 3.75,
"learning_rate": 4.3658161902217476e-06,
"loss": 0.7331,
"step": 1900
},
{
"epoch": 0.25515997595351014,
"grad_norm": 4.0,
"learning_rate": 4.362476623029655e-06,
"loss": 0.7635,
"step": 1910
},
{
"epoch": 0.25649589205797874,
"grad_norm": 3.6875,
"learning_rate": 4.359137055837564e-06,
"loss": 0.7447,
"step": 1920
},
{
"epoch": 0.2578318081624474,
"grad_norm": 3.859375,
"learning_rate": 4.355797488645472e-06,
"loss": 0.7239,
"step": 1930
},
{
"epoch": 0.25916772426691603,
"grad_norm": 3.984375,
"learning_rate": 4.35245792145338e-06,
"loss": 0.7499,
"step": 1940
},
{
"epoch": 0.2605036403713847,
"grad_norm": 3.703125,
"learning_rate": 4.349118354261288e-06,
"loss": 0.7322,
"step": 1950
},
{
"epoch": 0.26183955647585333,
"grad_norm": 3.859375,
"learning_rate": 4.345778787069196e-06,
"loss": 0.6944,
"step": 1960
},
{
"epoch": 0.263175472580322,
"grad_norm": 3.796875,
"learning_rate": 4.3424392198771045e-06,
"loss": 0.7243,
"step": 1970
},
{
"epoch": 0.26451138868479057,
"grad_norm": 3.65625,
"learning_rate": 4.339099652685013e-06,
"loss": 0.7349,
"step": 1980
},
{
"epoch": 0.2658473047892592,
"grad_norm": 3.53125,
"learning_rate": 4.335760085492921e-06,
"loss": 0.7107,
"step": 1990
},
{
"epoch": 0.26718322089372787,
"grad_norm": 3.96875,
"learning_rate": 4.332420518300828e-06,
"loss": 0.6981,
"step": 2000
},
{
"epoch": 0.26718322089372787,
"eval_loss": 0.7313552498817444,
"eval_runtime": 255.8649,
"eval_samples_per_second": 26.006,
"eval_steps_per_second": 3.252,
"step": 2000
},
{
"epoch": 0.2685191369981965,
"grad_norm": 4.59375,
"learning_rate": 4.329080951108737e-06,
"loss": 0.7239,
"step": 2010
},
{
"epoch": 0.26985505310266517,
"grad_norm": 3.796875,
"learning_rate": 4.3257413839166445e-06,
"loss": 0.7381,
"step": 2020
},
{
"epoch": 0.2711909692071338,
"grad_norm": 4.03125,
"learning_rate": 4.322401816724553e-06,
"loss": 0.7426,
"step": 2030
},
{
"epoch": 0.2725268853116024,
"grad_norm": 3.6875,
"learning_rate": 4.319062249532461e-06,
"loss": 0.7284,
"step": 2040
},
{
"epoch": 0.27386280141607106,
"grad_norm": 3.734375,
"learning_rate": 4.315722682340369e-06,
"loss": 0.7375,
"step": 2050
},
{
"epoch": 0.2751987175205397,
"grad_norm": 3.671875,
"learning_rate": 4.312383115148277e-06,
"loss": 0.7045,
"step": 2060
},
{
"epoch": 0.27653463362500835,
"grad_norm": 3.6875,
"learning_rate": 4.309043547956185e-06,
"loss": 0.7312,
"step": 2070
},
{
"epoch": 0.277870549729477,
"grad_norm": 3.796875,
"learning_rate": 4.305703980764093e-06,
"loss": 0.7104,
"step": 2080
},
{
"epoch": 0.27920646583394565,
"grad_norm": 4.0,
"learning_rate": 4.302364413572001e-06,
"loss": 0.7254,
"step": 2090
},
{
"epoch": 0.28054238193841424,
"grad_norm": 4.0625,
"learning_rate": 4.299024846379909e-06,
"loss": 0.7085,
"step": 2100
},
{
"epoch": 0.2818782980428829,
"grad_norm": 3.828125,
"learning_rate": 4.2956852791878176e-06,
"loss": 0.7116,
"step": 2110
},
{
"epoch": 0.28321421414735154,
"grad_norm": 3.75,
"learning_rate": 4.292345711995726e-06,
"loss": 0.7305,
"step": 2120
},
{
"epoch": 0.2845501302518202,
"grad_norm": 3.875,
"learning_rate": 4.289006144803634e-06,
"loss": 0.7388,
"step": 2130
},
{
"epoch": 0.28588604635628884,
"grad_norm": 3.90625,
"learning_rate": 4.285666577611542e-06,
"loss": 0.7374,
"step": 2140
},
{
"epoch": 0.2872219624607575,
"grad_norm": 3.78125,
"learning_rate": 4.28232701041945e-06,
"loss": 0.7294,
"step": 2150
},
{
"epoch": 0.2885578785652261,
"grad_norm": 3.921875,
"learning_rate": 4.278987443227358e-06,
"loss": 0.7383,
"step": 2160
},
{
"epoch": 0.2898937946696947,
"grad_norm": 4.0,
"learning_rate": 4.275647876035266e-06,
"loss": 0.7375,
"step": 2170
},
{
"epoch": 0.2912297107741634,
"grad_norm": 3.84375,
"learning_rate": 4.2723083088431745e-06,
"loss": 0.7165,
"step": 2180
},
{
"epoch": 0.292565626878632,
"grad_norm": 3.921875,
"learning_rate": 4.268968741651082e-06,
"loss": 0.7583,
"step": 2190
},
{
"epoch": 0.29390154298310067,
"grad_norm": 3.984375,
"learning_rate": 4.265629174458991e-06,
"loss": 0.7274,
"step": 2200
},
{
"epoch": 0.2952374590875693,
"grad_norm": 3.875,
"learning_rate": 4.262289607266898e-06,
"loss": 0.7288,
"step": 2210
},
{
"epoch": 0.2965733751920379,
"grad_norm": 3.875,
"learning_rate": 4.258950040074807e-06,
"loss": 0.742,
"step": 2220
},
{
"epoch": 0.29790929129650656,
"grad_norm": 4.25,
"learning_rate": 4.2556104728827145e-06,
"loss": 0.7221,
"step": 2230
},
{
"epoch": 0.2992452074009752,
"grad_norm": 3.78125,
"learning_rate": 4.252270905690623e-06,
"loss": 0.7291,
"step": 2240
},
{
"epoch": 0.30058112350544386,
"grad_norm": 4.03125,
"learning_rate": 4.248931338498531e-06,
"loss": 0.7611,
"step": 2250
},
{
"epoch": 0.30058112350544386,
"eval_loss": 0.725321352481842,
"eval_runtime": 255.0744,
"eval_samples_per_second": 26.087,
"eval_steps_per_second": 3.262,
"step": 2250
},
{
"epoch": 0.3019170396099125,
"grad_norm": 3.84375,
"learning_rate": 4.245591771306439e-06,
"loss": 0.7355,
"step": 2260
},
{
"epoch": 0.30325295571438116,
"grad_norm": 3.953125,
"learning_rate": 4.242252204114347e-06,
"loss": 0.702,
"step": 2270
},
{
"epoch": 0.30458887181884975,
"grad_norm": 3.90625,
"learning_rate": 4.238912636922255e-06,
"loss": 0.746,
"step": 2280
},
{
"epoch": 0.3059247879233184,
"grad_norm": 3.9375,
"learning_rate": 4.235573069730164e-06,
"loss": 0.7132,
"step": 2290
},
{
"epoch": 0.30726070402778705,
"grad_norm": 4.15625,
"learning_rate": 4.232233502538071e-06,
"loss": 0.7214,
"step": 2300
},
{
"epoch": 0.3085966201322557,
"grad_norm": 4.15625,
"learning_rate": 4.22889393534598e-06,
"loss": 0.7364,
"step": 2310
},
{
"epoch": 0.30993253623672434,
"grad_norm": 3.78125,
"learning_rate": 4.2255543681538876e-06,
"loss": 0.742,
"step": 2320
},
{
"epoch": 0.311268452341193,
"grad_norm": 4.03125,
"learning_rate": 4.222214800961796e-06,
"loss": 0.726,
"step": 2330
},
{
"epoch": 0.31260436844566164,
"grad_norm": 3.890625,
"learning_rate": 4.218875233769704e-06,
"loss": 0.7157,
"step": 2340
},
{
"epoch": 0.31394028455013023,
"grad_norm": 4.21875,
"learning_rate": 4.215535666577612e-06,
"loss": 0.7923,
"step": 2350
},
{
"epoch": 0.3152762006545989,
"grad_norm": 3.875,
"learning_rate": 4.21219609938552e-06,
"loss": 0.7275,
"step": 2360
},
{
"epoch": 0.31661211675906753,
"grad_norm": 4.0625,
"learning_rate": 4.208856532193428e-06,
"loss": 0.7346,
"step": 2370
},
{
"epoch": 0.3179480328635362,
"grad_norm": 3.96875,
"learning_rate": 4.205516965001336e-06,
"loss": 0.7328,
"step": 2380
},
{
"epoch": 0.3192839489680048,
"grad_norm": 4.15625,
"learning_rate": 4.2021773978092445e-06,
"loss": 0.7547,
"step": 2390
},
{
"epoch": 0.3206198650724735,
"grad_norm": 4.0625,
"learning_rate": 4.198837830617152e-06,
"loss": 0.7366,
"step": 2400
},
{
"epoch": 0.32195578117694207,
"grad_norm": 3.859375,
"learning_rate": 4.195498263425061e-06,
"loss": 0.7104,
"step": 2410
},
{
"epoch": 0.3232916972814107,
"grad_norm": 4.3125,
"learning_rate": 4.192158696232968e-06,
"loss": 0.7339,
"step": 2420
},
{
"epoch": 0.32462761338587937,
"grad_norm": 4.0625,
"learning_rate": 4.188819129040877e-06,
"loss": 0.7453,
"step": 2430
},
{
"epoch": 0.325963529490348,
"grad_norm": 3.984375,
"learning_rate": 4.1854795618487845e-06,
"loss": 0.7033,
"step": 2440
},
{
"epoch": 0.32729944559481666,
"grad_norm": 4.15625,
"learning_rate": 4.182139994656693e-06,
"loss": 0.7193,
"step": 2450
},
{
"epoch": 0.3286353616992853,
"grad_norm": 3.734375,
"learning_rate": 4.178800427464601e-06,
"loss": 0.7078,
"step": 2460
},
{
"epoch": 0.3299712778037539,
"grad_norm": 3.6875,
"learning_rate": 4.175460860272509e-06,
"loss": 0.7199,
"step": 2470
},
{
"epoch": 0.33130719390822255,
"grad_norm": 3.78125,
"learning_rate": 4.172121293080418e-06,
"loss": 0.7428,
"step": 2480
},
{
"epoch": 0.3326431100126912,
"grad_norm": 4.0625,
"learning_rate": 4.168781725888325e-06,
"loss": 0.7177,
"step": 2490
},
{
"epoch": 0.33397902611715985,
"grad_norm": 3.609375,
"learning_rate": 4.165442158696234e-06,
"loss": 0.7148,
"step": 2500
},
{
"epoch": 0.33397902611715985,
"eval_loss": 0.7201098799705505,
"eval_runtime": 252.4792,
"eval_samples_per_second": 26.355,
"eval_steps_per_second": 3.295,
"step": 2500
},
{
"epoch": 0.3353149422216285,
"grad_norm": 3.90625,
"learning_rate": 4.162102591504141e-06,
"loss": 0.718,
"step": 2510
},
{
"epoch": 0.33665085832609715,
"grad_norm": 3.859375,
"learning_rate": 4.15876302431205e-06,
"loss": 0.7136,
"step": 2520
},
{
"epoch": 0.33798677443056574,
"grad_norm": 3.84375,
"learning_rate": 4.1554234571199576e-06,
"loss": 0.7181,
"step": 2530
},
{
"epoch": 0.3393226905350344,
"grad_norm": 4.0,
"learning_rate": 4.152083889927866e-06,
"loss": 0.7254,
"step": 2540
},
{
"epoch": 0.34065860663950304,
"grad_norm": 4.1875,
"learning_rate": 4.148744322735774e-06,
"loss": 0.7175,
"step": 2550
},
{
"epoch": 0.3419945227439717,
"grad_norm": 4.15625,
"learning_rate": 4.145404755543681e-06,
"loss": 0.7205,
"step": 2560
},
{
"epoch": 0.34333043884844033,
"grad_norm": 3.953125,
"learning_rate": 4.14206518835159e-06,
"loss": 0.7157,
"step": 2570
},
{
"epoch": 0.344666354952909,
"grad_norm": 3.953125,
"learning_rate": 4.1387256211594975e-06,
"loss": 0.718,
"step": 2580
},
{
"epoch": 0.3460022710573776,
"grad_norm": 3.890625,
"learning_rate": 4.135386053967406e-06,
"loss": 0.6862,
"step": 2590
},
{
"epoch": 0.3473381871618462,
"grad_norm": 3.921875,
"learning_rate": 4.132046486775314e-06,
"loss": 0.7078,
"step": 2600
},
{
"epoch": 0.34867410326631487,
"grad_norm": 4.0,
"learning_rate": 4.128706919583222e-06,
"loss": 0.7061,
"step": 2610
},
{
"epoch": 0.3500100193707835,
"grad_norm": 4.1875,
"learning_rate": 4.125367352391131e-06,
"loss": 0.7337,
"step": 2620
},
{
"epoch": 0.35134593547525217,
"grad_norm": 3.828125,
"learning_rate": 4.122027785199038e-06,
"loss": 0.7386,
"step": 2630
},
{
"epoch": 0.3526818515797208,
"grad_norm": 4.15625,
"learning_rate": 4.118688218006947e-06,
"loss": 0.7431,
"step": 2640
},
{
"epoch": 0.3540177676841894,
"grad_norm": 4.09375,
"learning_rate": 4.115348650814855e-06,
"loss": 0.7567,
"step": 2650
},
{
"epoch": 0.35535368378865806,
"grad_norm": 3.953125,
"learning_rate": 4.112009083622763e-06,
"loss": 0.7152,
"step": 2660
},
{
"epoch": 0.3566895998931267,
"grad_norm": 3.921875,
"learning_rate": 4.1086695164306715e-06,
"loss": 0.7214,
"step": 2670
},
{
"epoch": 0.35802551599759536,
"grad_norm": 4.125,
"learning_rate": 4.105329949238579e-06,
"loss": 0.7158,
"step": 2680
},
{
"epoch": 0.359361432102064,
"grad_norm": 4.15625,
"learning_rate": 4.101990382046488e-06,
"loss": 0.7259,
"step": 2690
},
{
"epoch": 0.36069734820653265,
"grad_norm": 4.0625,
"learning_rate": 4.098650814854395e-06,
"loss": 0.7391,
"step": 2700
},
{
"epoch": 0.36203326431100125,
"grad_norm": 3.8125,
"learning_rate": 4.095311247662303e-06,
"loss": 0.705,
"step": 2710
},
{
"epoch": 0.3633691804154699,
"grad_norm": 4.59375,
"learning_rate": 4.091971680470211e-06,
"loss": 0.6911,
"step": 2720
},
{
"epoch": 0.36470509651993854,
"grad_norm": 4.1875,
"learning_rate": 4.088632113278119e-06,
"loss": 0.7339,
"step": 2730
},
{
"epoch": 0.3660410126244072,
"grad_norm": 3.90625,
"learning_rate": 4.0852925460860276e-06,
"loss": 0.6803,
"step": 2740
},
{
"epoch": 0.36737692872887584,
"grad_norm": 3.921875,
"learning_rate": 4.081952978893935e-06,
"loss": 0.7147,
"step": 2750
},
{
"epoch": 0.36737692872887584,
"eval_loss": 0.7155716419219971,
"eval_runtime": 253.222,
"eval_samples_per_second": 26.277,
"eval_steps_per_second": 3.286,
"step": 2750
},
{
"epoch": 0.3687128448333445,
"grad_norm": 4.09375,
"learning_rate": 4.078613411701844e-06,
"loss": 0.72,
"step": 2760
},
{
"epoch": 0.3700487609378131,
"grad_norm": 3.765625,
"learning_rate": 4.075273844509751e-06,
"loss": 0.6894,
"step": 2770
},
{
"epoch": 0.37138467704228173,
"grad_norm": 4.15625,
"learning_rate": 4.07193427731766e-06,
"loss": 0.6935,
"step": 2780
},
{
"epoch": 0.3727205931467504,
"grad_norm": 4.25,
"learning_rate": 4.068594710125568e-06,
"loss": 0.7328,
"step": 2790
},
{
"epoch": 0.374056509251219,
"grad_norm": 4.03125,
"learning_rate": 4.065255142933476e-06,
"loss": 0.7083,
"step": 2800
},
{
"epoch": 0.3753924253556877,
"grad_norm": 3.8125,
"learning_rate": 4.0619155757413845e-06,
"loss": 0.7018,
"step": 2810
},
{
"epoch": 0.3767283414601563,
"grad_norm": 4.34375,
"learning_rate": 4.058576008549292e-06,
"loss": 0.6831,
"step": 2820
},
{
"epoch": 0.3780642575646249,
"grad_norm": 3.9375,
"learning_rate": 4.055236441357201e-06,
"loss": 0.6923,
"step": 2830
},
{
"epoch": 0.37940017366909357,
"grad_norm": 4.3125,
"learning_rate": 4.051896874165109e-06,
"loss": 0.7312,
"step": 2840
},
{
"epoch": 0.3807360897735622,
"grad_norm": 3.953125,
"learning_rate": 4.048557306973017e-06,
"loss": 0.7162,
"step": 2850
},
{
"epoch": 0.38207200587803086,
"grad_norm": 4.1875,
"learning_rate": 4.0452177397809245e-06,
"loss": 0.7233,
"step": 2860
},
{
"epoch": 0.3834079219824995,
"grad_norm": 3.875,
"learning_rate": 4.041878172588833e-06,
"loss": 0.716,
"step": 2870
},
{
"epoch": 0.38474383808696816,
"grad_norm": 4.1875,
"learning_rate": 4.038538605396741e-06,
"loss": 0.691,
"step": 2880
},
{
"epoch": 0.38607975419143675,
"grad_norm": 3.84375,
"learning_rate": 4.035199038204649e-06,
"loss": 0.7189,
"step": 2890
},
{
"epoch": 0.3874156702959054,
"grad_norm": 4.03125,
"learning_rate": 4.031859471012557e-06,
"loss": 0.6949,
"step": 2900
},
{
"epoch": 0.38875158640037405,
"grad_norm": 4.125,
"learning_rate": 4.028519903820465e-06,
"loss": 0.6945,
"step": 2910
},
{
"epoch": 0.3900875025048427,
"grad_norm": 4.0,
"learning_rate": 4.025180336628373e-06,
"loss": 0.6936,
"step": 2920
},
{
"epoch": 0.39142341860931135,
"grad_norm": 4.21875,
"learning_rate": 4.021840769436281e-06,
"loss": 0.686,
"step": 2930
},
{
"epoch": 0.39275933471378,
"grad_norm": 3.921875,
"learning_rate": 4.018501202244189e-06,
"loss": 0.7042,
"step": 2940
},
{
"epoch": 0.3940952508182486,
"grad_norm": 4.09375,
"learning_rate": 4.0151616350520976e-06,
"loss": 0.702,
"step": 2950
},
{
"epoch": 0.39543116692271724,
"grad_norm": 4.03125,
"learning_rate": 4.011822067860005e-06,
"loss": 0.6889,
"step": 2960
},
{
"epoch": 0.3967670830271859,
"grad_norm": 4.09375,
"learning_rate": 4.008482500667914e-06,
"loss": 0.7078,
"step": 2970
},
{
"epoch": 0.39810299913165453,
"grad_norm": 3.90625,
"learning_rate": 4.005142933475822e-06,
"loss": 0.721,
"step": 2980
},
{
"epoch": 0.3994389152361232,
"grad_norm": 4.3125,
"learning_rate": 4.00180336628373e-06,
"loss": 0.7175,
"step": 2990
},
{
"epoch": 0.40077483134059183,
"grad_norm": 4.125,
"learning_rate": 3.998463799091638e-06,
"loss": 0.6741,
"step": 3000
},
{
"epoch": 0.40077483134059183,
"eval_loss": 0.7119663953781128,
"eval_runtime": 255.7665,
"eval_samples_per_second": 26.016,
"eval_steps_per_second": 3.253,
"step": 3000
},
{
"epoch": 0.4021107474450604,
"grad_norm": 3.828125,
"learning_rate": 3.995124231899546e-06,
"loss": 0.694,
"step": 3010
},
{
"epoch": 0.40344666354952907,
"grad_norm": 3.890625,
"learning_rate": 3.9917846647074545e-06,
"loss": 0.7019,
"step": 3020
},
{
"epoch": 0.4047825796539977,
"grad_norm": 3.84375,
"learning_rate": 3.988445097515362e-06,
"loss": 0.7228,
"step": 3030
},
{
"epoch": 0.40611849575846637,
"grad_norm": 3.921875,
"learning_rate": 3.985105530323271e-06,
"loss": 0.7208,
"step": 3040
},
{
"epoch": 0.407454411862935,
"grad_norm": 3.765625,
"learning_rate": 3.981765963131178e-06,
"loss": 0.6944,
"step": 3050
},
{
"epoch": 0.40879032796740367,
"grad_norm": 3.53125,
"learning_rate": 3.978426395939087e-06,
"loss": 0.6835,
"step": 3060
},
{
"epoch": 0.41012624407187226,
"grad_norm": 4.15625,
"learning_rate": 3.9750868287469945e-06,
"loss": 0.7404,
"step": 3070
},
{
"epoch": 0.4114621601763409,
"grad_norm": 4.125,
"learning_rate": 3.971747261554903e-06,
"loss": 0.7085,
"step": 3080
},
{
"epoch": 0.41279807628080956,
"grad_norm": 4.03125,
"learning_rate": 3.968407694362811e-06,
"loss": 0.7039,
"step": 3090
},
{
"epoch": 0.4141339923852782,
"grad_norm": 4.3125,
"learning_rate": 3.965068127170719e-06,
"loss": 0.7001,
"step": 3100
},
{
"epoch": 0.41546990848974685,
"grad_norm": 4.09375,
"learning_rate": 3.961728559978627e-06,
"loss": 0.7204,
"step": 3110
},
{
"epoch": 0.4168058245942155,
"grad_norm": 4.0,
"learning_rate": 3.958388992786535e-06,
"loss": 0.7119,
"step": 3120
},
{
"epoch": 0.41814174069868415,
"grad_norm": 4.0625,
"learning_rate": 3.955049425594443e-06,
"loss": 0.7243,
"step": 3130
},
{
"epoch": 0.41947765680315274,
"grad_norm": 4.25,
"learning_rate": 3.9517098584023514e-06,
"loss": 0.7045,
"step": 3140
},
{
"epoch": 0.4208135729076214,
"grad_norm": 3.875,
"learning_rate": 3.948370291210259e-06,
"loss": 0.7215,
"step": 3150
},
{
"epoch": 0.42214948901209004,
"grad_norm": 3.8125,
"learning_rate": 3.9450307240181676e-06,
"loss": 0.6764,
"step": 3160
},
{
"epoch": 0.4234854051165587,
"grad_norm": 3.65625,
"learning_rate": 3.941691156826076e-06,
"loss": 0.7211,
"step": 3170
},
{
"epoch": 0.42482132122102734,
"grad_norm": 4.1875,
"learning_rate": 3.938351589633984e-06,
"loss": 0.7141,
"step": 3180
},
{
"epoch": 0.426157237325496,
"grad_norm": 4.28125,
"learning_rate": 3.935012022441892e-06,
"loss": 0.724,
"step": 3190
},
{
"epoch": 0.4274931534299646,
"grad_norm": 3.9375,
"learning_rate": 3.9316724552498e-06,
"loss": 0.723,
"step": 3200
},
{
"epoch": 0.4288290695344332,
"grad_norm": 4.21875,
"learning_rate": 3.928332888057708e-06,
"loss": 0.7029,
"step": 3210
},
{
"epoch": 0.4301649856389019,
"grad_norm": 4.03125,
"learning_rate": 3.924993320865616e-06,
"loss": 0.7042,
"step": 3220
},
{
"epoch": 0.4315009017433705,
"grad_norm": 4.1875,
"learning_rate": 3.9216537536735245e-06,
"loss": 0.716,
"step": 3230
},
{
"epoch": 0.4328368178478392,
"grad_norm": 4.375,
"learning_rate": 3.918314186481432e-06,
"loss": 0.6866,
"step": 3240
},
{
"epoch": 0.4341727339523078,
"grad_norm": 3.953125,
"learning_rate": 3.914974619289341e-06,
"loss": 0.7343,
"step": 3250
},
{
"epoch": 0.4341727339523078,
"eval_loss": 0.7082972526550293,
"eval_runtime": 255.5257,
"eval_samples_per_second": 26.04,
"eval_steps_per_second": 3.256,
"step": 3250
},
{
"epoch": 0.4355086500567764,
"grad_norm": 3.96875,
"learning_rate": 3.911635052097248e-06,
"loss": 0.6994,
"step": 3260
},
{
"epoch": 0.43684456616124506,
"grad_norm": 3.78125,
"learning_rate": 3.908295484905157e-06,
"loss": 0.6736,
"step": 3270
},
{
"epoch": 0.4381804822657137,
"grad_norm": 3.90625,
"learning_rate": 3.9049559177130645e-06,
"loss": 0.7078,
"step": 3280
},
{
"epoch": 0.43951639837018236,
"grad_norm": 4.21875,
"learning_rate": 3.901616350520972e-06,
"loss": 0.7155,
"step": 3290
},
{
"epoch": 0.440852314474651,
"grad_norm": 4.28125,
"learning_rate": 3.898276783328881e-06,
"loss": 0.702,
"step": 3300
},
{
"epoch": 0.44218823057911966,
"grad_norm": 4.15625,
"learning_rate": 3.894937216136789e-06,
"loss": 0.7082,
"step": 3310
},
{
"epoch": 0.44352414668358825,
"grad_norm": 4.03125,
"learning_rate": 3.891597648944697e-06,
"loss": 0.6865,
"step": 3320
},
{
"epoch": 0.4448600627880569,
"grad_norm": 4.125,
"learning_rate": 3.888258081752605e-06,
"loss": 0.708,
"step": 3330
},
{
"epoch": 0.44619597889252555,
"grad_norm": 4.03125,
"learning_rate": 3.884918514560514e-06,
"loss": 0.7067,
"step": 3340
},
{
"epoch": 0.4475318949969942,
"grad_norm": 4.09375,
"learning_rate": 3.8815789473684214e-06,
"loss": 0.7224,
"step": 3350
},
{
"epoch": 0.44886781110146284,
"grad_norm": 3.984375,
"learning_rate": 3.87823938017633e-06,
"loss": 0.7184,
"step": 3360
},
{
"epoch": 0.4502037272059315,
"grad_norm": 4.03125,
"learning_rate": 3.8748998129842376e-06,
"loss": 0.7036,
"step": 3370
},
{
"epoch": 0.4515396433104001,
"grad_norm": 4.21875,
"learning_rate": 3.871560245792146e-06,
"loss": 0.6918,
"step": 3380
},
{
"epoch": 0.45287555941486873,
"grad_norm": 3.921875,
"learning_rate": 3.868220678600054e-06,
"loss": 0.6886,
"step": 3390
},
{
"epoch": 0.4542114755193374,
"grad_norm": 3.984375,
"learning_rate": 3.864881111407962e-06,
"loss": 0.6921,
"step": 3400
},
{
"epoch": 0.45554739162380603,
"grad_norm": 3.921875,
"learning_rate": 3.86154154421587e-06,
"loss": 0.7179,
"step": 3410
},
{
"epoch": 0.4568833077282747,
"grad_norm": 3.953125,
"learning_rate": 3.858201977023778e-06,
"loss": 0.7011,
"step": 3420
},
{
"epoch": 0.4582192238327433,
"grad_norm": 4.15625,
"learning_rate": 3.854862409831686e-06,
"loss": 0.7181,
"step": 3430
},
{
"epoch": 0.4595551399372119,
"grad_norm": 4.28125,
"learning_rate": 3.851522842639594e-06,
"loss": 0.719,
"step": 3440
},
{
"epoch": 0.46089105604168057,
"grad_norm": 4.34375,
"learning_rate": 3.848183275447502e-06,
"loss": 0.7156,
"step": 3450
},
{
"epoch": 0.4622269721461492,
"grad_norm": 4.3125,
"learning_rate": 3.84484370825541e-06,
"loss": 0.7408,
"step": 3460
},
{
"epoch": 0.46356288825061787,
"grad_norm": 4.375,
"learning_rate": 3.841504141063318e-06,
"loss": 0.7285,
"step": 3470
},
{
"epoch": 0.4648988043550865,
"grad_norm": 4.1875,
"learning_rate": 3.838164573871227e-06,
"loss": 0.7227,
"step": 3480
},
{
"epoch": 0.46623472045955516,
"grad_norm": 4.21875,
"learning_rate": 3.8348250066791345e-06,
"loss": 0.6922,
"step": 3490
},
{
"epoch": 0.46757063656402376,
"grad_norm": 4.375,
"learning_rate": 3.831485439487043e-06,
"loss": 0.6978,
"step": 3500
},
{
"epoch": 0.46757063656402376,
"eval_loss": 0.7053844332695007,
"eval_runtime": 253.0049,
"eval_samples_per_second": 26.3,
"eval_steps_per_second": 3.288,
"step": 3500
},
{
"epoch": 0.4689065526684924,
"grad_norm": 4.03125,
"learning_rate": 3.828145872294951e-06,
"loss": 0.6694,
"step": 3510
},
{
"epoch": 0.47024246877296105,
"grad_norm": 4.125,
"learning_rate": 3.824806305102859e-06,
"loss": 0.7262,
"step": 3520
},
{
"epoch": 0.4715783848774297,
"grad_norm": 4.15625,
"learning_rate": 3.821466737910768e-06,
"loss": 0.7241,
"step": 3530
},
{
"epoch": 0.47291430098189835,
"grad_norm": 4.28125,
"learning_rate": 3.818127170718675e-06,
"loss": 0.7089,
"step": 3540
},
{
"epoch": 0.474250217086367,
"grad_norm": 4.4375,
"learning_rate": 3.814787603526584e-06,
"loss": 0.7031,
"step": 3550
},
{
"epoch": 0.4755861331908356,
"grad_norm": 3.953125,
"learning_rate": 3.8114480363344914e-06,
"loss": 0.6876,
"step": 3560
},
{
"epoch": 0.47692204929530424,
"grad_norm": 4.125,
"learning_rate": 3.8081084691424e-06,
"loss": 0.6875,
"step": 3570
},
{
"epoch": 0.4782579653997729,
"grad_norm": 3.84375,
"learning_rate": 3.8047689019503076e-06,
"loss": 0.6992,
"step": 3580
},
{
"epoch": 0.47959388150424154,
"grad_norm": 4.09375,
"learning_rate": 3.8014293347582152e-06,
"loss": 0.6999,
"step": 3590
},
{
"epoch": 0.4809297976087102,
"grad_norm": 4.21875,
"learning_rate": 3.7980897675661237e-06,
"loss": 0.6897,
"step": 3600
},
{
"epoch": 0.48226571371317883,
"grad_norm": 4.03125,
"learning_rate": 3.794750200374032e-06,
"loss": 0.7004,
"step": 3610
},
{
"epoch": 0.4836016298176474,
"grad_norm": 4.34375,
"learning_rate": 3.79141063318194e-06,
"loss": 0.6983,
"step": 3620
},
{
"epoch": 0.4849375459221161,
"grad_norm": 3.765625,
"learning_rate": 3.788071065989848e-06,
"loss": 0.7035,
"step": 3630
},
{
"epoch": 0.4862734620265847,
"grad_norm": 4.0625,
"learning_rate": 3.7847314987977565e-06,
"loss": 0.7003,
"step": 3640
},
{
"epoch": 0.4876093781310534,
"grad_norm": 4.3125,
"learning_rate": 3.781391931605664e-06,
"loss": 0.7221,
"step": 3650
},
{
"epoch": 0.488945294235522,
"grad_norm": 4.0,
"learning_rate": 3.7780523644135726e-06,
"loss": 0.7035,
"step": 3660
},
{
"epoch": 0.49028121033999067,
"grad_norm": 4.1875,
"learning_rate": 3.7747127972214803e-06,
"loss": 0.68,
"step": 3670
},
{
"epoch": 0.49161712644445926,
"grad_norm": 4.4375,
"learning_rate": 3.7713732300293888e-06,
"loss": 0.716,
"step": 3680
},
{
"epoch": 0.4929530425489279,
"grad_norm": 4.28125,
"learning_rate": 3.7680336628372964e-06,
"loss": 0.6904,
"step": 3690
},
{
"epoch": 0.49428895865339656,
"grad_norm": 4.28125,
"learning_rate": 3.764694095645205e-06,
"loss": 0.6839,
"step": 3700
},
{
"epoch": 0.4956248747578652,
"grad_norm": 4.375,
"learning_rate": 3.761354528453113e-06,
"loss": 0.7143,
"step": 3710
},
{
"epoch": 0.49696079086233386,
"grad_norm": 4.34375,
"learning_rate": 3.7580149612610206e-06,
"loss": 0.7024,
"step": 3720
},
{
"epoch": 0.4982967069668025,
"grad_norm": 4.125,
"learning_rate": 3.754675394068929e-06,
"loss": 0.6954,
"step": 3730
},
{
"epoch": 0.4996326230712711,
"grad_norm": 4.28125,
"learning_rate": 3.7513358268768368e-06,
"loss": 0.7176,
"step": 3740
},
{
"epoch": 0.5009685391757398,
"grad_norm": 4.03125,
"learning_rate": 3.7479962596847453e-06,
"loss": 0.6977,
"step": 3750
},
{
"epoch": 0.5009685391757398,
"eval_loss": 0.7025312185287476,
"eval_runtime": 252.6561,
"eval_samples_per_second": 26.336,
"eval_steps_per_second": 3.293,
"step": 3750
},
{
"epoch": 0.5023044552802084,
"grad_norm": 3.9375,
"learning_rate": 3.744656692492653e-06,
"loss": 0.6779,
"step": 3760
},
{
"epoch": 0.503640371384677,
"grad_norm": 4.1875,
"learning_rate": 3.7413171253005614e-06,
"loss": 0.7102,
"step": 3770
},
{
"epoch": 0.5049762874891457,
"grad_norm": 4.28125,
"learning_rate": 3.7379775581084695e-06,
"loss": 0.72,
"step": 3780
},
{
"epoch": 0.5063122035936143,
"grad_norm": 4.09375,
"learning_rate": 3.7346379909163776e-06,
"loss": 0.6866,
"step": 3790
},
{
"epoch": 0.507648119698083,
"grad_norm": 4.03125,
"learning_rate": 3.7312984237242857e-06,
"loss": 0.7016,
"step": 3800
},
{
"epoch": 0.5089840358025516,
"grad_norm": 4.65625,
"learning_rate": 3.7279588565321937e-06,
"loss": 0.6924,
"step": 3810
},
{
"epoch": 0.5103199519070203,
"grad_norm": 4.0,
"learning_rate": 3.724619289340102e-06,
"loss": 0.6948,
"step": 3820
},
{
"epoch": 0.5116558680114889,
"grad_norm": 4.0,
"learning_rate": 3.7212797221480103e-06,
"loss": 0.7075,
"step": 3830
},
{
"epoch": 0.5129917841159575,
"grad_norm": 4.34375,
"learning_rate": 3.717940154955918e-06,
"loss": 0.6979,
"step": 3840
},
{
"epoch": 0.5143277002204262,
"grad_norm": 4.125,
"learning_rate": 3.7146005877638265e-06,
"loss": 0.6876,
"step": 3850
},
{
"epoch": 0.5156636163248948,
"grad_norm": 4.125,
"learning_rate": 3.711261020571734e-06,
"loss": 0.6823,
"step": 3860
},
{
"epoch": 0.5169995324293635,
"grad_norm": 4.0,
"learning_rate": 3.707921453379642e-06,
"loss": 0.684,
"step": 3870
},
{
"epoch": 0.5183354485338321,
"grad_norm": 4.34375,
"learning_rate": 3.7045818861875503e-06,
"loss": 0.6715,
"step": 3880
},
{
"epoch": 0.5196713646383008,
"grad_norm": 4.28125,
"learning_rate": 3.7012423189954583e-06,
"loss": 0.7144,
"step": 3890
},
{
"epoch": 0.5210072807427694,
"grad_norm": 4.21875,
"learning_rate": 3.697902751803367e-06,
"loss": 0.7035,
"step": 3900
},
{
"epoch": 0.522343196847238,
"grad_norm": 4.1875,
"learning_rate": 3.6945631846112745e-06,
"loss": 0.7154,
"step": 3910
},
{
"epoch": 0.5236791129517067,
"grad_norm": 4.6875,
"learning_rate": 3.691223617419183e-06,
"loss": 0.712,
"step": 3920
},
{
"epoch": 0.5250150290561753,
"grad_norm": 4.3125,
"learning_rate": 3.6878840502270906e-06,
"loss": 0.7141,
"step": 3930
},
{
"epoch": 0.526350945160644,
"grad_norm": 4.34375,
"learning_rate": 3.684544483034999e-06,
"loss": 0.7239,
"step": 3940
},
{
"epoch": 0.5276868612651126,
"grad_norm": 4.625,
"learning_rate": 3.6812049158429068e-06,
"loss": 0.6717,
"step": 3950
},
{
"epoch": 0.5290227773695811,
"grad_norm": 4.1875,
"learning_rate": 3.6778653486508153e-06,
"loss": 0.6986,
"step": 3960
},
{
"epoch": 0.5303586934740498,
"grad_norm": 4.9375,
"learning_rate": 3.6745257814587234e-06,
"loss": 0.7062,
"step": 3970
},
{
"epoch": 0.5316946095785184,
"grad_norm": 4.5625,
"learning_rate": 3.6711862142666314e-06,
"loss": 0.721,
"step": 3980
},
{
"epoch": 0.5330305256829871,
"grad_norm": 4.21875,
"learning_rate": 3.6678466470745395e-06,
"loss": 0.6908,
"step": 3990
},
{
"epoch": 0.5343664417874557,
"grad_norm": 4.21875,
"learning_rate": 3.664507079882448e-06,
"loss": 0.6921,
"step": 4000
},
{
"epoch": 0.5343664417874557,
"eval_loss": 0.7001104950904846,
"eval_runtime": 255.3609,
"eval_samples_per_second": 26.057,
"eval_steps_per_second": 3.258,
"step": 4000
},
{
"epoch": 0.5357023578919244,
"grad_norm": 4.46875,
"learning_rate": 3.6611675126903557e-06,
"loss": 0.7076,
"step": 4010
},
{
"epoch": 0.537038273996393,
"grad_norm": 4.0,
"learning_rate": 3.6578279454982633e-06,
"loss": 0.7048,
"step": 4020
},
{
"epoch": 0.5383741901008616,
"grad_norm": 4.34375,
"learning_rate": 3.654488378306172e-06,
"loss": 0.7001,
"step": 4030
},
{
"epoch": 0.5397101062053303,
"grad_norm": 4.4375,
"learning_rate": 3.65114881111408e-06,
"loss": 0.6865,
"step": 4040
},
{
"epoch": 0.5410460223097989,
"grad_norm": 4.3125,
"learning_rate": 3.647809243921988e-06,
"loss": 0.6856,
"step": 4050
},
{
"epoch": 0.5423819384142676,
"grad_norm": 4.125,
"learning_rate": 3.644469676729896e-06,
"loss": 0.7233,
"step": 4060
},
{
"epoch": 0.5437178545187362,
"grad_norm": 3.96875,
"learning_rate": 3.6411301095378045e-06,
"loss": 0.7212,
"step": 4070
},
{
"epoch": 0.5450537706232048,
"grad_norm": 4.46875,
"learning_rate": 3.637790542345712e-06,
"loss": 0.708,
"step": 4080
},
{
"epoch": 0.5463896867276735,
"grad_norm": 4.125,
"learning_rate": 3.6344509751536207e-06,
"loss": 0.712,
"step": 4090
},
{
"epoch": 0.5477256028321421,
"grad_norm": 4.21875,
"learning_rate": 3.6311114079615283e-06,
"loss": 0.7102,
"step": 4100
},
{
"epoch": 0.5490615189366108,
"grad_norm": 4.15625,
"learning_rate": 3.627771840769437e-06,
"loss": 0.6841,
"step": 4110
},
{
"epoch": 0.5503974350410794,
"grad_norm": 4.21875,
"learning_rate": 3.6244322735773445e-06,
"loss": 0.7087,
"step": 4120
},
{
"epoch": 0.5517333511455481,
"grad_norm": 4.875,
"learning_rate": 3.621092706385253e-06,
"loss": 0.689,
"step": 4130
},
{
"epoch": 0.5530692672500167,
"grad_norm": 4.3125,
"learning_rate": 3.617753139193161e-06,
"loss": 0.7036,
"step": 4140
},
{
"epoch": 0.5544051833544853,
"grad_norm": 4.25,
"learning_rate": 3.6144135720010687e-06,
"loss": 0.7003,
"step": 4150
},
{
"epoch": 0.555741099458954,
"grad_norm": 4.125,
"learning_rate": 3.611074004808977e-06,
"loss": 0.676,
"step": 4160
},
{
"epoch": 0.5570770155634226,
"grad_norm": 3.84375,
"learning_rate": 3.607734437616885e-06,
"loss": 0.6833,
"step": 4170
},
{
"epoch": 0.5584129316678913,
"grad_norm": 4.375,
"learning_rate": 3.6043948704247934e-06,
"loss": 0.7204,
"step": 4180
},
{
"epoch": 0.5597488477723599,
"grad_norm": 4.0625,
"learning_rate": 3.601055303232701e-06,
"loss": 0.6966,
"step": 4190
},
{
"epoch": 0.5610847638768285,
"grad_norm": 4.4375,
"learning_rate": 3.5977157360406095e-06,
"loss": 0.6921,
"step": 4200
},
{
"epoch": 0.5624206799812972,
"grad_norm": 4.3125,
"learning_rate": 3.594376168848517e-06,
"loss": 0.6833,
"step": 4210
},
{
"epoch": 0.5637565960857658,
"grad_norm": 4.0,
"learning_rate": 3.5910366016564257e-06,
"loss": 0.6981,
"step": 4220
},
{
"epoch": 0.5650925121902345,
"grad_norm": 4.3125,
"learning_rate": 3.5876970344643337e-06,
"loss": 0.6917,
"step": 4230
},
{
"epoch": 0.5664284282947031,
"grad_norm": 4.125,
"learning_rate": 3.584357467272242e-06,
"loss": 0.6743,
"step": 4240
},
{
"epoch": 0.5677643443991718,
"grad_norm": 4.15625,
"learning_rate": 3.58101790008015e-06,
"loss": 0.6818,
"step": 4250
},
{
"epoch": 0.5677643443991718,
"eval_loss": 0.6978068351745605,
"eval_runtime": 255.6056,
"eval_samples_per_second": 26.032,
"eval_steps_per_second": 3.255,
"step": 4250
},
{
"epoch": 0.5691002605036404,
"grad_norm": 4.40625,
"learning_rate": 3.5776783328880584e-06,
"loss": 0.6927,
"step": 4260
},
{
"epoch": 0.570436176608109,
"grad_norm": 4.21875,
"learning_rate": 3.574338765695966e-06,
"loss": 0.7078,
"step": 4270
},
{
"epoch": 0.5717720927125777,
"grad_norm": 4.34375,
"learning_rate": 3.5709991985038745e-06,
"loss": 0.7098,
"step": 4280
},
{
"epoch": 0.5731080088170463,
"grad_norm": 4.34375,
"learning_rate": 3.567659631311782e-06,
"loss": 0.6667,
"step": 4290
},
{
"epoch": 0.574443924921515,
"grad_norm": 3.921875,
"learning_rate": 3.5643200641196903e-06,
"loss": 0.6994,
"step": 4300
},
{
"epoch": 0.5757798410259836,
"grad_norm": 4.3125,
"learning_rate": 3.5609804969275983e-06,
"loss": 0.6712,
"step": 4310
},
{
"epoch": 0.5771157571304522,
"grad_norm": 4.25,
"learning_rate": 3.5576409297355064e-06,
"loss": 0.7096,
"step": 4320
},
{
"epoch": 0.5784516732349209,
"grad_norm": 4.28125,
"learning_rate": 3.554301362543415e-06,
"loss": 0.6859,
"step": 4330
},
{
"epoch": 0.5797875893393895,
"grad_norm": 3.984375,
"learning_rate": 3.5509617953513226e-06,
"loss": 0.7046,
"step": 4340
},
{
"epoch": 0.5811235054438582,
"grad_norm": 3.84375,
"learning_rate": 3.547622228159231e-06,
"loss": 0.7108,
"step": 4350
},
{
"epoch": 0.5824594215483268,
"grad_norm": 4.1875,
"learning_rate": 3.5442826609671387e-06,
"loss": 0.7161,
"step": 4360
},
{
"epoch": 0.5837953376527955,
"grad_norm": 4.34375,
"learning_rate": 3.540943093775047e-06,
"loss": 0.7028,
"step": 4370
},
{
"epoch": 0.585131253757264,
"grad_norm": 4.125,
"learning_rate": 3.537603526582955e-06,
"loss": 0.6698,
"step": 4380
},
{
"epoch": 0.5864671698617326,
"grad_norm": 4.21875,
"learning_rate": 3.5342639593908634e-06,
"loss": 0.7042,
"step": 4390
},
{
"epoch": 0.5878030859662013,
"grad_norm": 4.0625,
"learning_rate": 3.5309243921987714e-06,
"loss": 0.7034,
"step": 4400
},
{
"epoch": 0.5891390020706699,
"grad_norm": 4.40625,
"learning_rate": 3.5275848250066795e-06,
"loss": 0.6819,
"step": 4410
},
{
"epoch": 0.5904749181751386,
"grad_norm": 4.3125,
"learning_rate": 3.5242452578145876e-06,
"loss": 0.7039,
"step": 4420
},
{
"epoch": 0.5918108342796072,
"grad_norm": 4.65625,
"learning_rate": 3.520905690622496e-06,
"loss": 0.6948,
"step": 4430
},
{
"epoch": 0.5931467503840758,
"grad_norm": 4.4375,
"learning_rate": 3.5175661234304037e-06,
"loss": 0.7188,
"step": 4440
},
{
"epoch": 0.5944826664885445,
"grad_norm": 3.921875,
"learning_rate": 3.5142265562383114e-06,
"loss": 0.6782,
"step": 4450
},
{
"epoch": 0.5958185825930131,
"grad_norm": 4.5,
"learning_rate": 3.51088698904622e-06,
"loss": 0.7279,
"step": 4460
},
{
"epoch": 0.5971544986974818,
"grad_norm": 4.28125,
"learning_rate": 3.507547421854128e-06,
"loss": 0.6873,
"step": 4470
},
{
"epoch": 0.5984904148019504,
"grad_norm": 4.375,
"learning_rate": 3.504207854662036e-06,
"loss": 0.6869,
"step": 4480
},
{
"epoch": 0.5998263309064191,
"grad_norm": 4.0,
"learning_rate": 3.500868287469944e-06,
"loss": 0.6892,
"step": 4490
},
{
"epoch": 0.6011622470108877,
"grad_norm": 4.1875,
"learning_rate": 3.497528720277852e-06,
"loss": 0.7126,
"step": 4500
},
{
"epoch": 0.6011622470108877,
"eval_loss": 0.695655345916748,
"eval_runtime": 253.7237,
"eval_samples_per_second": 26.225,
"eval_steps_per_second": 3.279,
"step": 4500
},
{
"epoch": 0.6024981631153563,
"grad_norm": 4.34375,
"learning_rate": 3.4941891530857603e-06,
"loss": 0.6961,
"step": 4510
},
{
"epoch": 0.603834079219825,
"grad_norm": 4.5625,
"learning_rate": 3.4908495858936688e-06,
"loss": 0.6887,
"step": 4520
},
{
"epoch": 0.6051699953242936,
"grad_norm": 4.34375,
"learning_rate": 3.4875100187015764e-06,
"loss": 0.693,
"step": 4530
},
{
"epoch": 0.6065059114287623,
"grad_norm": 4.625,
"learning_rate": 3.484170451509485e-06,
"loss": 0.7028,
"step": 4540
},
{
"epoch": 0.6078418275332309,
"grad_norm": 4.34375,
"learning_rate": 3.4808308843173926e-06,
"loss": 0.6989,
"step": 4550
},
{
"epoch": 0.6091777436376995,
"grad_norm": 4.34375,
"learning_rate": 3.477491317125301e-06,
"loss": 0.719,
"step": 4560
},
{
"epoch": 0.6105136597421682,
"grad_norm": 4.3125,
"learning_rate": 3.4741517499332087e-06,
"loss": 0.6756,
"step": 4570
},
{
"epoch": 0.6118495758466368,
"grad_norm": 4.1875,
"learning_rate": 3.470812182741117e-06,
"loss": 0.6627,
"step": 4580
},
{
"epoch": 0.6131854919511055,
"grad_norm": 4.1875,
"learning_rate": 3.4674726155490253e-06,
"loss": 0.7032,
"step": 4590
},
{
"epoch": 0.6145214080555741,
"grad_norm": 3.953125,
"learning_rate": 3.464133048356933e-06,
"loss": 0.6984,
"step": 4600
},
{
"epoch": 0.6158573241600428,
"grad_norm": 4.15625,
"learning_rate": 3.4607934811648414e-06,
"loss": 0.6945,
"step": 4610
},
{
"epoch": 0.6171932402645114,
"grad_norm": 4.40625,
"learning_rate": 3.457453913972749e-06,
"loss": 0.6859,
"step": 4620
},
{
"epoch": 0.61852915636898,
"grad_norm": 4.03125,
"learning_rate": 3.4541143467806576e-06,
"loss": 0.6805,
"step": 4630
},
{
"epoch": 0.6198650724734487,
"grad_norm": 4.375,
"learning_rate": 3.4507747795885652e-06,
"loss": 0.6938,
"step": 4640
},
{
"epoch": 0.6212009885779173,
"grad_norm": 4.125,
"learning_rate": 3.4474352123964737e-06,
"loss": 0.6964,
"step": 4650
},
{
"epoch": 0.622536904682386,
"grad_norm": 4.3125,
"learning_rate": 3.444095645204382e-06,
"loss": 0.6957,
"step": 4660
},
{
"epoch": 0.6238728207868546,
"grad_norm": 4.25,
"learning_rate": 3.44075607801229e-06,
"loss": 0.6652,
"step": 4670
},
{
"epoch": 0.6252087368913233,
"grad_norm": 4.34375,
"learning_rate": 3.437416510820198e-06,
"loss": 0.7084,
"step": 4680
},
{
"epoch": 0.6265446529957919,
"grad_norm": 4.3125,
"learning_rate": 3.4340769436281065e-06,
"loss": 0.68,
"step": 4690
},
{
"epoch": 0.6278805691002605,
"grad_norm": 3.953125,
"learning_rate": 3.430737376436014e-06,
"loss": 0.6886,
"step": 4700
},
{
"epoch": 0.6292164852047292,
"grad_norm": 4.59375,
"learning_rate": 3.4273978092439226e-06,
"loss": 0.6702,
"step": 4710
},
{
"epoch": 0.6305524013091978,
"grad_norm": 4.3125,
"learning_rate": 3.4240582420518303e-06,
"loss": 0.6923,
"step": 4720
},
{
"epoch": 0.6318883174136665,
"grad_norm": 4.09375,
"learning_rate": 3.4207186748597383e-06,
"loss": 0.6893,
"step": 4730
},
{
"epoch": 0.6332242335181351,
"grad_norm": 4.25,
"learning_rate": 3.4173791076676464e-06,
"loss": 0.6878,
"step": 4740
},
{
"epoch": 0.6345601496226037,
"grad_norm": 4.34375,
"learning_rate": 3.4140395404755545e-06,
"loss": 0.7255,
"step": 4750
},
{
"epoch": 0.6345601496226037,
"eval_loss": 0.6938580870628357,
"eval_runtime": 252.4989,
"eval_samples_per_second": 26.353,
"eval_steps_per_second": 3.295,
"step": 4750
},
{
"epoch": 0.6358960657270724,
"grad_norm": 4.09375,
"learning_rate": 3.410699973283463e-06,
"loss": 0.6993,
"step": 4760
},
{
"epoch": 0.637231981831541,
"grad_norm": 4.21875,
"learning_rate": 3.4073604060913706e-06,
"loss": 0.694,
"step": 4770
},
{
"epoch": 0.6385678979360097,
"grad_norm": 4.0625,
"learning_rate": 3.404020838899279e-06,
"loss": 0.6956,
"step": 4780
},
{
"epoch": 0.6399038140404782,
"grad_norm": 4.125,
"learning_rate": 3.400681271707187e-06,
"loss": 0.6602,
"step": 4790
},
{
"epoch": 0.641239730144947,
"grad_norm": 4.3125,
"learning_rate": 3.3973417045150953e-06,
"loss": 0.7062,
"step": 4800
},
{
"epoch": 0.6425756462494155,
"grad_norm": 4.375,
"learning_rate": 3.394002137323003e-06,
"loss": 0.6678,
"step": 4810
},
{
"epoch": 0.6439115623538841,
"grad_norm": 4.25,
"learning_rate": 3.3906625701309114e-06,
"loss": 0.6775,
"step": 4820
},
{
"epoch": 0.6452474784583528,
"grad_norm": 4.375,
"learning_rate": 3.3873230029388195e-06,
"loss": 0.7201,
"step": 4830
},
{
"epoch": 0.6465833945628214,
"grad_norm": 4.34375,
"learning_rate": 3.3839834357467276e-06,
"loss": 0.7082,
"step": 4840
},
{
"epoch": 0.6479193106672901,
"grad_norm": 4.34375,
"learning_rate": 3.3806438685546357e-06,
"loss": 0.6921,
"step": 4850
},
{
"epoch": 0.6492552267717587,
"grad_norm": 4.46875,
"learning_rate": 3.3773043013625437e-06,
"loss": 0.6908,
"step": 4860
},
{
"epoch": 0.6505911428762273,
"grad_norm": 4.28125,
"learning_rate": 3.373964734170452e-06,
"loss": 0.7033,
"step": 4870
},
{
"epoch": 0.651927058980696,
"grad_norm": 4.6875,
"learning_rate": 3.3706251669783595e-06,
"loss": 0.6887,
"step": 4880
},
{
"epoch": 0.6532629750851646,
"grad_norm": 4.21875,
"learning_rate": 3.367285599786268e-06,
"loss": 0.6837,
"step": 4890
},
{
"epoch": 0.6545988911896333,
"grad_norm": 4.03125,
"learning_rate": 3.363946032594176e-06,
"loss": 0.7105,
"step": 4900
},
{
"epoch": 0.6559348072941019,
"grad_norm": 4.25,
"learning_rate": 3.360606465402084e-06,
"loss": 0.6894,
"step": 4910
},
{
"epoch": 0.6572707233985706,
"grad_norm": 4.40625,
"learning_rate": 3.357266898209992e-06,
"loss": 0.6973,
"step": 4920
},
{
"epoch": 0.6586066395030392,
"grad_norm": 4.5625,
"learning_rate": 3.3539273310179003e-06,
"loss": 0.7103,
"step": 4930
},
{
"epoch": 0.6599425556075078,
"grad_norm": 4.03125,
"learning_rate": 3.3505877638258083e-06,
"loss": 0.6993,
"step": 4940
},
{
"epoch": 0.6612784717119765,
"grad_norm": 4.96875,
"learning_rate": 3.347248196633717e-06,
"loss": 0.6951,
"step": 4950
},
{
"epoch": 0.6626143878164451,
"grad_norm": 4.34375,
"learning_rate": 3.3439086294416245e-06,
"loss": 0.6882,
"step": 4960
},
{
"epoch": 0.6639503039209138,
"grad_norm": 4.25,
"learning_rate": 3.340569062249533e-06,
"loss": 0.6795,
"step": 4970
},
{
"epoch": 0.6652862200253824,
"grad_norm": 4.21875,
"learning_rate": 3.3372294950574406e-06,
"loss": 0.7015,
"step": 4980
},
{
"epoch": 0.666622136129851,
"grad_norm": 4.65625,
"learning_rate": 3.333889927865349e-06,
"loss": 0.6637,
"step": 4990
},
{
"epoch": 0.6679580522343197,
"grad_norm": 4.1875,
"learning_rate": 3.330550360673257e-06,
"loss": 0.6756,
"step": 5000
},
{
"epoch": 0.6679580522343197,
"eval_loss": 0.6922534704208374,
"eval_runtime": 255.0021,
"eval_samples_per_second": 26.094,
"eval_steps_per_second": 3.263,
"step": 5000
},
{
"epoch": 0.6692939683387883,
"grad_norm": 4.40625,
"learning_rate": 3.3272107934811653e-06,
"loss": 0.6962,
"step": 5010
},
{
"epoch": 0.670629884443257,
"grad_norm": 4.28125,
"learning_rate": 3.3238712262890734e-06,
"loss": 0.71,
"step": 5020
},
{
"epoch": 0.6719658005477256,
"grad_norm": 4.5,
"learning_rate": 3.320531659096981e-06,
"loss": 0.6734,
"step": 5030
},
{
"epoch": 0.6733017166521943,
"grad_norm": 4.34375,
"learning_rate": 3.3171920919048895e-06,
"loss": 0.6997,
"step": 5040
},
{
"epoch": 0.6746376327566629,
"grad_norm": 4.1875,
"learning_rate": 3.313852524712797e-06,
"loss": 0.6943,
"step": 5050
},
{
"epoch": 0.6759735488611315,
"grad_norm": 4.34375,
"learning_rate": 3.3105129575207057e-06,
"loss": 0.7012,
"step": 5060
},
{
"epoch": 0.6773094649656002,
"grad_norm": 4.625,
"learning_rate": 3.3071733903286133e-06,
"loss": 0.7069,
"step": 5070
},
{
"epoch": 0.6786453810700688,
"grad_norm": 4.3125,
"learning_rate": 3.303833823136522e-06,
"loss": 0.7147,
"step": 5080
},
{
"epoch": 0.6799812971745375,
"grad_norm": 4.34375,
"learning_rate": 3.30049425594443e-06,
"loss": 0.668,
"step": 5090
},
{
"epoch": 0.6813172132790061,
"grad_norm": 4.3125,
"learning_rate": 3.297154688752338e-06,
"loss": 0.6898,
"step": 5100
},
{
"epoch": 0.6826531293834747,
"grad_norm": 4.625,
"learning_rate": 3.293815121560246e-06,
"loss": 0.7131,
"step": 5110
},
{
"epoch": 0.6839890454879434,
"grad_norm": 4.5,
"learning_rate": 3.2904755543681545e-06,
"loss": 0.6894,
"step": 5120
},
{
"epoch": 0.685324961592412,
"grad_norm": 4.3125,
"learning_rate": 3.287135987176062e-06,
"loss": 0.6642,
"step": 5130
},
{
"epoch": 0.6866608776968807,
"grad_norm": 4.4375,
"learning_rate": 3.2837964199839707e-06,
"loss": 0.6666,
"step": 5140
},
{
"epoch": 0.6879967938013493,
"grad_norm": 4.3125,
"learning_rate": 3.2804568527918783e-06,
"loss": 0.688,
"step": 5150
},
{
"epoch": 0.689332709905818,
"grad_norm": 4.46875,
"learning_rate": 3.277117285599787e-06,
"loss": 0.6893,
"step": 5160
},
{
"epoch": 0.6906686260102866,
"grad_norm": 4.59375,
"learning_rate": 3.2737777184076945e-06,
"loss": 0.6861,
"step": 5170
},
{
"epoch": 0.6920045421147552,
"grad_norm": 4.53125,
"learning_rate": 3.2704381512156026e-06,
"loss": 0.6923,
"step": 5180
},
{
"epoch": 0.6933404582192239,
"grad_norm": 4.625,
"learning_rate": 3.267098584023511e-06,
"loss": 0.7132,
"step": 5190
},
{
"epoch": 0.6946763743236924,
"grad_norm": 4.28125,
"learning_rate": 3.2637590168314187e-06,
"loss": 0.6973,
"step": 5200
},
{
"epoch": 0.6960122904281612,
"grad_norm": 4.34375,
"learning_rate": 3.2604194496393272e-06,
"loss": 0.7069,
"step": 5210
},
{
"epoch": 0.6973482065326297,
"grad_norm": 4.78125,
"learning_rate": 3.257079882447235e-06,
"loss": 0.7184,
"step": 5220
},
{
"epoch": 0.6986841226370983,
"grad_norm": 4.125,
"learning_rate": 3.2537403152551434e-06,
"loss": 0.6771,
"step": 5230
},
{
"epoch": 0.700020038741567,
"grad_norm": 4.3125,
"learning_rate": 3.250400748063051e-06,
"loss": 0.6892,
"step": 5240
},
{
"epoch": 0.7013559548460356,
"grad_norm": 4.4375,
"learning_rate": 3.2470611808709595e-06,
"loss": 0.6873,
"step": 5250
},
{
"epoch": 0.7013559548460356,
"eval_loss": 0.6906906366348267,
"eval_runtime": 254.5589,
"eval_samples_per_second": 26.139,
"eval_steps_per_second": 3.268,
"step": 5250
},
{
"epoch": 0.7026918709505043,
"grad_norm": 4.125,
"learning_rate": 3.2437216136788676e-06,
"loss": 0.7006,
"step": 5260
},
{
"epoch": 0.7040277870549729,
"grad_norm": 4.09375,
"learning_rate": 3.2403820464867757e-06,
"loss": 0.6899,
"step": 5270
},
{
"epoch": 0.7053637031594416,
"grad_norm": 4.46875,
"learning_rate": 3.2370424792946837e-06,
"loss": 0.7053,
"step": 5280
},
{
"epoch": 0.7066996192639102,
"grad_norm": 6.03125,
"learning_rate": 3.233702912102592e-06,
"loss": 0.6889,
"step": 5290
},
{
"epoch": 0.7080355353683788,
"grad_norm": 4.84375,
"learning_rate": 3.2303633449105e-06,
"loss": 0.7144,
"step": 5300
},
{
"epoch": 0.7093714514728475,
"grad_norm": 4.40625,
"learning_rate": 3.2270237777184075e-06,
"loss": 0.6994,
"step": 5310
},
{
"epoch": 0.7107073675773161,
"grad_norm": 4.40625,
"learning_rate": 3.223684210526316e-06,
"loss": 0.7008,
"step": 5320
},
{
"epoch": 0.7120432836817848,
"grad_norm": 4.46875,
"learning_rate": 3.220344643334224e-06,
"loss": 0.6877,
"step": 5330
},
{
"epoch": 0.7133791997862534,
"grad_norm": 4.46875,
"learning_rate": 3.217005076142132e-06,
"loss": 0.7241,
"step": 5340
},
{
"epoch": 0.714715115890722,
"grad_norm": 4.53125,
"learning_rate": 3.2136655089500403e-06,
"loss": 0.7012,
"step": 5350
},
{
"epoch": 0.7160510319951907,
"grad_norm": 4.15625,
"learning_rate": 3.2103259417579483e-06,
"loss": 0.6753,
"step": 5360
},
{
"epoch": 0.7173869480996593,
"grad_norm": 4.1875,
"learning_rate": 3.2069863745658564e-06,
"loss": 0.7083,
"step": 5370
},
{
"epoch": 0.718722864204128,
"grad_norm": 4.46875,
"learning_rate": 3.203646807373765e-06,
"loss": 0.6992,
"step": 5380
},
{
"epoch": 0.7200587803085966,
"grad_norm": 3.9375,
"learning_rate": 3.2003072401816726e-06,
"loss": 0.6601,
"step": 5390
},
{
"epoch": 0.7213946964130653,
"grad_norm": 4.875,
"learning_rate": 3.196967672989581e-06,
"loss": 0.6993,
"step": 5400
},
{
"epoch": 0.7227306125175339,
"grad_norm": 4.53125,
"learning_rate": 3.1936281057974887e-06,
"loss": 0.6848,
"step": 5410
},
{
"epoch": 0.7240665286220025,
"grad_norm": 4.3125,
"learning_rate": 3.1902885386053972e-06,
"loss": 0.6546,
"step": 5420
},
{
"epoch": 0.7254024447264712,
"grad_norm": 4.65625,
"learning_rate": 3.186948971413305e-06,
"loss": 0.6807,
"step": 5430
},
{
"epoch": 0.7267383608309398,
"grad_norm": 4.375,
"learning_rate": 3.1836094042212134e-06,
"loss": 0.697,
"step": 5440
},
{
"epoch": 0.7280742769354085,
"grad_norm": 4.5,
"learning_rate": 3.1802698370291214e-06,
"loss": 0.6968,
"step": 5450
},
{
"epoch": 0.7294101930398771,
"grad_norm": 4.3125,
"learning_rate": 3.176930269837029e-06,
"loss": 0.668,
"step": 5460
},
{
"epoch": 0.7307461091443458,
"grad_norm": 4.4375,
"learning_rate": 3.1735907026449376e-06,
"loss": 0.697,
"step": 5470
},
{
"epoch": 0.7320820252488144,
"grad_norm": 4.125,
"learning_rate": 3.1702511354528452e-06,
"loss": 0.6697,
"step": 5480
},
{
"epoch": 0.733417941353283,
"grad_norm": 4.4375,
"learning_rate": 3.1669115682607537e-06,
"loss": 0.6638,
"step": 5490
},
{
"epoch": 0.7347538574577517,
"grad_norm": 4.59375,
"learning_rate": 3.1635720010686614e-06,
"loss": 0.6842,
"step": 5500
},
{
"epoch": 0.7347538574577517,
"eval_loss": 0.6894299387931824,
"eval_runtime": 252.4388,
"eval_samples_per_second": 26.359,
"eval_steps_per_second": 3.296,
"step": 5500
},
{
"epoch": 0.7360897735622203,
"grad_norm": 4.6875,
"learning_rate": 3.16023243387657e-06,
"loss": 0.6932,
"step": 5510
},
{
"epoch": 0.737425689666689,
"grad_norm": 4.375,
"learning_rate": 3.156892866684478e-06,
"loss": 0.6859,
"step": 5520
},
{
"epoch": 0.7387616057711576,
"grad_norm": 4.34375,
"learning_rate": 3.153553299492386e-06,
"loss": 0.6715,
"step": 5530
},
{
"epoch": 0.7400975218756262,
"grad_norm": 4.28125,
"learning_rate": 3.150213732300294e-06,
"loss": 0.6759,
"step": 5540
},
{
"epoch": 0.7414334379800949,
"grad_norm": 4.15625,
"learning_rate": 3.1468741651082026e-06,
"loss": 0.6798,
"step": 5550
},
{
"epoch": 0.7427693540845635,
"grad_norm": 4.34375,
"learning_rate": 3.1435345979161103e-06,
"loss": 0.6917,
"step": 5560
},
{
"epoch": 0.7441052701890322,
"grad_norm": 4.46875,
"learning_rate": 3.1401950307240188e-06,
"loss": 0.6778,
"step": 5570
},
{
"epoch": 0.7454411862935008,
"grad_norm": 4.25,
"learning_rate": 3.1368554635319264e-06,
"loss": 0.7027,
"step": 5580
},
{
"epoch": 0.7467771023979695,
"grad_norm": 4.25,
"learning_rate": 3.133515896339835e-06,
"loss": 0.7011,
"step": 5590
},
{
"epoch": 0.748113018502438,
"grad_norm": 4.28125,
"learning_rate": 3.1301763291477426e-06,
"loss": 0.6744,
"step": 5600
},
{
"epoch": 0.7494489346069066,
"grad_norm": 4.25,
"learning_rate": 3.1268367619556506e-06,
"loss": 0.6817,
"step": 5610
},
{
"epoch": 0.7507848507113754,
"grad_norm": 4.53125,
"learning_rate": 3.123497194763559e-06,
"loss": 0.6708,
"step": 5620
},
{
"epoch": 0.7521207668158439,
"grad_norm": 4.5625,
"learning_rate": 3.120157627571467e-06,
"loss": 0.6958,
"step": 5630
},
{
"epoch": 0.7534566829203126,
"grad_norm": 4.4375,
"learning_rate": 3.1168180603793753e-06,
"loss": 0.6897,
"step": 5640
},
{
"epoch": 0.7547925990247812,
"grad_norm": 4.5,
"learning_rate": 3.113478493187283e-06,
"loss": 0.7017,
"step": 5650
},
{
"epoch": 0.7561285151292498,
"grad_norm": 4.75,
"learning_rate": 3.1101389259951914e-06,
"loss": 0.6682,
"step": 5660
},
{
"epoch": 0.7574644312337185,
"grad_norm": 4.59375,
"learning_rate": 3.106799358803099e-06,
"loss": 0.6939,
"step": 5670
},
{
"epoch": 0.7588003473381871,
"grad_norm": 4.46875,
"learning_rate": 3.1034597916110076e-06,
"loss": 0.6803,
"step": 5680
},
{
"epoch": 0.7601362634426558,
"grad_norm": 4.28125,
"learning_rate": 3.1001202244189157e-06,
"loss": 0.6984,
"step": 5690
},
{
"epoch": 0.7614721795471244,
"grad_norm": 4.5,
"learning_rate": 3.0967806572268237e-06,
"loss": 0.6944,
"step": 5700
},
{
"epoch": 0.7628080956515931,
"grad_norm": 4.5,
"learning_rate": 3.093441090034732e-06,
"loss": 0.7123,
"step": 5710
},
{
"epoch": 0.7641440117560617,
"grad_norm": 4.5625,
"learning_rate": 3.09010152284264e-06,
"loss": 0.6913,
"step": 5720
},
{
"epoch": 0.7654799278605303,
"grad_norm": 4.5625,
"learning_rate": 3.086761955650548e-06,
"loss": 0.6961,
"step": 5730
},
{
"epoch": 0.766815843964999,
"grad_norm": 4.59375,
"learning_rate": 3.0834223884584556e-06,
"loss": 0.6847,
"step": 5740
},
{
"epoch": 0.7681517600694676,
"grad_norm": 4.1875,
"learning_rate": 3.080082821266364e-06,
"loss": 0.6777,
"step": 5750
},
{
"epoch": 0.7681517600694676,
"eval_loss": 0.6881247758865356,
"eval_runtime": 255.4097,
"eval_samples_per_second": 26.052,
"eval_steps_per_second": 3.258,
"step": 5750
},
{
"epoch": 0.7694876761739363,
"grad_norm": 4.40625,
"learning_rate": 3.076743254074272e-06,
"loss": 0.6726,
"step": 5760
},
{
"epoch": 0.7708235922784049,
"grad_norm": 4.3125,
"learning_rate": 3.0734036868821803e-06,
"loss": 0.6699,
"step": 5770
},
{
"epoch": 0.7721595083828735,
"grad_norm": 4.125,
"learning_rate": 3.0700641196900883e-06,
"loss": 0.6841,
"step": 5780
},
{
"epoch": 0.7734954244873422,
"grad_norm": 4.5,
"learning_rate": 3.0667245524979964e-06,
"loss": 0.6779,
"step": 5790
},
{
"epoch": 0.7748313405918108,
"grad_norm": 4.84375,
"learning_rate": 3.0633849853059045e-06,
"loss": 0.6632,
"step": 5800
},
{
"epoch": 0.7761672566962795,
"grad_norm": 4.84375,
"learning_rate": 3.060045418113813e-06,
"loss": 0.7113,
"step": 5810
},
{
"epoch": 0.7775031728007481,
"grad_norm": 4.75,
"learning_rate": 3.0567058509217206e-06,
"loss": 0.7068,
"step": 5820
},
{
"epoch": 0.7788390889052168,
"grad_norm": 4.46875,
"learning_rate": 3.053366283729629e-06,
"loss": 0.6908,
"step": 5830
},
{
"epoch": 0.7801750050096854,
"grad_norm": 4.65625,
"learning_rate": 3.050026716537537e-06,
"loss": 0.6967,
"step": 5840
},
{
"epoch": 0.781510921114154,
"grad_norm": 4.4375,
"learning_rate": 3.0466871493454453e-06,
"loss": 0.6893,
"step": 5850
},
{
"epoch": 0.7828468372186227,
"grad_norm": 4.84375,
"learning_rate": 3.043347582153353e-06,
"loss": 0.6879,
"step": 5860
},
{
"epoch": 0.7841827533230913,
"grad_norm": 4.3125,
"learning_rate": 3.0400080149612614e-06,
"loss": 0.6828,
"step": 5870
},
{
"epoch": 0.78551866942756,
"grad_norm": 4.3125,
"learning_rate": 3.0366684477691695e-06,
"loss": 0.6902,
"step": 5880
},
{
"epoch": 0.7868545855320286,
"grad_norm": 4.53125,
"learning_rate": 3.033328880577077e-06,
"loss": 0.6929,
"step": 5890
},
{
"epoch": 0.7881905016364972,
"grad_norm": 4.375,
"learning_rate": 3.0299893133849857e-06,
"loss": 0.6655,
"step": 5900
},
{
"epoch": 0.7895264177409659,
"grad_norm": 4.5625,
"learning_rate": 3.0266497461928933e-06,
"loss": 0.6936,
"step": 5910
},
{
"epoch": 0.7908623338454345,
"grad_norm": 4.5,
"learning_rate": 3.023310179000802e-06,
"loss": 0.7074,
"step": 5920
},
{
"epoch": 0.7921982499499032,
"grad_norm": 4.3125,
"learning_rate": 3.0199706118087095e-06,
"loss": 0.6958,
"step": 5930
},
{
"epoch": 0.7935341660543718,
"grad_norm": 4.1875,
"learning_rate": 3.016631044616618e-06,
"loss": 0.6604,
"step": 5940
},
{
"epoch": 0.7948700821588405,
"grad_norm": 4.78125,
"learning_rate": 3.013291477424526e-06,
"loss": 0.6831,
"step": 5950
},
{
"epoch": 0.7962059982633091,
"grad_norm": 4.3125,
"learning_rate": 3.009951910232434e-06,
"loss": 0.6929,
"step": 5960
},
{
"epoch": 0.7975419143677777,
"grad_norm": 4.3125,
"learning_rate": 3.006612343040342e-06,
"loss": 0.6681,
"step": 5970
},
{
"epoch": 0.7988778304722464,
"grad_norm": 4.8125,
"learning_rate": 3.0032727758482507e-06,
"loss": 0.6832,
"step": 5980
},
{
"epoch": 0.800213746576715,
"grad_norm": 4.28125,
"learning_rate": 2.9999332086561583e-06,
"loss": 0.6867,
"step": 5990
},
{
"epoch": 0.8015496626811837,
"grad_norm": 4.4375,
"learning_rate": 2.996593641464067e-06,
"loss": 0.6643,
"step": 6000
},
{
"epoch": 0.8015496626811837,
"eval_loss": 0.6869954466819763,
"eval_runtime": 255.1316,
"eval_samples_per_second": 26.081,
"eval_steps_per_second": 3.261,
"step": 6000
},
{
"epoch": 0.8028855787856523,
"grad_norm": 4.5625,
"learning_rate": 2.9932540742719745e-06,
"loss": 0.669,
"step": 6010
},
{
"epoch": 0.8042214948901208,
"grad_norm": 4.34375,
"learning_rate": 2.989914507079883e-06,
"loss": 0.6517,
"step": 6020
},
{
"epoch": 0.8055574109945896,
"grad_norm": 4.65625,
"learning_rate": 2.9865749398877906e-06,
"loss": 0.7005,
"step": 6030
},
{
"epoch": 0.8068933270990581,
"grad_norm": 4.625,
"learning_rate": 2.9832353726956987e-06,
"loss": 0.6788,
"step": 6040
},
{
"epoch": 0.8082292432035268,
"grad_norm": 4.5,
"learning_rate": 2.9798958055036072e-06,
"loss": 0.6853,
"step": 6050
},
{
"epoch": 0.8095651593079954,
"grad_norm": 4.5625,
"learning_rate": 2.976556238311515e-06,
"loss": 0.6787,
"step": 6060
},
{
"epoch": 0.8109010754124641,
"grad_norm": 4.4375,
"learning_rate": 2.9732166711194234e-06,
"loss": 0.6709,
"step": 6070
},
{
"epoch": 0.8122369915169327,
"grad_norm": 4.5,
"learning_rate": 2.969877103927331e-06,
"loss": 0.6819,
"step": 6080
},
{
"epoch": 0.8135729076214013,
"grad_norm": 4.6875,
"learning_rate": 2.9665375367352395e-06,
"loss": 0.6992,
"step": 6090
},
{
"epoch": 0.81490882372587,
"grad_norm": 4.40625,
"learning_rate": 2.963197969543147e-06,
"loss": 0.6949,
"step": 6100
},
{
"epoch": 0.8162447398303386,
"grad_norm": 4.4375,
"learning_rate": 2.9598584023510557e-06,
"loss": 0.6905,
"step": 6110
},
{
"epoch": 0.8175806559348073,
"grad_norm": 4.375,
"learning_rate": 2.9565188351589637e-06,
"loss": 0.7238,
"step": 6120
},
{
"epoch": 0.8189165720392759,
"grad_norm": 4.53125,
"learning_rate": 2.953179267966872e-06,
"loss": 0.6839,
"step": 6130
},
{
"epoch": 0.8202524881437445,
"grad_norm": 4.625,
"learning_rate": 2.94983970077478e-06,
"loss": 0.6943,
"step": 6140
},
{
"epoch": 0.8215884042482132,
"grad_norm": 4.75,
"learning_rate": 2.946500133582688e-06,
"loss": 0.7072,
"step": 6150
},
{
"epoch": 0.8229243203526818,
"grad_norm": 4.4375,
"learning_rate": 2.943160566390596e-06,
"loss": 0.7141,
"step": 6160
},
{
"epoch": 0.8242602364571505,
"grad_norm": 4.21875,
"learning_rate": 2.9398209991985045e-06,
"loss": 0.664,
"step": 6170
},
{
"epoch": 0.8255961525616191,
"grad_norm": 4.4375,
"learning_rate": 2.936481432006412e-06,
"loss": 0.6745,
"step": 6180
},
{
"epoch": 0.8269320686660878,
"grad_norm": 4.4375,
"learning_rate": 2.93314186481432e-06,
"loss": 0.6816,
"step": 6190
},
{
"epoch": 0.8282679847705564,
"grad_norm": 4.5,
"learning_rate": 2.9298022976222283e-06,
"loss": 0.6908,
"step": 6200
},
{
"epoch": 0.829603900875025,
"grad_norm": 4.75,
"learning_rate": 2.9264627304301364e-06,
"loss": 0.6894,
"step": 6210
},
{
"epoch": 0.8309398169794937,
"grad_norm": 4.5625,
"learning_rate": 2.9231231632380445e-06,
"loss": 0.6654,
"step": 6220
},
{
"epoch": 0.8322757330839623,
"grad_norm": 4.46875,
"learning_rate": 2.9197835960459526e-06,
"loss": 0.6746,
"step": 6230
},
{
"epoch": 0.833611649188431,
"grad_norm": 4.3125,
"learning_rate": 2.916444028853861e-06,
"loss": 0.6862,
"step": 6240
},
{
"epoch": 0.8349475652928996,
"grad_norm": 4.59375,
"learning_rate": 2.9131044616617687e-06,
"loss": 0.7101,
"step": 6250
},
{
"epoch": 0.8349475652928996,
"eval_loss": 0.6859682202339172,
"eval_runtime": 252.3991,
"eval_samples_per_second": 26.363,
"eval_steps_per_second": 3.296,
"step": 6250
},
{
"epoch": 0.8362834813973683,
"grad_norm": 4.65625,
"learning_rate": 2.9097648944696772e-06,
"loss": 0.7055,
"step": 6260
},
{
"epoch": 0.8376193975018369,
"grad_norm": 4.375,
"learning_rate": 2.906425327277585e-06,
"loss": 0.6817,
"step": 6270
},
{
"epoch": 0.8389553136063055,
"grad_norm": 4.65625,
"learning_rate": 2.9030857600854934e-06,
"loss": 0.6769,
"step": 6280
},
{
"epoch": 0.8402912297107742,
"grad_norm": 4.71875,
"learning_rate": 2.899746192893401e-06,
"loss": 0.6948,
"step": 6290
},
{
"epoch": 0.8416271458152428,
"grad_norm": 4.4375,
"learning_rate": 2.8964066257013095e-06,
"loss": 0.6766,
"step": 6300
},
{
"epoch": 0.8429630619197115,
"grad_norm": 4.34375,
"learning_rate": 2.8930670585092176e-06,
"loss": 0.7015,
"step": 6310
},
{
"epoch": 0.8442989780241801,
"grad_norm": 4.5,
"learning_rate": 2.8897274913171253e-06,
"loss": 0.6858,
"step": 6320
},
{
"epoch": 0.8456348941286487,
"grad_norm": 4.5625,
"learning_rate": 2.8863879241250337e-06,
"loss": 0.7049,
"step": 6330
},
{
"epoch": 0.8469708102331174,
"grad_norm": 4.625,
"learning_rate": 2.8830483569329414e-06,
"loss": 0.6877,
"step": 6340
},
{
"epoch": 0.848306726337586,
"grad_norm": 4.4375,
"learning_rate": 2.87970878974085e-06,
"loss": 0.682,
"step": 6350
},
{
"epoch": 0.8496426424420547,
"grad_norm": 4.59375,
"learning_rate": 2.8763692225487576e-06,
"loss": 0.6489,
"step": 6360
},
{
"epoch": 0.8509785585465233,
"grad_norm": 4.75,
"learning_rate": 2.873029655356666e-06,
"loss": 0.6873,
"step": 6370
},
{
"epoch": 0.852314474650992,
"grad_norm": 4.65625,
"learning_rate": 2.869690088164574e-06,
"loss": 0.6956,
"step": 6380
},
{
"epoch": 0.8536503907554606,
"grad_norm": 4.53125,
"learning_rate": 2.866350520972482e-06,
"loss": 0.6715,
"step": 6390
},
{
"epoch": 0.8549863068599292,
"grad_norm": 4.46875,
"learning_rate": 2.8630109537803903e-06,
"loss": 0.6665,
"step": 6400
},
{
"epoch": 0.8563222229643979,
"grad_norm": 4.5,
"learning_rate": 2.8596713865882988e-06,
"loss": 0.6664,
"step": 6410
},
{
"epoch": 0.8576581390688665,
"grad_norm": 4.59375,
"learning_rate": 2.8563318193962064e-06,
"loss": 0.6964,
"step": 6420
},
{
"epoch": 0.8589940551733352,
"grad_norm": 4.65625,
"learning_rate": 2.852992252204115e-06,
"loss": 0.6687,
"step": 6430
},
{
"epoch": 0.8603299712778038,
"grad_norm": 4.75,
"learning_rate": 2.8496526850120226e-06,
"loss": 0.6885,
"step": 6440
},
{
"epoch": 0.8616658873822723,
"grad_norm": 4.75,
"learning_rate": 2.846313117819931e-06,
"loss": 0.6959,
"step": 6450
},
{
"epoch": 0.863001803486741,
"grad_norm": 4.46875,
"learning_rate": 2.8429735506278387e-06,
"loss": 0.6695,
"step": 6460
},
{
"epoch": 0.8643377195912096,
"grad_norm": 4.53125,
"learning_rate": 2.839633983435747e-06,
"loss": 0.6922,
"step": 6470
},
{
"epoch": 0.8656736356956783,
"grad_norm": 4.5625,
"learning_rate": 2.8362944162436553e-06,
"loss": 0.7111,
"step": 6480
},
{
"epoch": 0.8670095518001469,
"grad_norm": 4.5625,
"learning_rate": 2.832954849051563e-06,
"loss": 0.6785,
"step": 6490
},
{
"epoch": 0.8683454679046156,
"grad_norm": 4.4375,
"learning_rate": 2.8296152818594715e-06,
"loss": 0.6806,
"step": 6500
},
{
"epoch": 0.8683454679046156,
"eval_loss": 0.6850719451904297,
"eval_runtime": 253.3451,
"eval_samples_per_second": 26.265,
"eval_steps_per_second": 3.284,
"step": 6500
},
{
"epoch": 0.8696813840090842,
"grad_norm": 4.25,
"learning_rate": 2.826275714667379e-06,
"loss": 0.6677,
"step": 6510
},
{
"epoch": 0.8710173001135528,
"grad_norm": 4.6875,
"learning_rate": 2.8229361474752876e-06,
"loss": 0.6833,
"step": 6520
},
{
"epoch": 0.8723532162180215,
"grad_norm": 4.46875,
"learning_rate": 2.8195965802831953e-06,
"loss": 0.691,
"step": 6530
},
{
"epoch": 0.8736891323224901,
"grad_norm": 4.78125,
"learning_rate": 2.8162570130911038e-06,
"loss": 0.6999,
"step": 6540
},
{
"epoch": 0.8750250484269588,
"grad_norm": 4.4375,
"learning_rate": 2.8129174458990114e-06,
"loss": 0.6747,
"step": 6550
},
{
"epoch": 0.8763609645314274,
"grad_norm": 4.625,
"learning_rate": 2.80957787870692e-06,
"loss": 0.6644,
"step": 6560
},
{
"epoch": 0.877696880635896,
"grad_norm": 4.75,
"learning_rate": 2.806238311514828e-06,
"loss": 0.7162,
"step": 6570
},
{
"epoch": 0.8790327967403647,
"grad_norm": 4.65625,
"learning_rate": 2.802898744322736e-06,
"loss": 0.7015,
"step": 6580
},
{
"epoch": 0.8803687128448333,
"grad_norm": 4.84375,
"learning_rate": 2.799559177130644e-06,
"loss": 0.6915,
"step": 6590
},
{
"epoch": 0.881704628949302,
"grad_norm": 4.3125,
"learning_rate": 2.7962196099385526e-06,
"loss": 0.6897,
"step": 6600
},
{
"epoch": 0.8830405450537706,
"grad_norm": 4.1875,
"learning_rate": 2.7928800427464603e-06,
"loss": 0.6767,
"step": 6610
},
{
"epoch": 0.8843764611582393,
"grad_norm": 4.34375,
"learning_rate": 2.789540475554368e-06,
"loss": 0.6478,
"step": 6620
},
{
"epoch": 0.8857123772627079,
"grad_norm": 4.75,
"learning_rate": 2.7862009083622764e-06,
"loss": 0.6756,
"step": 6630
},
{
"epoch": 0.8870482933671765,
"grad_norm": 4.4375,
"learning_rate": 2.7828613411701845e-06,
"loss": 0.649,
"step": 6640
},
{
"epoch": 0.8883842094716452,
"grad_norm": 4.40625,
"learning_rate": 2.7795217739780926e-06,
"loss": 0.6672,
"step": 6650
},
{
"epoch": 0.8897201255761138,
"grad_norm": 4.46875,
"learning_rate": 2.7761822067860007e-06,
"loss": 0.6861,
"step": 6660
},
{
"epoch": 0.8910560416805825,
"grad_norm": 4.5,
"learning_rate": 2.772842639593909e-06,
"loss": 0.6885,
"step": 6670
},
{
"epoch": 0.8923919577850511,
"grad_norm": 4.4375,
"learning_rate": 2.769503072401817e-06,
"loss": 0.6597,
"step": 6680
},
{
"epoch": 0.8937278738895197,
"grad_norm": 4.65625,
"learning_rate": 2.7661635052097253e-06,
"loss": 0.6787,
"step": 6690
},
{
"epoch": 0.8950637899939884,
"grad_norm": 4.59375,
"learning_rate": 2.762823938017633e-06,
"loss": 0.6819,
"step": 6700
},
{
"epoch": 0.896399706098457,
"grad_norm": 4.6875,
"learning_rate": 2.7594843708255415e-06,
"loss": 0.6841,
"step": 6710
},
{
"epoch": 0.8977356222029257,
"grad_norm": 4.4375,
"learning_rate": 2.756144803633449e-06,
"loss": 0.6826,
"step": 6720
},
{
"epoch": 0.8990715383073943,
"grad_norm": 4.625,
"learning_rate": 2.7528052364413576e-06,
"loss": 0.6955,
"step": 6730
},
{
"epoch": 0.900407454411863,
"grad_norm": 4.6875,
"learning_rate": 2.7494656692492657e-06,
"loss": 0.6997,
"step": 6740
},
{
"epoch": 0.9017433705163316,
"grad_norm": 4.5625,
"learning_rate": 2.7461261020571733e-06,
"loss": 0.6789,
"step": 6750
},
{
"epoch": 0.9017433705163316,
"eval_loss": 0.6842080354690552,
"eval_runtime": 255.8216,
"eval_samples_per_second": 26.01,
"eval_steps_per_second": 3.252,
"step": 6750
},
{
"epoch": 0.9030792866208002,
"grad_norm": 4.71875,
"learning_rate": 2.742786534865082e-06,
"loss": 0.7046,
"step": 6760
},
{
"epoch": 0.9044152027252689,
"grad_norm": 4.1875,
"learning_rate": 2.7394469676729895e-06,
"loss": 0.6483,
"step": 6770
},
{
"epoch": 0.9057511188297375,
"grad_norm": 4.625,
"learning_rate": 2.736107400480898e-06,
"loss": 0.6884,
"step": 6780
},
{
"epoch": 0.9070870349342062,
"grad_norm": 4.21875,
"learning_rate": 2.7327678332888056e-06,
"loss": 0.68,
"step": 6790
},
{
"epoch": 0.9084229510386748,
"grad_norm": 4.46875,
"learning_rate": 2.729428266096714e-06,
"loss": 0.6791,
"step": 6800
},
{
"epoch": 0.9097588671431434,
"grad_norm": 4.84375,
"learning_rate": 2.726088698904622e-06,
"loss": 0.7055,
"step": 6810
},
{
"epoch": 0.9110947832476121,
"grad_norm": 4.3125,
"learning_rate": 2.7227491317125303e-06,
"loss": 0.6989,
"step": 6820
},
{
"epoch": 0.9124306993520807,
"grad_norm": 4.5625,
"learning_rate": 2.7194095645204384e-06,
"loss": 0.6467,
"step": 6830
},
{
"epoch": 0.9137666154565494,
"grad_norm": 4.71875,
"learning_rate": 2.716069997328347e-06,
"loss": 0.6883,
"step": 6840
},
{
"epoch": 0.915102531561018,
"grad_norm": 4.78125,
"learning_rate": 2.7127304301362545e-06,
"loss": 0.6996,
"step": 6850
},
{
"epoch": 0.9164384476654867,
"grad_norm": 4.53125,
"learning_rate": 2.709390862944163e-06,
"loss": 0.6717,
"step": 6860
},
{
"epoch": 0.9177743637699552,
"grad_norm": 4.40625,
"learning_rate": 2.7060512957520707e-06,
"loss": 0.6707,
"step": 6870
},
{
"epoch": 0.9191102798744238,
"grad_norm": 4.53125,
"learning_rate": 2.702711728559979e-06,
"loss": 0.6703,
"step": 6880
},
{
"epoch": 0.9204461959788925,
"grad_norm": 4.46875,
"learning_rate": 2.699372161367887e-06,
"loss": 0.6797,
"step": 6890
},
{
"epoch": 0.9217821120833611,
"grad_norm": 4.53125,
"learning_rate": 2.696032594175795e-06,
"loss": 0.689,
"step": 6900
},
{
"epoch": 0.9231180281878298,
"grad_norm": 5.0,
"learning_rate": 2.692693026983703e-06,
"loss": 0.6845,
"step": 6910
},
{
"epoch": 0.9244539442922984,
"grad_norm": 4.25,
"learning_rate": 2.689353459791611e-06,
"loss": 0.6649,
"step": 6920
},
{
"epoch": 0.925789860396767,
"grad_norm": 4.65625,
"learning_rate": 2.6860138925995195e-06,
"loss": 0.6521,
"step": 6930
},
{
"epoch": 0.9271257765012357,
"grad_norm": 4.5625,
"learning_rate": 2.682674325407427e-06,
"loss": 0.6774,
"step": 6940
},
{
"epoch": 0.9284616926057043,
"grad_norm": 4.40625,
"learning_rate": 2.6793347582153357e-06,
"loss": 0.6555,
"step": 6950
},
{
"epoch": 0.929797608710173,
"grad_norm": 4.34375,
"learning_rate": 2.6759951910232433e-06,
"loss": 0.6744,
"step": 6960
},
{
"epoch": 0.9311335248146416,
"grad_norm": 4.25,
"learning_rate": 2.672655623831152e-06,
"loss": 0.6808,
"step": 6970
},
{
"epoch": 0.9324694409191103,
"grad_norm": 4.53125,
"learning_rate": 2.6693160566390595e-06,
"loss": 0.6962,
"step": 6980
},
{
"epoch": 0.9338053570235789,
"grad_norm": 4.53125,
"learning_rate": 2.665976489446968e-06,
"loss": 0.647,
"step": 6990
},
{
"epoch": 0.9351412731280475,
"grad_norm": 5.0625,
"learning_rate": 2.662636922254876e-06,
"loss": 0.6775,
"step": 7000
},
{
"epoch": 0.9351412731280475,
"eval_loss": 0.68352210521698,
"eval_runtime": 254.6837,
"eval_samples_per_second": 26.127,
"eval_steps_per_second": 3.267,
"step": 7000
},
{
"epoch": 0.9364771892325162,
"grad_norm": 4.625,
"learning_rate": 2.659297355062784e-06,
"loss": 0.6849,
"step": 7010
},
{
"epoch": 0.9378131053369848,
"grad_norm": 4.53125,
"learning_rate": 2.655957787870692e-06,
"loss": 0.6861,
"step": 7020
},
{
"epoch": 0.9391490214414535,
"grad_norm": 4.59375,
"learning_rate": 2.6526182206786007e-06,
"loss": 0.682,
"step": 7030
},
{
"epoch": 0.9404849375459221,
"grad_norm": 4.21875,
"learning_rate": 2.6492786534865084e-06,
"loss": 0.6489,
"step": 7040
},
{
"epoch": 0.9418208536503908,
"grad_norm": 4.65625,
"learning_rate": 2.645939086294416e-06,
"loss": 0.667,
"step": 7050
},
{
"epoch": 0.9431567697548594,
"grad_norm": 4.46875,
"learning_rate": 2.6425995191023245e-06,
"loss": 0.6915,
"step": 7060
},
{
"epoch": 0.944492685859328,
"grad_norm": 4.46875,
"learning_rate": 2.6392599519102326e-06,
"loss": 0.6575,
"step": 7070
},
{
"epoch": 0.9458286019637967,
"grad_norm": 4.53125,
"learning_rate": 2.6359203847181407e-06,
"loss": 0.6793,
"step": 7080
},
{
"epoch": 0.9471645180682653,
"grad_norm": 4.40625,
"learning_rate": 2.6325808175260487e-06,
"loss": 0.6534,
"step": 7090
},
{
"epoch": 0.948500434172734,
"grad_norm": 4.5625,
"learning_rate": 2.6292412503339572e-06,
"loss": 0.6805,
"step": 7100
},
{
"epoch": 0.9498363502772026,
"grad_norm": 4.75,
"learning_rate": 2.625901683141865e-06,
"loss": 0.6832,
"step": 7110
},
{
"epoch": 0.9511722663816712,
"grad_norm": 4.5625,
"learning_rate": 2.6225621159497734e-06,
"loss": 0.6693,
"step": 7120
},
{
"epoch": 0.9525081824861399,
"grad_norm": 4.71875,
"learning_rate": 2.619222548757681e-06,
"loss": 0.6669,
"step": 7130
},
{
"epoch": 0.9538440985906085,
"grad_norm": 4.3125,
"learning_rate": 2.6158829815655895e-06,
"loss": 0.6899,
"step": 7140
},
{
"epoch": 0.9551800146950772,
"grad_norm": 4.4375,
"learning_rate": 2.612543414373497e-06,
"loss": 0.6984,
"step": 7150
},
{
"epoch": 0.9565159307995458,
"grad_norm": 4.84375,
"learning_rate": 2.6092038471814057e-06,
"loss": 0.6823,
"step": 7160
},
{
"epoch": 0.9578518469040145,
"grad_norm": 4.6875,
"learning_rate": 2.6058642799893138e-06,
"loss": 0.7037,
"step": 7170
},
{
"epoch": 0.9591877630084831,
"grad_norm": 4.53125,
"learning_rate": 2.602524712797222e-06,
"loss": 0.7057,
"step": 7180
},
{
"epoch": 0.9605236791129517,
"grad_norm": 4.53125,
"learning_rate": 2.59918514560513e-06,
"loss": 0.7063,
"step": 7190
},
{
"epoch": 0.9618595952174204,
"grad_norm": 4.5625,
"learning_rate": 2.5958455784130376e-06,
"loss": 0.6823,
"step": 7200
},
{
"epoch": 0.963195511321889,
"grad_norm": 4.71875,
"learning_rate": 2.592506011220946e-06,
"loss": 0.68,
"step": 7210
},
{
"epoch": 0.9645314274263577,
"grad_norm": 4.5,
"learning_rate": 2.5891664440288537e-06,
"loss": 0.6668,
"step": 7220
},
{
"epoch": 0.9658673435308263,
"grad_norm": 4.375,
"learning_rate": 2.585826876836762e-06,
"loss": 0.6537,
"step": 7230
},
{
"epoch": 0.9672032596352949,
"grad_norm": 4.46875,
"learning_rate": 2.5824873096446703e-06,
"loss": 0.6618,
"step": 7240
},
{
"epoch": 0.9685391757397636,
"grad_norm": 4.34375,
"learning_rate": 2.5791477424525784e-06,
"loss": 0.6809,
"step": 7250
},
{
"epoch": 0.9685391757397636,
"eval_loss": 0.6828362941741943,
"eval_runtime": 252.3197,
"eval_samples_per_second": 26.371,
"eval_steps_per_second": 3.297,
"step": 7250
},
{
"epoch": 0.9698750918442322,
"grad_norm": 4.28125,
"learning_rate": 2.5758081752604864e-06,
"loss": 0.69,
"step": 7260
},
{
"epoch": 0.9712110079487009,
"grad_norm": 4.375,
"learning_rate": 2.5724686080683945e-06,
"loss": 0.6881,
"step": 7270
},
{
"epoch": 0.9725469240531694,
"grad_norm": 4.90625,
"learning_rate": 2.5691290408763026e-06,
"loss": 0.6937,
"step": 7280
},
{
"epoch": 0.9738828401576382,
"grad_norm": 4.6875,
"learning_rate": 2.565789473684211e-06,
"loss": 0.6731,
"step": 7290
},
{
"epoch": 0.9752187562621067,
"grad_norm": 4.75,
"learning_rate": 2.5624499064921187e-06,
"loss": 0.6414,
"step": 7300
},
{
"epoch": 0.9765546723665753,
"grad_norm": 4.25,
"learning_rate": 2.5591103393000272e-06,
"loss": 0.6634,
"step": 7310
},
{
"epoch": 0.977890588471044,
"grad_norm": 4.75,
"learning_rate": 2.555770772107935e-06,
"loss": 0.6918,
"step": 7320
},
{
"epoch": 0.9792265045755126,
"grad_norm": 4.28125,
"learning_rate": 2.552431204915843e-06,
"loss": 0.6682,
"step": 7330
},
{
"epoch": 0.9805624206799813,
"grad_norm": 5.03125,
"learning_rate": 2.549091637723751e-06,
"loss": 0.676,
"step": 7340
},
{
"epoch": 0.9818983367844499,
"grad_norm": 5.0,
"learning_rate": 2.545752070531659e-06,
"loss": 0.6964,
"step": 7350
},
{
"epoch": 0.9832342528889185,
"grad_norm": 4.625,
"learning_rate": 2.5424125033395676e-06,
"loss": 0.6633,
"step": 7360
},
{
"epoch": 0.9845701689933872,
"grad_norm": 4.65625,
"learning_rate": 2.5390729361474753e-06,
"loss": 0.7033,
"step": 7370
},
{
"epoch": 0.9859060850978558,
"grad_norm": 4.40625,
"learning_rate": 2.5357333689553838e-06,
"loss": 0.6751,
"step": 7380
},
{
"epoch": 0.9872420012023245,
"grad_norm": 4.375,
"learning_rate": 2.5323938017632914e-06,
"loss": 0.6835,
"step": 7390
},
{
"epoch": 0.9885779173067931,
"grad_norm": 4.5,
"learning_rate": 2.5290542345712e-06,
"loss": 0.6808,
"step": 7400
},
{
"epoch": 0.9899138334112618,
"grad_norm": 4.6875,
"learning_rate": 2.5257146673791076e-06,
"loss": 0.6755,
"step": 7410
},
{
"epoch": 0.9912497495157304,
"grad_norm": 4.78125,
"learning_rate": 2.522375100187016e-06,
"loss": 0.6702,
"step": 7420
},
{
"epoch": 0.992585665620199,
"grad_norm": 4.53125,
"learning_rate": 2.519035532994924e-06,
"loss": 0.685,
"step": 7430
},
{
"epoch": 0.9939215817246677,
"grad_norm": 4.3125,
"learning_rate": 2.515695965802832e-06,
"loss": 0.6726,
"step": 7440
},
{
"epoch": 0.9952574978291363,
"grad_norm": 4.5625,
"learning_rate": 2.5123563986107403e-06,
"loss": 0.6705,
"step": 7450
},
{
"epoch": 0.996593413933605,
"grad_norm": 4.4375,
"learning_rate": 2.5090168314186488e-06,
"loss": 0.6449,
"step": 7460
},
{
"epoch": 0.9979293300380736,
"grad_norm": 8.625,
"learning_rate": 2.5056772642265564e-06,
"loss": 0.6615,
"step": 7470
},
{
"epoch": 0.9992652461425422,
"grad_norm": 4.625,
"learning_rate": 2.502337697034464e-06,
"loss": 0.6786,
"step": 7480
},
{
"epoch": 1.0005343664417874,
"grad_norm": 4.53125,
"learning_rate": 2.4989981298423726e-06,
"loss": 0.6865,
"step": 7490
},
{
"epoch": 1.001870282546256,
"grad_norm": 4.8125,
"learning_rate": 2.4956585626502807e-06,
"loss": 0.6878,
"step": 7500
},
{
"epoch": 1.001870282546256,
"eval_loss": 0.682263195514679,
"eval_runtime": 253.6366,
"eval_samples_per_second": 26.234,
"eval_steps_per_second": 3.28,
"step": 7500
},
{
"epoch": 1.0032061986507248,
"grad_norm": 4.71875,
"learning_rate": 2.4923189954581887e-06,
"loss": 0.6563,
"step": 7510
},
{
"epoch": 1.0045421147551934,
"grad_norm": 4.8125,
"learning_rate": 2.488979428266097e-06,
"loss": 0.7019,
"step": 7520
},
{
"epoch": 1.005878030859662,
"grad_norm": 4.59375,
"learning_rate": 2.4856398610740053e-06,
"loss": 0.6812,
"step": 7530
},
{
"epoch": 1.0072139469641306,
"grad_norm": 4.375,
"learning_rate": 2.4823002938819134e-06,
"loss": 0.6699,
"step": 7540
},
{
"epoch": 1.0085498630685994,
"grad_norm": 4.59375,
"learning_rate": 2.478960726689821e-06,
"loss": 0.6711,
"step": 7550
},
{
"epoch": 1.009885779173068,
"grad_norm": 4.53125,
"learning_rate": 2.475621159497729e-06,
"loss": 0.6752,
"step": 7560
},
{
"epoch": 1.0112216952775366,
"grad_norm": 4.6875,
"learning_rate": 2.472281592305637e-06,
"loss": 0.6461,
"step": 7570
},
{
"epoch": 1.0125576113820052,
"grad_norm": 4.75,
"learning_rate": 2.4689420251135453e-06,
"loss": 0.6654,
"step": 7580
},
{
"epoch": 1.0138935274864738,
"grad_norm": 4.5,
"learning_rate": 2.4656024579214533e-06,
"loss": 0.6579,
"step": 7590
},
{
"epoch": 1.0152294435909426,
"grad_norm": 4.6875,
"learning_rate": 2.462262890729362e-06,
"loss": 0.6559,
"step": 7600
},
{
"epoch": 1.0165653596954112,
"grad_norm": 5.25,
"learning_rate": 2.45892332353727e-06,
"loss": 0.6451,
"step": 7610
},
{
"epoch": 1.0179012757998798,
"grad_norm": 4.8125,
"learning_rate": 2.455583756345178e-06,
"loss": 0.6724,
"step": 7620
},
{
"epoch": 1.0192371919043484,
"grad_norm": 4.84375,
"learning_rate": 2.452244189153086e-06,
"loss": 0.6784,
"step": 7630
},
{
"epoch": 1.020573108008817,
"grad_norm": 4.53125,
"learning_rate": 2.448904621960994e-06,
"loss": 0.6973,
"step": 7640
},
{
"epoch": 1.0219090241132858,
"grad_norm": 4.6875,
"learning_rate": 2.445565054768902e-06,
"loss": 0.6933,
"step": 7650
},
{
"epoch": 1.0232449402177544,
"grad_norm": 4.75,
"learning_rate": 2.4422254875768103e-06,
"loss": 0.6654,
"step": 7660
},
{
"epoch": 1.024580856322223,
"grad_norm": 4.3125,
"learning_rate": 2.4388859203847184e-06,
"loss": 0.6918,
"step": 7670
},
{
"epoch": 1.0259167724266915,
"grad_norm": 4.40625,
"learning_rate": 2.4355463531926264e-06,
"loss": 0.6661,
"step": 7680
},
{
"epoch": 1.0272526885311601,
"grad_norm": 4.75,
"learning_rate": 2.4322067860005345e-06,
"loss": 0.6791,
"step": 7690
},
{
"epoch": 1.028588604635629,
"grad_norm": 4.5625,
"learning_rate": 2.4288672188084426e-06,
"loss": 0.6624,
"step": 7700
},
{
"epoch": 1.0299245207400975,
"grad_norm": 4.71875,
"learning_rate": 2.4255276516163507e-06,
"loss": 0.6541,
"step": 7710
},
{
"epoch": 1.0312604368445661,
"grad_norm": 4.5625,
"learning_rate": 2.4221880844242587e-06,
"loss": 0.6602,
"step": 7720
},
{
"epoch": 1.0325963529490347,
"grad_norm": 4.90625,
"learning_rate": 2.418848517232167e-06,
"loss": 0.6672,
"step": 7730
},
{
"epoch": 1.0339322690535035,
"grad_norm": 4.90625,
"learning_rate": 2.415508950040075e-06,
"loss": 0.6736,
"step": 7740
},
{
"epoch": 1.0352681851579721,
"grad_norm": 4.71875,
"learning_rate": 2.412169382847983e-06,
"loss": 0.6886,
"step": 7750
},
{
"epoch": 1.0352681851579721,
"eval_loss": 0.6817737221717834,
"eval_runtime": 255.8419,
"eval_samples_per_second": 26.008,
"eval_steps_per_second": 3.252,
"step": 7750
},
{
"epoch": 1.0366041012624407,
"grad_norm": 4.53125,
"learning_rate": 2.408829815655891e-06,
"loss": 0.6605,
"step": 7760
},
{
"epoch": 1.0379400173669093,
"grad_norm": 4.84375,
"learning_rate": 2.405490248463799e-06,
"loss": 0.6849,
"step": 7770
},
{
"epoch": 1.039275933471378,
"grad_norm": 4.5625,
"learning_rate": 2.4021506812717076e-06,
"loss": 0.6851,
"step": 7780
},
{
"epoch": 1.0406118495758467,
"grad_norm": 5.09375,
"learning_rate": 2.3988111140796157e-06,
"loss": 0.6845,
"step": 7790
},
{
"epoch": 1.0419477656803153,
"grad_norm": 4.875,
"learning_rate": 2.3954715468875238e-06,
"loss": 0.6786,
"step": 7800
},
{
"epoch": 1.043283681784784,
"grad_norm": 4.59375,
"learning_rate": 2.392131979695432e-06,
"loss": 0.6923,
"step": 7810
},
{
"epoch": 1.0446195978892525,
"grad_norm": 4.40625,
"learning_rate": 2.38879241250334e-06,
"loss": 0.6748,
"step": 7820
},
{
"epoch": 1.045955513993721,
"grad_norm": 4.84375,
"learning_rate": 2.385452845311248e-06,
"loss": 0.6747,
"step": 7830
},
{
"epoch": 1.04729143009819,
"grad_norm": 4.8125,
"learning_rate": 2.3821132781191556e-06,
"loss": 0.708,
"step": 7840
},
{
"epoch": 1.0486273462026585,
"grad_norm": 4.5,
"learning_rate": 2.378773710927064e-06,
"loss": 0.6536,
"step": 7850
},
{
"epoch": 1.049963262307127,
"grad_norm": 4.625,
"learning_rate": 2.3754341437349722e-06,
"loss": 0.6978,
"step": 7860
},
{
"epoch": 1.0512991784115957,
"grad_norm": 4.71875,
"learning_rate": 2.3720945765428803e-06,
"loss": 0.6675,
"step": 7870
},
{
"epoch": 1.0526350945160643,
"grad_norm": 4.875,
"learning_rate": 2.3687550093507884e-06,
"loss": 0.6713,
"step": 7880
},
{
"epoch": 1.053971010620533,
"grad_norm": 4.59375,
"learning_rate": 2.3654154421586964e-06,
"loss": 0.6844,
"step": 7890
},
{
"epoch": 1.0553069267250017,
"grad_norm": 4.34375,
"learning_rate": 2.3620758749666045e-06,
"loss": 0.6762,
"step": 7900
},
{
"epoch": 1.0566428428294703,
"grad_norm": 4.875,
"learning_rate": 2.3587363077745126e-06,
"loss": 0.6715,
"step": 7910
},
{
"epoch": 1.057978758933939,
"grad_norm": 4.9375,
"learning_rate": 2.3553967405824207e-06,
"loss": 0.6651,
"step": 7920
},
{
"epoch": 1.0593146750384075,
"grad_norm": 4.4375,
"learning_rate": 2.3520571733903287e-06,
"loss": 0.6746,
"step": 7930
},
{
"epoch": 1.0606505911428763,
"grad_norm": 4.6875,
"learning_rate": 2.348717606198237e-06,
"loss": 0.6699,
"step": 7940
},
{
"epoch": 1.061986507247345,
"grad_norm": 4.4375,
"learning_rate": 2.345378039006145e-06,
"loss": 0.6518,
"step": 7950
},
{
"epoch": 1.0633224233518135,
"grad_norm": 4.96875,
"learning_rate": 2.3420384718140534e-06,
"loss": 0.6881,
"step": 7960
},
{
"epoch": 1.064658339456282,
"grad_norm": 4.8125,
"learning_rate": 2.3386989046219615e-06,
"loss": 0.6716,
"step": 7970
},
{
"epoch": 1.0659942555607507,
"grad_norm": 4.65625,
"learning_rate": 2.335359337429869e-06,
"loss": 0.7079,
"step": 7980
},
{
"epoch": 1.0673301716652195,
"grad_norm": 4.59375,
"learning_rate": 2.332019770237777e-06,
"loss": 0.6725,
"step": 7990
},
{
"epoch": 1.068666087769688,
"grad_norm": 4.625,
"learning_rate": 2.3286802030456853e-06,
"loss": 0.6943,
"step": 8000
},
{
"epoch": 1.068666087769688,
"eval_loss": 0.6813973188400269,
"eval_runtime": 254.5415,
"eval_samples_per_second": 26.141,
"eval_steps_per_second": 3.269,
"step": 8000
},
{
"epoch": 1.0700020038741567,
"grad_norm": 4.6875,
"learning_rate": 2.3253406358535933e-06,
"loss": 0.6694,
"step": 8010
},
{
"epoch": 1.0713379199786253,
"grad_norm": 4.5,
"learning_rate": 2.3220010686615014e-06,
"loss": 0.6788,
"step": 8020
},
{
"epoch": 1.072673836083094,
"grad_norm": 4.78125,
"learning_rate": 2.31866150146941e-06,
"loss": 0.6681,
"step": 8030
},
{
"epoch": 1.0740097521875627,
"grad_norm": 4.5625,
"learning_rate": 2.315321934277318e-06,
"loss": 0.6596,
"step": 8040
},
{
"epoch": 1.0753456682920313,
"grad_norm": 4.65625,
"learning_rate": 2.311982367085226e-06,
"loss": 0.6606,
"step": 8050
},
{
"epoch": 1.0766815843964999,
"grad_norm": 4.25,
"learning_rate": 2.308642799893134e-06,
"loss": 0.6589,
"step": 8060
},
{
"epoch": 1.0780175005009685,
"grad_norm": 4.8125,
"learning_rate": 2.3053032327010422e-06,
"loss": 0.6699,
"step": 8070
},
{
"epoch": 1.0793534166054373,
"grad_norm": 4.65625,
"learning_rate": 2.3019636655089503e-06,
"loss": 0.6372,
"step": 8080
},
{
"epoch": 1.0806893327099059,
"grad_norm": 4.375,
"learning_rate": 2.2986240983168584e-06,
"loss": 0.6353,
"step": 8090
},
{
"epoch": 1.0820252488143745,
"grad_norm": 4.4375,
"learning_rate": 2.2952845311247664e-06,
"loss": 0.6356,
"step": 8100
},
{
"epoch": 1.083361164918843,
"grad_norm": 4.8125,
"learning_rate": 2.2919449639326745e-06,
"loss": 0.6656,
"step": 8110
},
{
"epoch": 1.0846970810233116,
"grad_norm": 4.78125,
"learning_rate": 2.2886053967405826e-06,
"loss": 0.6429,
"step": 8120
},
{
"epoch": 1.0860329971277805,
"grad_norm": 4.53125,
"learning_rate": 2.2852658295484907e-06,
"loss": 0.6933,
"step": 8130
},
{
"epoch": 1.087368913232249,
"grad_norm": 4.34375,
"learning_rate": 2.2819262623563987e-06,
"loss": 0.6901,
"step": 8140
},
{
"epoch": 1.0887048293367176,
"grad_norm": 4.5625,
"learning_rate": 2.278586695164307e-06,
"loss": 0.6658,
"step": 8150
},
{
"epoch": 1.0900407454411862,
"grad_norm": 4.8125,
"learning_rate": 2.275247127972215e-06,
"loss": 0.6607,
"step": 8160
},
{
"epoch": 1.091376661545655,
"grad_norm": 4.4375,
"learning_rate": 2.271907560780123e-06,
"loss": 0.6658,
"step": 8170
},
{
"epoch": 1.0927125776501236,
"grad_norm": 4.625,
"learning_rate": 2.268567993588031e-06,
"loss": 0.6655,
"step": 8180
},
{
"epoch": 1.0940484937545922,
"grad_norm": 4.59375,
"learning_rate": 2.265228426395939e-06,
"loss": 0.6643,
"step": 8190
},
{
"epoch": 1.0953844098590608,
"grad_norm": 4.5625,
"learning_rate": 2.261888859203847e-06,
"loss": 0.6695,
"step": 8200
},
{
"epoch": 1.0967203259635294,
"grad_norm": 4.59375,
"learning_rate": 2.2585492920117557e-06,
"loss": 0.6755,
"step": 8210
},
{
"epoch": 1.0980562420679982,
"grad_norm": 4.53125,
"learning_rate": 2.2552097248196638e-06,
"loss": 0.6726,
"step": 8220
},
{
"epoch": 1.0993921581724668,
"grad_norm": 4.875,
"learning_rate": 2.251870157627572e-06,
"loss": 0.7172,
"step": 8230
},
{
"epoch": 1.1007280742769354,
"grad_norm": 4.78125,
"learning_rate": 2.24853059043548e-06,
"loss": 0.6731,
"step": 8240
},
{
"epoch": 1.102063990381404,
"grad_norm": 4.8125,
"learning_rate": 2.245191023243388e-06,
"loss": 0.6915,
"step": 8250
},
{
"epoch": 1.102063990381404,
"eval_loss": 0.6810314059257507,
"eval_runtime": 252.462,
"eval_samples_per_second": 26.356,
"eval_steps_per_second": 3.296,
"step": 8250
},
{
"epoch": 1.1033999064858726,
"grad_norm": 4.53125,
"learning_rate": 2.241851456051296e-06,
"loss": 0.6893,
"step": 8260
},
{
"epoch": 1.1047358225903414,
"grad_norm": 4.5625,
"learning_rate": 2.2385118888592037e-06,
"loss": 0.6809,
"step": 8270
},
{
"epoch": 1.10607173869481,
"grad_norm": 4.875,
"learning_rate": 2.2351723216671122e-06,
"loss": 0.6966,
"step": 8280
},
{
"epoch": 1.1074076547992786,
"grad_norm": 4.90625,
"learning_rate": 2.2318327544750203e-06,
"loss": 0.6693,
"step": 8290
},
{
"epoch": 1.1087435709037472,
"grad_norm": 4.9375,
"learning_rate": 2.2284931872829284e-06,
"loss": 0.7169,
"step": 8300
},
{
"epoch": 1.1100794870082158,
"grad_norm": 4.75,
"learning_rate": 2.2251536200908364e-06,
"loss": 0.6943,
"step": 8310
},
{
"epoch": 1.1114154031126846,
"grad_norm": 4.4375,
"learning_rate": 2.2218140528987445e-06,
"loss": 0.6584,
"step": 8320
},
{
"epoch": 1.1127513192171532,
"grad_norm": 4.71875,
"learning_rate": 2.2184744857066526e-06,
"loss": 0.6514,
"step": 8330
},
{
"epoch": 1.1140872353216218,
"grad_norm": 4.5,
"learning_rate": 2.2151349185145607e-06,
"loss": 0.6863,
"step": 8340
},
{
"epoch": 1.1154231514260904,
"grad_norm": 4.8125,
"learning_rate": 2.2117953513224687e-06,
"loss": 0.6751,
"step": 8350
},
{
"epoch": 1.116759067530559,
"grad_norm": 4.28125,
"learning_rate": 2.208455784130377e-06,
"loss": 0.6752,
"step": 8360
},
{
"epoch": 1.1180949836350278,
"grad_norm": 4.78125,
"learning_rate": 2.205116216938285e-06,
"loss": 0.6665,
"step": 8370
},
{
"epoch": 1.1194308997394964,
"grad_norm": 4.84375,
"learning_rate": 2.201776649746193e-06,
"loss": 0.6953,
"step": 8380
},
{
"epoch": 1.120766815843965,
"grad_norm": 4.40625,
"learning_rate": 2.1984370825541015e-06,
"loss": 0.6613,
"step": 8390
},
{
"epoch": 1.1221027319484336,
"grad_norm": 4.53125,
"learning_rate": 2.1950975153620095e-06,
"loss": 0.6598,
"step": 8400
},
{
"epoch": 1.1234386480529022,
"grad_norm": 4.5625,
"learning_rate": 2.1917579481699176e-06,
"loss": 0.6804,
"step": 8410
},
{
"epoch": 1.124774564157371,
"grad_norm": 4.9375,
"learning_rate": 2.1884183809778253e-06,
"loss": 0.6609,
"step": 8420
},
{
"epoch": 1.1261104802618396,
"grad_norm": 4.78125,
"learning_rate": 2.1850788137857333e-06,
"loss": 0.6764,
"step": 8430
},
{
"epoch": 1.1274463963663082,
"grad_norm": 5.125,
"learning_rate": 2.1817392465936414e-06,
"loss": 0.6541,
"step": 8440
},
{
"epoch": 1.1287823124707768,
"grad_norm": 4.9375,
"learning_rate": 2.1783996794015495e-06,
"loss": 0.6851,
"step": 8450
},
{
"epoch": 1.1301182285752454,
"grad_norm": 4.96875,
"learning_rate": 2.175060112209458e-06,
"loss": 0.6727,
"step": 8460
},
{
"epoch": 1.1314541446797142,
"grad_norm": 4.90625,
"learning_rate": 2.171720545017366e-06,
"loss": 0.6893,
"step": 8470
},
{
"epoch": 1.1327900607841828,
"grad_norm": 4.625,
"learning_rate": 2.168380977825274e-06,
"loss": 0.6761,
"step": 8480
},
{
"epoch": 1.1341259768886514,
"grad_norm": 4.59375,
"learning_rate": 2.1650414106331822e-06,
"loss": 0.6817,
"step": 8490
},
{
"epoch": 1.13546189299312,
"grad_norm": 4.5,
"learning_rate": 2.1617018434410903e-06,
"loss": 0.6668,
"step": 8500
},
{
"epoch": 1.13546189299312,
"eval_loss": 0.6807094812393188,
"eval_runtime": 253.3038,
"eval_samples_per_second": 26.269,
"eval_steps_per_second": 3.285,
"step": 8500
},
{
"epoch": 1.1367978090975888,
"grad_norm": 4.46875,
"learning_rate": 2.1583622762489984e-06,
"loss": 0.6688,
"step": 8510
},
{
"epoch": 1.1381337252020574,
"grad_norm": 4.59375,
"learning_rate": 2.1550227090569064e-06,
"loss": 0.6453,
"step": 8520
},
{
"epoch": 1.139469641306526,
"grad_norm": 4.90625,
"learning_rate": 2.1516831418648145e-06,
"loss": 0.6816,
"step": 8530
},
{
"epoch": 1.1408055574109945,
"grad_norm": 4.71875,
"learning_rate": 2.1483435746727226e-06,
"loss": 0.6552,
"step": 8540
},
{
"epoch": 1.1421414735154631,
"grad_norm": 4.8125,
"learning_rate": 2.1450040074806307e-06,
"loss": 0.6865,
"step": 8550
},
{
"epoch": 1.143477389619932,
"grad_norm": 4.75,
"learning_rate": 2.1416644402885387e-06,
"loss": 0.6727,
"step": 8560
},
{
"epoch": 1.1448133057244005,
"grad_norm": 4.4375,
"learning_rate": 2.138324873096447e-06,
"loss": 0.6728,
"step": 8570
},
{
"epoch": 1.1461492218288691,
"grad_norm": 4.6875,
"learning_rate": 2.134985305904355e-06,
"loss": 0.674,
"step": 8580
},
{
"epoch": 1.1474851379333377,
"grad_norm": 4.6875,
"learning_rate": 2.131645738712263e-06,
"loss": 0.6701,
"step": 8590
},
{
"epoch": 1.1488210540378065,
"grad_norm": 5.0,
"learning_rate": 2.128306171520171e-06,
"loss": 0.6664,
"step": 8600
},
{
"epoch": 1.1501569701422751,
"grad_norm": 4.625,
"learning_rate": 2.124966604328079e-06,
"loss": 0.6615,
"step": 8610
},
{
"epoch": 1.1514928862467437,
"grad_norm": 4.75,
"learning_rate": 2.121627037135987e-06,
"loss": 0.667,
"step": 8620
},
{
"epoch": 1.1528288023512123,
"grad_norm": 4.46875,
"learning_rate": 2.1182874699438953e-06,
"loss": 0.6936,
"step": 8630
},
{
"epoch": 1.154164718455681,
"grad_norm": 4.6875,
"learning_rate": 2.1149479027518038e-06,
"loss": 0.663,
"step": 8640
},
{
"epoch": 1.1555006345601497,
"grad_norm": 4.84375,
"learning_rate": 2.111608335559712e-06,
"loss": 0.6965,
"step": 8650
},
{
"epoch": 1.1568365506646183,
"grad_norm": 4.8125,
"learning_rate": 2.10826876836762e-06,
"loss": 0.6457,
"step": 8660
},
{
"epoch": 1.158172466769087,
"grad_norm": 4.90625,
"learning_rate": 2.104929201175528e-06,
"loss": 0.6419,
"step": 8670
},
{
"epoch": 1.1595083828735555,
"grad_norm": 4.84375,
"learning_rate": 2.101589633983436e-06,
"loss": 0.6915,
"step": 8680
},
{
"epoch": 1.160844298978024,
"grad_norm": 4.8125,
"learning_rate": 2.098250066791344e-06,
"loss": 0.6945,
"step": 8690
},
{
"epoch": 1.162180215082493,
"grad_norm": 4.65625,
"learning_rate": 2.0949104995992522e-06,
"loss": 0.6829,
"step": 8700
},
{
"epoch": 1.1635161311869615,
"grad_norm": 5.09375,
"learning_rate": 2.09157093240716e-06,
"loss": 0.6909,
"step": 8710
},
{
"epoch": 1.16485204729143,
"grad_norm": 4.4375,
"learning_rate": 2.0882313652150684e-06,
"loss": 0.685,
"step": 8720
},
{
"epoch": 1.1661879633958987,
"grad_norm": 5.03125,
"learning_rate": 2.0848917980229764e-06,
"loss": 0.6884,
"step": 8730
},
{
"epoch": 1.1675238795003673,
"grad_norm": 4.84375,
"learning_rate": 2.0815522308308845e-06,
"loss": 0.6587,
"step": 8740
},
{
"epoch": 1.168859795604836,
"grad_norm": 4.9375,
"learning_rate": 2.0782126636387926e-06,
"loss": 0.6493,
"step": 8750
},
{
"epoch": 1.168859795604836,
"eval_loss": 0.680444598197937,
"eval_runtime": 255.5862,
"eval_samples_per_second": 26.034,
"eval_steps_per_second": 3.255,
"step": 8750
},
{
"epoch": 1.1701957117093047,
"grad_norm": 4.46875,
"learning_rate": 2.0748730964467007e-06,
"loss": 0.6866,
"step": 8760
},
{
"epoch": 1.1715316278137733,
"grad_norm": 4.6875,
"learning_rate": 2.0715335292546087e-06,
"loss": 0.6905,
"step": 8770
},
{
"epoch": 1.1728675439182419,
"grad_norm": 4.96875,
"learning_rate": 2.068193962062517e-06,
"loss": 0.6768,
"step": 8780
},
{
"epoch": 1.1742034600227105,
"grad_norm": 4.28125,
"learning_rate": 2.064854394870425e-06,
"loss": 0.6565,
"step": 8790
},
{
"epoch": 1.1755393761271793,
"grad_norm": 4.5,
"learning_rate": 2.061514827678333e-06,
"loss": 0.6762,
"step": 8800
},
{
"epoch": 1.1768752922316479,
"grad_norm": 4.78125,
"learning_rate": 2.058175260486241e-06,
"loss": 0.6785,
"step": 8810
},
{
"epoch": 1.1782112083361165,
"grad_norm": 4.65625,
"learning_rate": 2.0548356932941495e-06,
"loss": 0.6865,
"step": 8820
},
{
"epoch": 1.179547124440585,
"grad_norm": 4.90625,
"learning_rate": 2.0514961261020576e-06,
"loss": 0.6776,
"step": 8830
},
{
"epoch": 1.1808830405450537,
"grad_norm": 4.9375,
"learning_rate": 2.0481565589099657e-06,
"loss": 0.6596,
"step": 8840
},
{
"epoch": 1.1822189566495225,
"grad_norm": 4.96875,
"learning_rate": 2.0448169917178733e-06,
"loss": 0.6637,
"step": 8850
},
{
"epoch": 1.183554872753991,
"grad_norm": 4.90625,
"learning_rate": 2.0414774245257814e-06,
"loss": 0.6726,
"step": 8860
},
{
"epoch": 1.1848907888584597,
"grad_norm": 4.6875,
"learning_rate": 2.0381378573336895e-06,
"loss": 0.683,
"step": 8870
},
{
"epoch": 1.1862267049629283,
"grad_norm": 4.4375,
"learning_rate": 2.0347982901415976e-06,
"loss": 0.6947,
"step": 8880
},
{
"epoch": 1.1875626210673969,
"grad_norm": 4.46875,
"learning_rate": 2.0314587229495056e-06,
"loss": 0.6683,
"step": 8890
},
{
"epoch": 1.1888985371718657,
"grad_norm": 4.53125,
"learning_rate": 2.028119155757414e-06,
"loss": 0.6564,
"step": 8900
},
{
"epoch": 1.1902344532763343,
"grad_norm": 4.96875,
"learning_rate": 2.0247795885653222e-06,
"loss": 0.6766,
"step": 8910
},
{
"epoch": 1.1915703693808029,
"grad_norm": 5.125,
"learning_rate": 2.0214400213732303e-06,
"loss": 0.6973,
"step": 8920
},
{
"epoch": 1.1929062854852714,
"grad_norm": 4.8125,
"learning_rate": 2.0181004541811384e-06,
"loss": 0.6897,
"step": 8930
},
{
"epoch": 1.1942422015897403,
"grad_norm": 5.0,
"learning_rate": 2.0147608869890464e-06,
"loss": 0.6814,
"step": 8940
},
{
"epoch": 1.1955781176942089,
"grad_norm": 4.6875,
"learning_rate": 2.0114213197969545e-06,
"loss": 0.6569,
"step": 8950
},
{
"epoch": 1.1969140337986774,
"grad_norm": 4.28125,
"learning_rate": 2.0080817526048626e-06,
"loss": 0.6387,
"step": 8960
},
{
"epoch": 1.198249949903146,
"grad_norm": 4.6875,
"learning_rate": 2.0047421854127707e-06,
"loss": 0.63,
"step": 8970
},
{
"epoch": 1.1995858660076146,
"grad_norm": 4.875,
"learning_rate": 2.0014026182206787e-06,
"loss": 0.6978,
"step": 8980
},
{
"epoch": 1.2009217821120834,
"grad_norm": 4.6875,
"learning_rate": 1.998063051028587e-06,
"loss": 0.6754,
"step": 8990
},
{
"epoch": 1.202257698216552,
"grad_norm": 4.75,
"learning_rate": 1.994723483836495e-06,
"loss": 0.6963,
"step": 9000
},
{
"epoch": 1.202257698216552,
"eval_loss": 0.6801754832267761,
"eval_runtime": 255.0408,
"eval_samples_per_second": 26.09,
"eval_steps_per_second": 3.262,
"step": 9000
},
{
"epoch": 1.2035936143210206,
"grad_norm": 4.96875,
"learning_rate": 1.991383916644403e-06,
"loss": 0.7035,
"step": 9010
},
{
"epoch": 1.2049295304254892,
"grad_norm": 4.78125,
"learning_rate": 1.988044349452311e-06,
"loss": 0.6682,
"step": 9020
},
{
"epoch": 1.206265446529958,
"grad_norm": 4.6875,
"learning_rate": 1.984704782260219e-06,
"loss": 0.6621,
"step": 9030
},
{
"epoch": 1.2076013626344266,
"grad_norm": 4.5625,
"learning_rate": 1.981365215068127e-06,
"loss": 0.6769,
"step": 9040
},
{
"epoch": 1.2089372787388952,
"grad_norm": 4.9375,
"learning_rate": 1.9780256478760353e-06,
"loss": 0.6575,
"step": 9050
},
{
"epoch": 1.2102731948433638,
"grad_norm": 4.96875,
"learning_rate": 1.9746860806839433e-06,
"loss": 0.6558,
"step": 9060
},
{
"epoch": 1.2116091109478324,
"grad_norm": 5.0,
"learning_rate": 1.9713465134918514e-06,
"loss": 0.6964,
"step": 9070
},
{
"epoch": 1.2129450270523012,
"grad_norm": 4.6875,
"learning_rate": 1.96800694629976e-06,
"loss": 0.6833,
"step": 9080
},
{
"epoch": 1.2142809431567698,
"grad_norm": 4.625,
"learning_rate": 1.964667379107668e-06,
"loss": 0.6925,
"step": 9090
},
{
"epoch": 1.2156168592612384,
"grad_norm": 5.125,
"learning_rate": 1.961327811915576e-06,
"loss": 0.6783,
"step": 9100
},
{
"epoch": 1.216952775365707,
"grad_norm": 4.71875,
"learning_rate": 1.957988244723484e-06,
"loss": 0.6665,
"step": 9110
},
{
"epoch": 1.2182886914701756,
"grad_norm": 4.90625,
"learning_rate": 1.9546486775313922e-06,
"loss": 0.6708,
"step": 9120
},
{
"epoch": 1.2196246075746444,
"grad_norm": 4.65625,
"learning_rate": 1.9513091103393003e-06,
"loss": 0.6731,
"step": 9130
},
{
"epoch": 1.220960523679113,
"grad_norm": 4.75,
"learning_rate": 1.947969543147208e-06,
"loss": 0.6771,
"step": 9140
},
{
"epoch": 1.2222964397835816,
"grad_norm": 4.59375,
"learning_rate": 1.9446299759551164e-06,
"loss": 0.6683,
"step": 9150
},
{
"epoch": 1.2236323558880502,
"grad_norm": 5.09375,
"learning_rate": 1.9412904087630245e-06,
"loss": 0.6624,
"step": 9160
},
{
"epoch": 1.2249682719925188,
"grad_norm": 5.0625,
"learning_rate": 1.9379508415709326e-06,
"loss": 0.6934,
"step": 9170
},
{
"epoch": 1.2263041880969876,
"grad_norm": 5.1875,
"learning_rate": 1.9346112743788407e-06,
"loss": 0.6854,
"step": 9180
},
{
"epoch": 1.2276401042014562,
"grad_norm": 4.5625,
"learning_rate": 1.9312717071867487e-06,
"loss": 0.6695,
"step": 9190
},
{
"epoch": 1.2289760203059248,
"grad_norm": 4.9375,
"learning_rate": 1.927932139994657e-06,
"loss": 0.6711,
"step": 9200
},
{
"epoch": 1.2303119364103934,
"grad_norm": 4.53125,
"learning_rate": 1.924592572802565e-06,
"loss": 0.6741,
"step": 9210
},
{
"epoch": 1.231647852514862,
"grad_norm": 4.6875,
"learning_rate": 1.921253005610473e-06,
"loss": 0.6394,
"step": 9220
},
{
"epoch": 1.2329837686193308,
"grad_norm": 5.03125,
"learning_rate": 1.917913438418381e-06,
"loss": 0.6707,
"step": 9230
},
{
"epoch": 1.2343196847237994,
"grad_norm": 5.0,
"learning_rate": 1.914573871226289e-06,
"loss": 0.6822,
"step": 9240
},
{
"epoch": 1.235655600828268,
"grad_norm": 4.90625,
"learning_rate": 1.911234304034197e-06,
"loss": 0.6909,
"step": 9250
},
{
"epoch": 1.235655600828268,
"eval_loss": 0.6799613833427429,
"eval_runtime": 252.555,
"eval_samples_per_second": 26.347,
"eval_steps_per_second": 3.294,
"step": 9250
},
{
"epoch": 1.2369915169327366,
"grad_norm": 4.84375,
"learning_rate": 1.9078947368421057e-06,
"loss": 0.6629,
"step": 9260
},
{
"epoch": 1.2383274330372052,
"grad_norm": 5.03125,
"learning_rate": 1.9045551696500136e-06,
"loss": 0.6716,
"step": 9270
},
{
"epoch": 1.239663349141674,
"grad_norm": 5.1875,
"learning_rate": 1.9012156024579214e-06,
"loss": 0.6838,
"step": 9280
},
{
"epoch": 1.2409992652461426,
"grad_norm": 5.0,
"learning_rate": 1.8978760352658295e-06,
"loss": 0.6724,
"step": 9290
},
{
"epoch": 1.2423351813506112,
"grad_norm": 5.1875,
"learning_rate": 1.8945364680737378e-06,
"loss": 0.6812,
"step": 9300
},
{
"epoch": 1.2436710974550798,
"grad_norm": 5.34375,
"learning_rate": 1.8911969008816459e-06,
"loss": 0.6939,
"step": 9310
},
{
"epoch": 1.2450070135595483,
"grad_norm": 4.59375,
"learning_rate": 1.887857333689554e-06,
"loss": 0.6542,
"step": 9320
},
{
"epoch": 1.2463429296640172,
"grad_norm": 4.65625,
"learning_rate": 1.884517766497462e-06,
"loss": 0.6697,
"step": 9330
},
{
"epoch": 1.2476788457684858,
"grad_norm": 4.9375,
"learning_rate": 1.88117819930537e-06,
"loss": 0.6881,
"step": 9340
},
{
"epoch": 1.2490147618729543,
"grad_norm": 4.84375,
"learning_rate": 1.8778386321132784e-06,
"loss": 0.6765,
"step": 9350
},
{
"epoch": 1.250350677977423,
"grad_norm": 4.875,
"learning_rate": 1.8744990649211864e-06,
"loss": 0.673,
"step": 9360
},
{
"epoch": 1.2516865940818915,
"grad_norm": 4.59375,
"learning_rate": 1.8711594977290945e-06,
"loss": 0.6463,
"step": 9370
},
{
"epoch": 1.2530225101863603,
"grad_norm": 4.65625,
"learning_rate": 1.8678199305370026e-06,
"loss": 0.6932,
"step": 9380
},
{
"epoch": 1.254358426290829,
"grad_norm": 4.78125,
"learning_rate": 1.8644803633449107e-06,
"loss": 0.6514,
"step": 9390
},
{
"epoch": 1.2556943423952975,
"grad_norm": 4.5,
"learning_rate": 1.8611407961528187e-06,
"loss": 0.666,
"step": 9400
},
{
"epoch": 1.2570302584997664,
"grad_norm": 4.625,
"learning_rate": 1.857801228960727e-06,
"loss": 0.685,
"step": 9410
},
{
"epoch": 1.2583661746042347,
"grad_norm": 4.90625,
"learning_rate": 1.8544616617686351e-06,
"loss": 0.6701,
"step": 9420
},
{
"epoch": 1.2597020907087035,
"grad_norm": 4.84375,
"learning_rate": 1.851122094576543e-06,
"loss": 0.6765,
"step": 9430
},
{
"epoch": 1.2610380068131721,
"grad_norm": 4.9375,
"learning_rate": 1.847782527384451e-06,
"loss": 0.6839,
"step": 9440
},
{
"epoch": 1.2623739229176407,
"grad_norm": 4.6875,
"learning_rate": 1.8444429601923591e-06,
"loss": 0.6846,
"step": 9450
},
{
"epoch": 1.2637098390221095,
"grad_norm": 4.5625,
"learning_rate": 1.8411033930002672e-06,
"loss": 0.6443,
"step": 9460
},
{
"epoch": 1.2650457551265781,
"grad_norm": 5.0625,
"learning_rate": 1.8377638258081753e-06,
"loss": 0.6633,
"step": 9470
},
{
"epoch": 1.2663816712310467,
"grad_norm": 4.59375,
"learning_rate": 1.8344242586160836e-06,
"loss": 0.6792,
"step": 9480
},
{
"epoch": 1.2677175873355153,
"grad_norm": 5.15625,
"learning_rate": 1.8310846914239916e-06,
"loss": 0.6935,
"step": 9490
},
{
"epoch": 1.269053503439984,
"grad_norm": 5.09375,
"learning_rate": 1.8277451242318997e-06,
"loss": 0.6716,
"step": 9500
},
{
"epoch": 1.269053503439984,
"eval_loss": 0.6797373294830322,
"eval_runtime": 252.8856,
"eval_samples_per_second": 26.312,
"eval_steps_per_second": 3.29,
"step": 9500
},
{
"epoch": 1.2703894195444527,
"grad_norm": 4.65625,
"learning_rate": 1.8244055570398078e-06,
"loss": 0.6704,
"step": 9510
},
{
"epoch": 1.2717253356489213,
"grad_norm": 5.1875,
"learning_rate": 1.8210659898477159e-06,
"loss": 0.6817,
"step": 9520
},
{
"epoch": 1.27306125175339,
"grad_norm": 5.15625,
"learning_rate": 1.8177264226556242e-06,
"loss": 0.6975,
"step": 9530
},
{
"epoch": 1.2743971678578585,
"grad_norm": 4.8125,
"learning_rate": 1.8143868554635322e-06,
"loss": 0.6749,
"step": 9540
},
{
"epoch": 1.275733083962327,
"grad_norm": 4.78125,
"learning_rate": 1.8110472882714403e-06,
"loss": 0.6677,
"step": 9550
},
{
"epoch": 1.277069000066796,
"grad_norm": 4.78125,
"learning_rate": 1.8077077210793484e-06,
"loss": 0.6639,
"step": 9560
},
{
"epoch": 1.2784049161712645,
"grad_norm": 4.96875,
"learning_rate": 1.8043681538872562e-06,
"loss": 0.6957,
"step": 9570
},
{
"epoch": 1.279740832275733,
"grad_norm": 5.15625,
"learning_rate": 1.8010285866951643e-06,
"loss": 0.6986,
"step": 9580
},
{
"epoch": 1.2810767483802017,
"grad_norm": 4.90625,
"learning_rate": 1.7976890195030724e-06,
"loss": 0.676,
"step": 9590
},
{
"epoch": 1.2824126644846703,
"grad_norm": 4.5625,
"learning_rate": 1.7943494523109805e-06,
"loss": 0.6534,
"step": 9600
},
{
"epoch": 1.283748580589139,
"grad_norm": 4.65625,
"learning_rate": 1.7910098851188888e-06,
"loss": 0.6783,
"step": 9610
},
{
"epoch": 1.2850844966936077,
"grad_norm": 4.90625,
"learning_rate": 1.7876703179267968e-06,
"loss": 0.6529,
"step": 9620
},
{
"epoch": 1.2864204127980763,
"grad_norm": 4.84375,
"learning_rate": 1.784330750734705e-06,
"loss": 0.6707,
"step": 9630
},
{
"epoch": 1.2877563289025449,
"grad_norm": 4.5625,
"learning_rate": 1.780991183542613e-06,
"loss": 0.6402,
"step": 9640
},
{
"epoch": 1.2890922450070135,
"grad_norm": 4.65625,
"learning_rate": 1.777651616350521e-06,
"loss": 0.6274,
"step": 9650
},
{
"epoch": 1.2904281611114823,
"grad_norm": 4.6875,
"learning_rate": 1.7743120491584293e-06,
"loss": 0.6611,
"step": 9660
},
{
"epoch": 1.2917640772159509,
"grad_norm": 4.9375,
"learning_rate": 1.7709724819663374e-06,
"loss": 0.6731,
"step": 9670
},
{
"epoch": 1.2930999933204195,
"grad_norm": 4.59375,
"learning_rate": 1.7676329147742455e-06,
"loss": 0.6603,
"step": 9680
},
{
"epoch": 1.294435909424888,
"grad_norm": 4.5625,
"learning_rate": 1.7642933475821536e-06,
"loss": 0.6699,
"step": 9690
},
{
"epoch": 1.2957718255293567,
"grad_norm": 4.90625,
"learning_rate": 1.7609537803900616e-06,
"loss": 0.6976,
"step": 9700
},
{
"epoch": 1.2971077416338255,
"grad_norm": 4.6875,
"learning_rate": 1.75761421319797e-06,
"loss": 0.6753,
"step": 9710
},
{
"epoch": 1.298443657738294,
"grad_norm": 4.96875,
"learning_rate": 1.7542746460058776e-06,
"loss": 0.6892,
"step": 9720
},
{
"epoch": 1.2997795738427627,
"grad_norm": 4.90625,
"learning_rate": 1.7509350788137859e-06,
"loss": 0.6895,
"step": 9730
},
{
"epoch": 1.3011154899472313,
"grad_norm": 4.6875,
"learning_rate": 1.747595511621694e-06,
"loss": 0.6742,
"step": 9740
},
{
"epoch": 1.3024514060516998,
"grad_norm": 4.78125,
"learning_rate": 1.744255944429602e-06,
"loss": 0.6659,
"step": 9750
},
{
"epoch": 1.3024514060516998,
"eval_loss": 0.6795492768287659,
"eval_runtime": 255.162,
"eval_samples_per_second": 26.078,
"eval_steps_per_second": 3.261,
"step": 9750
},
{
"epoch": 1.3037873221561687,
"grad_norm": 4.75,
"learning_rate": 1.74091637723751e-06,
"loss": 0.6744,
"step": 9760
},
{
"epoch": 1.3051232382606373,
"grad_norm": 5.09375,
"learning_rate": 1.7375768100454182e-06,
"loss": 0.6923,
"step": 9770
},
{
"epoch": 1.3064591543651058,
"grad_norm": 5.15625,
"learning_rate": 1.7342372428533262e-06,
"loss": 0.686,
"step": 9780
},
{
"epoch": 1.3077950704695744,
"grad_norm": 4.84375,
"learning_rate": 1.7308976756612345e-06,
"loss": 0.6758,
"step": 9790
},
{
"epoch": 1.309130986574043,
"grad_norm": 4.625,
"learning_rate": 1.7275581084691426e-06,
"loss": 0.6643,
"step": 9800
},
{
"epoch": 1.3104669026785118,
"grad_norm": 4.59375,
"learning_rate": 1.7242185412770507e-06,
"loss": 0.6369,
"step": 9810
},
{
"epoch": 1.3118028187829804,
"grad_norm": 4.75,
"learning_rate": 1.7208789740849588e-06,
"loss": 0.6779,
"step": 9820
},
{
"epoch": 1.313138734887449,
"grad_norm": 4.59375,
"learning_rate": 1.7175394068928668e-06,
"loss": 0.6443,
"step": 9830
},
{
"epoch": 1.3144746509919176,
"grad_norm": 4.46875,
"learning_rate": 1.7141998397007751e-06,
"loss": 0.6458,
"step": 9840
},
{
"epoch": 1.3158105670963862,
"grad_norm": 4.5625,
"learning_rate": 1.7108602725086832e-06,
"loss": 0.6609,
"step": 9850
},
{
"epoch": 1.317146483200855,
"grad_norm": 4.625,
"learning_rate": 1.707520705316591e-06,
"loss": 0.6774,
"step": 9860
},
{
"epoch": 1.3184823993053236,
"grad_norm": 4.90625,
"learning_rate": 1.7041811381244991e-06,
"loss": 0.6822,
"step": 9870
},
{
"epoch": 1.3198183154097922,
"grad_norm": 4.34375,
"learning_rate": 1.7008415709324072e-06,
"loss": 0.6641,
"step": 9880
},
{
"epoch": 1.321154231514261,
"grad_norm": 4.78125,
"learning_rate": 1.6975020037403153e-06,
"loss": 0.6651,
"step": 9890
},
{
"epoch": 1.3224901476187294,
"grad_norm": 4.9375,
"learning_rate": 1.6941624365482234e-06,
"loss": 0.6505,
"step": 9900
},
{
"epoch": 1.3238260637231982,
"grad_norm": 4.5,
"learning_rate": 1.6908228693561316e-06,
"loss": 0.6443,
"step": 9910
},
{
"epoch": 1.3251619798276668,
"grad_norm": 4.78125,
"learning_rate": 1.6874833021640397e-06,
"loss": 0.647,
"step": 9920
},
{
"epoch": 1.3264978959321354,
"grad_norm": 4.34375,
"learning_rate": 1.6841437349719478e-06,
"loss": 0.6459,
"step": 9930
},
{
"epoch": 1.3278338120366042,
"grad_norm": 4.90625,
"learning_rate": 1.6808041677798559e-06,
"loss": 0.6907,
"step": 9940
},
{
"epoch": 1.3291697281410728,
"grad_norm": 4.6875,
"learning_rate": 1.677464600587764e-06,
"loss": 0.6816,
"step": 9950
},
{
"epoch": 1.3305056442455414,
"grad_norm": 5.0625,
"learning_rate": 1.674125033395672e-06,
"loss": 0.6724,
"step": 9960
},
{
"epoch": 1.33184156035001,
"grad_norm": 5.09375,
"learning_rate": 1.6707854662035803e-06,
"loss": 0.6383,
"step": 9970
},
{
"epoch": 1.3331774764544786,
"grad_norm": 4.625,
"learning_rate": 1.6674458990114884e-06,
"loss": 0.6544,
"step": 9980
},
{
"epoch": 1.3345133925589474,
"grad_norm": 4.71875,
"learning_rate": 1.6641063318193965e-06,
"loss": 0.6752,
"step": 9990
},
{
"epoch": 1.335849308663416,
"grad_norm": 4.5625,
"learning_rate": 1.6607667646273045e-06,
"loss": 0.6666,
"step": 10000
},
{
"epoch": 1.335849308663416,
"eval_loss": 0.6794190406799316,
"eval_runtime": 254.8087,
"eval_samples_per_second": 26.114,
"eval_steps_per_second": 3.265,
"step": 10000
},
{
"epoch": 1.3371852247678846,
"grad_norm": 4.96875,
"learning_rate": 1.6574271974352124e-06,
"loss": 0.6811,
"step": 10010
},
{
"epoch": 1.3385211408723532,
"grad_norm": 5.0,
"learning_rate": 1.6540876302431205e-06,
"loss": 0.6582,
"step": 10020
},
{
"epoch": 1.3398570569768218,
"grad_norm": 4.8125,
"learning_rate": 1.6507480630510285e-06,
"loss": 0.6782,
"step": 10030
},
{
"epoch": 1.3411929730812906,
"grad_norm": 4.5625,
"learning_rate": 1.6474084958589368e-06,
"loss": 0.6767,
"step": 10040
},
{
"epoch": 1.3425288891857592,
"grad_norm": 4.53125,
"learning_rate": 1.644068928666845e-06,
"loss": 0.6628,
"step": 10050
},
{
"epoch": 1.3438648052902278,
"grad_norm": 5.21875,
"learning_rate": 1.640729361474753e-06,
"loss": 0.6768,
"step": 10060
},
{
"epoch": 1.3452007213946964,
"grad_norm": 4.96875,
"learning_rate": 1.637389794282661e-06,
"loss": 0.7003,
"step": 10070
},
{
"epoch": 1.346536637499165,
"grad_norm": 4.96875,
"learning_rate": 1.6340502270905691e-06,
"loss": 0.6615,
"step": 10080
},
{
"epoch": 1.3478725536036338,
"grad_norm": 4.84375,
"learning_rate": 1.6307106598984774e-06,
"loss": 0.6545,
"step": 10090
},
{
"epoch": 1.3492084697081024,
"grad_norm": 5.28125,
"learning_rate": 1.6273710927063855e-06,
"loss": 0.6654,
"step": 10100
},
{
"epoch": 1.350544385812571,
"grad_norm": 5.40625,
"learning_rate": 1.6240315255142936e-06,
"loss": 0.6955,
"step": 10110
},
{
"epoch": 1.3518803019170396,
"grad_norm": 4.46875,
"learning_rate": 1.6206919583222016e-06,
"loss": 0.6748,
"step": 10120
},
{
"epoch": 1.3532162180215082,
"grad_norm": 4.875,
"learning_rate": 1.6173523911301097e-06,
"loss": 0.6536,
"step": 10130
},
{
"epoch": 1.354552134125977,
"grad_norm": 4.90625,
"learning_rate": 1.6140128239380178e-06,
"loss": 0.6938,
"step": 10140
},
{
"epoch": 1.3558880502304456,
"grad_norm": 4.375,
"learning_rate": 1.6106732567459257e-06,
"loss": 0.656,
"step": 10150
},
{
"epoch": 1.3572239663349142,
"grad_norm": 4.96875,
"learning_rate": 1.607333689553834e-06,
"loss": 0.6836,
"step": 10160
},
{
"epoch": 1.3585598824393827,
"grad_norm": 4.71875,
"learning_rate": 1.603994122361742e-06,
"loss": 0.6439,
"step": 10170
},
{
"epoch": 1.3598957985438513,
"grad_norm": 4.78125,
"learning_rate": 1.60065455516965e-06,
"loss": 0.6532,
"step": 10180
},
{
"epoch": 1.3612317146483202,
"grad_norm": 4.9375,
"learning_rate": 1.5973149879775582e-06,
"loss": 0.6816,
"step": 10190
},
{
"epoch": 1.3625676307527887,
"grad_norm": 4.90625,
"learning_rate": 1.5939754207854662e-06,
"loss": 0.6559,
"step": 10200
},
{
"epoch": 1.3639035468572573,
"grad_norm": 4.46875,
"learning_rate": 1.5906358535933743e-06,
"loss": 0.6568,
"step": 10210
},
{
"epoch": 1.365239462961726,
"grad_norm": 4.5,
"learning_rate": 1.5872962864012826e-06,
"loss": 0.6608,
"step": 10220
},
{
"epoch": 1.3665753790661945,
"grad_norm": 4.75,
"learning_rate": 1.5839567192091907e-06,
"loss": 0.6396,
"step": 10230
},
{
"epoch": 1.3679112951706633,
"grad_norm": 5.09375,
"learning_rate": 1.5806171520170988e-06,
"loss": 0.6776,
"step": 10240
},
{
"epoch": 1.369247211275132,
"grad_norm": 4.6875,
"learning_rate": 1.5772775848250068e-06,
"loss": 0.6871,
"step": 10250
},
{
"epoch": 1.369247211275132,
"eval_loss": 0.6792933940887451,
"eval_runtime": 252.2499,
"eval_samples_per_second": 26.379,
"eval_steps_per_second": 3.298,
"step": 10250
},
{
"epoch": 1.3705831273796005,
"grad_norm": 5.25,
"learning_rate": 1.573938017632915e-06,
"loss": 0.668,
"step": 10260
},
{
"epoch": 1.3719190434840691,
"grad_norm": 4.78125,
"learning_rate": 1.5705984504408232e-06,
"loss": 0.6973,
"step": 10270
},
{
"epoch": 1.3732549595885377,
"grad_norm": 4.59375,
"learning_rate": 1.5672588832487313e-06,
"loss": 0.6532,
"step": 10280
},
{
"epoch": 1.3745908756930065,
"grad_norm": 5.15625,
"learning_rate": 1.5639193160566393e-06,
"loss": 0.6759,
"step": 10290
},
{
"epoch": 1.3759267917974751,
"grad_norm": 5.0,
"learning_rate": 1.5605797488645472e-06,
"loss": 0.6479,
"step": 10300
},
{
"epoch": 1.3772627079019437,
"grad_norm": 4.46875,
"learning_rate": 1.5572401816724553e-06,
"loss": 0.6474,
"step": 10310
},
{
"epoch": 1.3785986240064125,
"grad_norm": 4.84375,
"learning_rate": 1.5539006144803634e-06,
"loss": 0.6599,
"step": 10320
},
{
"epoch": 1.379934540110881,
"grad_norm": 4.78125,
"learning_rate": 1.5505610472882714e-06,
"loss": 0.664,
"step": 10330
},
{
"epoch": 1.3812704562153497,
"grad_norm": 4.6875,
"learning_rate": 1.5472214800961797e-06,
"loss": 0.6708,
"step": 10340
},
{
"epoch": 1.3826063723198183,
"grad_norm": 4.8125,
"learning_rate": 1.5438819129040878e-06,
"loss": 0.6586,
"step": 10350
},
{
"epoch": 1.383942288424287,
"grad_norm": 5.0,
"learning_rate": 1.5405423457119959e-06,
"loss": 0.6803,
"step": 10360
},
{
"epoch": 1.3852782045287557,
"grad_norm": 5.1875,
"learning_rate": 1.537202778519904e-06,
"loss": 0.6989,
"step": 10370
},
{
"epoch": 1.3866141206332243,
"grad_norm": 5.21875,
"learning_rate": 1.533863211327812e-06,
"loss": 0.7034,
"step": 10380
},
{
"epoch": 1.387950036737693,
"grad_norm": 4.78125,
"learning_rate": 1.53052364413572e-06,
"loss": 0.6603,
"step": 10390
},
{
"epoch": 1.3892859528421615,
"grad_norm": 4.90625,
"learning_rate": 1.5271840769436284e-06,
"loss": 0.6672,
"step": 10400
},
{
"epoch": 1.39062186894663,
"grad_norm": 4.90625,
"learning_rate": 1.5238445097515365e-06,
"loss": 0.6711,
"step": 10410
},
{
"epoch": 1.391957785051099,
"grad_norm": 4.65625,
"learning_rate": 1.5205049425594445e-06,
"loss": 0.6672,
"step": 10420
},
{
"epoch": 1.3932937011555675,
"grad_norm": 4.6875,
"learning_rate": 1.5171653753673526e-06,
"loss": 0.6604,
"step": 10430
},
{
"epoch": 1.394629617260036,
"grad_norm": 4.78125,
"learning_rate": 1.5138258081752605e-06,
"loss": 0.6513,
"step": 10440
},
{
"epoch": 1.3959655333645047,
"grad_norm": 4.9375,
"learning_rate": 1.5104862409831685e-06,
"loss": 0.6759,
"step": 10450
},
{
"epoch": 1.3973014494689733,
"grad_norm": 4.84375,
"learning_rate": 1.5071466737910766e-06,
"loss": 0.6859,
"step": 10460
},
{
"epoch": 1.398637365573442,
"grad_norm": 4.5625,
"learning_rate": 1.503807106598985e-06,
"loss": 0.6567,
"step": 10470
},
{
"epoch": 1.3999732816779107,
"grad_norm": 4.6875,
"learning_rate": 1.500467539406893e-06,
"loss": 0.6348,
"step": 10480
},
{
"epoch": 1.4013091977823793,
"grad_norm": 4.59375,
"learning_rate": 1.497127972214801e-06,
"loss": 0.6553,
"step": 10490
},
{
"epoch": 1.4026451138868479,
"grad_norm": 4.96875,
"learning_rate": 1.4937884050227091e-06,
"loss": 0.6883,
"step": 10500
},
{
"epoch": 1.4026451138868479,
"eval_loss": 0.679176926612854,
"eval_runtime": 253.7083,
"eval_samples_per_second": 26.227,
"eval_steps_per_second": 3.279,
"step": 10500
},
{
"epoch": 1.4039810299913165,
"grad_norm": 4.71875,
"learning_rate": 1.4904488378306172e-06,
"loss": 0.6663,
"step": 10510
},
{
"epoch": 1.4053169460957853,
"grad_norm": 4.8125,
"learning_rate": 1.4871092706385255e-06,
"loss": 0.6368,
"step": 10520
},
{
"epoch": 1.4066528622002539,
"grad_norm": 4.46875,
"learning_rate": 1.4837697034464336e-06,
"loss": 0.6754,
"step": 10530
},
{
"epoch": 1.4079887783047225,
"grad_norm": 4.90625,
"learning_rate": 1.4804301362543416e-06,
"loss": 0.6513,
"step": 10540
},
{
"epoch": 1.409324694409191,
"grad_norm": 4.8125,
"learning_rate": 1.4770905690622497e-06,
"loss": 0.6642,
"step": 10550
},
{
"epoch": 1.4106606105136597,
"grad_norm": 4.5625,
"learning_rate": 1.4737510018701578e-06,
"loss": 0.6528,
"step": 10560
},
{
"epoch": 1.4119965266181285,
"grad_norm": 4.625,
"learning_rate": 1.4704114346780659e-06,
"loss": 0.6517,
"step": 10570
},
{
"epoch": 1.413332442722597,
"grad_norm": 4.875,
"learning_rate": 1.4670718674859737e-06,
"loss": 0.6885,
"step": 10580
},
{
"epoch": 1.4146683588270657,
"grad_norm": 4.6875,
"learning_rate": 1.4637323002938818e-06,
"loss": 0.6728,
"step": 10590
},
{
"epoch": 1.4160042749315342,
"grad_norm": 4.875,
"learning_rate": 1.46039273310179e-06,
"loss": 0.6677,
"step": 10600
},
{
"epoch": 1.4173401910360028,
"grad_norm": 4.96875,
"learning_rate": 1.4570531659096982e-06,
"loss": 0.6426,
"step": 10610
},
{
"epoch": 1.4186761071404717,
"grad_norm": 4.9375,
"learning_rate": 1.4537135987176062e-06,
"loss": 0.663,
"step": 10620
},
{
"epoch": 1.4200120232449402,
"grad_norm": 5.25,
"learning_rate": 1.4503740315255143e-06,
"loss": 0.6581,
"step": 10630
},
{
"epoch": 1.4213479393494088,
"grad_norm": 4.875,
"learning_rate": 1.4470344643334224e-06,
"loss": 0.6741,
"step": 10640
},
{
"epoch": 1.4226838554538774,
"grad_norm": 5.125,
"learning_rate": 1.4436948971413307e-06,
"loss": 0.6941,
"step": 10650
},
{
"epoch": 1.424019771558346,
"grad_norm": 5.03125,
"learning_rate": 1.4403553299492388e-06,
"loss": 0.684,
"step": 10660
},
{
"epoch": 1.4253556876628148,
"grad_norm": 5.0,
"learning_rate": 1.4370157627571468e-06,
"loss": 0.6659,
"step": 10670
},
{
"epoch": 1.4266916037672834,
"grad_norm": 4.78125,
"learning_rate": 1.433676195565055e-06,
"loss": 0.6525,
"step": 10680
},
{
"epoch": 1.428027519871752,
"grad_norm": 4.71875,
"learning_rate": 1.430336628372963e-06,
"loss": 0.6455,
"step": 10690
},
{
"epoch": 1.4293634359762206,
"grad_norm": 4.84375,
"learning_rate": 1.4269970611808713e-06,
"loss": 0.6505,
"step": 10700
},
{
"epoch": 1.4306993520806892,
"grad_norm": 5.03125,
"learning_rate": 1.4236574939887793e-06,
"loss": 0.6829,
"step": 10710
},
{
"epoch": 1.432035268185158,
"grad_norm": 4.90625,
"learning_rate": 1.4203179267966874e-06,
"loss": 0.6913,
"step": 10720
},
{
"epoch": 1.4333711842896266,
"grad_norm": 4.90625,
"learning_rate": 1.4169783596045953e-06,
"loss": 0.6604,
"step": 10730
},
{
"epoch": 1.4347071003940952,
"grad_norm": 4.75,
"learning_rate": 1.4136387924125034e-06,
"loss": 0.6654,
"step": 10740
},
{
"epoch": 1.436043016498564,
"grad_norm": 5.25,
"learning_rate": 1.4102992252204114e-06,
"loss": 0.6516,
"step": 10750
},
{
"epoch": 1.436043016498564,
"eval_loss": 0.6790998578071594,
"eval_runtime": 255.8863,
"eval_samples_per_second": 26.004,
"eval_steps_per_second": 3.251,
"step": 10750
},
{
"epoch": 1.4373789326030324,
"grad_norm": 4.6875,
"learning_rate": 1.4069596580283195e-06,
"loss": 0.6694,
"step": 10760
},
{
"epoch": 1.4387148487075012,
"grad_norm": 4.84375,
"learning_rate": 1.4036200908362276e-06,
"loss": 0.6974,
"step": 10770
},
{
"epoch": 1.4400507648119698,
"grad_norm": 4.78125,
"learning_rate": 1.4002805236441359e-06,
"loss": 0.6682,
"step": 10780
},
{
"epoch": 1.4413866809164384,
"grad_norm": 4.84375,
"learning_rate": 1.396940956452044e-06,
"loss": 0.6722,
"step": 10790
},
{
"epoch": 1.4427225970209072,
"grad_norm": 4.59375,
"learning_rate": 1.393601389259952e-06,
"loss": 0.671,
"step": 10800
},
{
"epoch": 1.4440585131253758,
"grad_norm": 4.90625,
"learning_rate": 1.39026182206786e-06,
"loss": 0.6925,
"step": 10810
},
{
"epoch": 1.4453944292298444,
"grad_norm": 4.375,
"learning_rate": 1.3869222548757682e-06,
"loss": 0.6508,
"step": 10820
},
{
"epoch": 1.446730345334313,
"grad_norm": 5.0625,
"learning_rate": 1.3835826876836765e-06,
"loss": 0.7062,
"step": 10830
},
{
"epoch": 1.4480662614387816,
"grad_norm": 4.3125,
"learning_rate": 1.3802431204915845e-06,
"loss": 0.6777,
"step": 10840
},
{
"epoch": 1.4494021775432504,
"grad_norm": 4.53125,
"learning_rate": 1.3769035532994926e-06,
"loss": 0.6626,
"step": 10850
},
{
"epoch": 1.450738093647719,
"grad_norm": 5.09375,
"learning_rate": 1.3735639861074007e-06,
"loss": 0.6751,
"step": 10860
},
{
"epoch": 1.4520740097521876,
"grad_norm": 4.65625,
"learning_rate": 1.3702244189153085e-06,
"loss": 0.6736,
"step": 10870
},
{
"epoch": 1.4534099258566562,
"grad_norm": 5.09375,
"learning_rate": 1.3668848517232166e-06,
"loss": 0.6753,
"step": 10880
},
{
"epoch": 1.4547458419611248,
"grad_norm": 4.75,
"learning_rate": 1.3635452845311247e-06,
"loss": 0.6637,
"step": 10890
},
{
"epoch": 1.4560817580655936,
"grad_norm": 4.625,
"learning_rate": 1.360205717339033e-06,
"loss": 0.6559,
"step": 10900
},
{
"epoch": 1.4574176741700622,
"grad_norm": 4.875,
"learning_rate": 1.356866150146941e-06,
"loss": 0.6806,
"step": 10910
},
{
"epoch": 1.4587535902745308,
"grad_norm": 4.65625,
"learning_rate": 1.3535265829548491e-06,
"loss": 0.6615,
"step": 10920
},
{
"epoch": 1.4600895063789994,
"grad_norm": 4.34375,
"learning_rate": 1.3501870157627572e-06,
"loss": 0.6672,
"step": 10930
},
{
"epoch": 1.461425422483468,
"grad_norm": 4.9375,
"learning_rate": 1.3468474485706653e-06,
"loss": 0.6822,
"step": 10940
},
{
"epoch": 1.4627613385879368,
"grad_norm": 4.65625,
"learning_rate": 1.3435078813785734e-06,
"loss": 0.6513,
"step": 10950
},
{
"epoch": 1.4640972546924054,
"grad_norm": 4.71875,
"learning_rate": 1.3401683141864816e-06,
"loss": 0.6643,
"step": 10960
},
{
"epoch": 1.465433170796874,
"grad_norm": 4.78125,
"learning_rate": 1.3368287469943897e-06,
"loss": 0.6442,
"step": 10970
},
{
"epoch": 1.4667690869013426,
"grad_norm": 4.875,
"learning_rate": 1.3334891798022978e-06,
"loss": 0.6629,
"step": 10980
},
{
"epoch": 1.4681050030058111,
"grad_norm": 4.78125,
"learning_rate": 1.3301496126102059e-06,
"loss": 0.6625,
"step": 10990
},
{
"epoch": 1.46944091911028,
"grad_norm": 5.21875,
"learning_rate": 1.326810045418114e-06,
"loss": 0.6929,
"step": 11000
},
{
"epoch": 1.46944091911028,
"eval_loss": 0.6789782047271729,
"eval_runtime": 254.0545,
"eval_samples_per_second": 26.191,
"eval_steps_per_second": 3.275,
"step": 11000
},
{
"epoch": 1.4707768352147486,
"grad_norm": 5.0,
"learning_rate": 1.3234704782260222e-06,
"loss": 0.641,
"step": 11010
},
{
"epoch": 1.4721127513192171,
"grad_norm": 4.65625,
"learning_rate": 1.3201309110339299e-06,
"loss": 0.6764,
"step": 11020
},
{
"epoch": 1.4734486674236857,
"grad_norm": 4.84375,
"learning_rate": 1.3167913438418382e-06,
"loss": 0.6698,
"step": 11030
},
{
"epoch": 1.4747845835281543,
"grad_norm": 5.1875,
"learning_rate": 1.3134517766497463e-06,
"loss": 0.6749,
"step": 11040
},
{
"epoch": 1.4761204996326232,
"grad_norm": 4.9375,
"learning_rate": 1.3101122094576543e-06,
"loss": 0.6656,
"step": 11050
},
{
"epoch": 1.4774564157370917,
"grad_norm": 4.84375,
"learning_rate": 1.3067726422655624e-06,
"loss": 0.6839,
"step": 11060
},
{
"epoch": 1.4787923318415603,
"grad_norm": 4.8125,
"learning_rate": 1.3034330750734705e-06,
"loss": 0.6592,
"step": 11070
},
{
"epoch": 1.480128247946029,
"grad_norm": 4.8125,
"learning_rate": 1.3000935078813788e-06,
"loss": 0.6536,
"step": 11080
},
{
"epoch": 1.4814641640504975,
"grad_norm": 5.5,
"learning_rate": 1.2967539406892868e-06,
"loss": 0.686,
"step": 11090
},
{
"epoch": 1.4828000801549663,
"grad_norm": 4.71875,
"learning_rate": 1.293414373497195e-06,
"loss": 0.6648,
"step": 11100
},
{
"epoch": 1.484135996259435,
"grad_norm": 4.46875,
"learning_rate": 1.290074806305103e-06,
"loss": 0.6624,
"step": 11110
},
{
"epoch": 1.4854719123639035,
"grad_norm": 4.8125,
"learning_rate": 1.286735239113011e-06,
"loss": 0.6682,
"step": 11120
},
{
"epoch": 1.4868078284683721,
"grad_norm": 4.8125,
"learning_rate": 1.2833956719209191e-06,
"loss": 0.6688,
"step": 11130
},
{
"epoch": 1.4881437445728407,
"grad_norm": 4.78125,
"learning_rate": 1.2800561047288274e-06,
"loss": 0.6798,
"step": 11140
},
{
"epoch": 1.4894796606773095,
"grad_norm": 4.625,
"learning_rate": 1.2767165375367355e-06,
"loss": 0.6748,
"step": 11150
},
{
"epoch": 1.4908155767817781,
"grad_norm": 4.90625,
"learning_rate": 1.2733769703446434e-06,
"loss": 0.6671,
"step": 11160
},
{
"epoch": 1.4921514928862467,
"grad_norm": 4.75,
"learning_rate": 1.2700374031525514e-06,
"loss": 0.6714,
"step": 11170
},
{
"epoch": 1.4934874089907153,
"grad_norm": 4.59375,
"learning_rate": 1.2666978359604595e-06,
"loss": 0.6478,
"step": 11180
},
{
"epoch": 1.494823325095184,
"grad_norm": 4.84375,
"learning_rate": 1.2633582687683676e-06,
"loss": 0.6744,
"step": 11190
},
{
"epoch": 1.4961592411996527,
"grad_norm": 4.65625,
"learning_rate": 1.2600187015762757e-06,
"loss": 0.6778,
"step": 11200
},
{
"epoch": 1.4974951573041213,
"grad_norm": 4.5625,
"learning_rate": 1.256679134384184e-06,
"loss": 0.6648,
"step": 11210
},
{
"epoch": 1.49883107340859,
"grad_norm": 4.5,
"learning_rate": 1.253339567192092e-06,
"loss": 0.6656,
"step": 11220
},
{
"epoch": 1.5001669895130587,
"grad_norm": 4.65625,
"learning_rate": 1.25e-06,
"loss": 0.6629,
"step": 11230
},
{
"epoch": 1.501502905617527,
"grad_norm": 4.75,
"learning_rate": 1.2466604328079082e-06,
"loss": 0.6662,
"step": 11240
},
{
"epoch": 1.502838821721996,
"grad_norm": 5.0625,
"learning_rate": 1.2433208656158163e-06,
"loss": 0.6685,
"step": 11250
},
{
"epoch": 1.502838821721996,
"eval_loss": 0.6789219975471497,
"eval_runtime": 252.0317,
"eval_samples_per_second": 26.401,
"eval_steps_per_second": 3.301,
"step": 11250
},
{
"epoch": 1.5041747378264645,
"grad_norm": 4.6875,
"learning_rate": 1.2399812984237245e-06,
"loss": 0.6672,
"step": 11260
},
{
"epoch": 1.505510653930933,
"grad_norm": 4.96875,
"learning_rate": 1.2366417312316324e-06,
"loss": 0.6857,
"step": 11270
},
{
"epoch": 1.506846570035402,
"grad_norm": 4.59375,
"learning_rate": 1.2333021640395405e-06,
"loss": 0.6491,
"step": 11280
},
{
"epoch": 1.5081824861398703,
"grad_norm": 4.9375,
"learning_rate": 1.2299625968474486e-06,
"loss": 0.6803,
"step": 11290
},
{
"epoch": 1.509518402244339,
"grad_norm": 4.59375,
"learning_rate": 1.2266230296553568e-06,
"loss": 0.6952,
"step": 11300
},
{
"epoch": 1.5108543183488077,
"grad_norm": 4.625,
"learning_rate": 1.223283462463265e-06,
"loss": 0.6621,
"step": 11310
},
{
"epoch": 1.5121902344532763,
"grad_norm": 5.03125,
"learning_rate": 1.219943895271173e-06,
"loss": 0.6846,
"step": 11320
},
{
"epoch": 1.513526150557745,
"grad_norm": 5.125,
"learning_rate": 1.216604328079081e-06,
"loss": 0.6836,
"step": 11330
},
{
"epoch": 1.5148620666622135,
"grad_norm": 4.9375,
"learning_rate": 1.2132647608869891e-06,
"loss": 0.6715,
"step": 11340
},
{
"epoch": 1.5161979827666823,
"grad_norm": 5.03125,
"learning_rate": 1.2099251936948972e-06,
"loss": 0.667,
"step": 11350
},
{
"epoch": 1.5175338988711509,
"grad_norm": 4.5625,
"learning_rate": 1.2065856265028053e-06,
"loss": 0.6833,
"step": 11360
},
{
"epoch": 1.5188698149756195,
"grad_norm": 4.90625,
"learning_rate": 1.2032460593107134e-06,
"loss": 0.6887,
"step": 11370
},
{
"epoch": 1.5202057310800883,
"grad_norm": 5.09375,
"learning_rate": 1.1999064921186214e-06,
"loss": 0.6573,
"step": 11380
},
{
"epoch": 1.5215416471845566,
"grad_norm": 4.6875,
"learning_rate": 1.1965669249265297e-06,
"loss": 0.6574,
"step": 11390
},
{
"epoch": 1.5228775632890255,
"grad_norm": 4.84375,
"learning_rate": 1.1932273577344378e-06,
"loss": 0.688,
"step": 11400
},
{
"epoch": 1.524213479393494,
"grad_norm": 4.59375,
"learning_rate": 1.1898877905423459e-06,
"loss": 0.6494,
"step": 11410
},
{
"epoch": 1.5255493954979626,
"grad_norm": 4.5625,
"learning_rate": 1.186548223350254e-06,
"loss": 0.673,
"step": 11420
},
{
"epoch": 1.5268853116024315,
"grad_norm": 5.0625,
"learning_rate": 1.183208656158162e-06,
"loss": 0.6772,
"step": 11430
},
{
"epoch": 1.5282212277069,
"grad_norm": 5.0625,
"learning_rate": 1.17986908896607e-06,
"loss": 0.6791,
"step": 11440
},
{
"epoch": 1.5295571438113686,
"grad_norm": 4.78125,
"learning_rate": 1.1765295217739782e-06,
"loss": 0.654,
"step": 11450
},
{
"epoch": 1.5308930599158372,
"grad_norm": 5.09375,
"learning_rate": 1.1731899545818863e-06,
"loss": 0.6777,
"step": 11460
},
{
"epoch": 1.5322289760203058,
"grad_norm": 4.875,
"learning_rate": 1.1698503873897943e-06,
"loss": 0.6455,
"step": 11470
},
{
"epoch": 1.5335648921247746,
"grad_norm": 4.5,
"learning_rate": 1.1665108201977026e-06,
"loss": 0.6943,
"step": 11480
},
{
"epoch": 1.5349008082292432,
"grad_norm": 4.96875,
"learning_rate": 1.1631712530056105e-06,
"loss": 0.65,
"step": 11490
},
{
"epoch": 1.5362367243337118,
"grad_norm": 4.71875,
"learning_rate": 1.1598316858135186e-06,
"loss": 0.6714,
"step": 11500
},
{
"epoch": 1.5362367243337118,
"eval_loss": 0.6788843870162964,
"eval_runtime": 254.9564,
"eval_samples_per_second": 26.099,
"eval_steps_per_second": 3.263,
"step": 11500
},
{
"epoch": 1.5375726404381804,
"grad_norm": 4.8125,
"learning_rate": 1.1564921186214268e-06,
"loss": 0.6622,
"step": 11510
},
{
"epoch": 1.538908556542649,
"grad_norm": 4.65625,
"learning_rate": 1.153152551429335e-06,
"loss": 0.6549,
"step": 11520
},
{
"epoch": 1.5402444726471178,
"grad_norm": 4.90625,
"learning_rate": 1.149812984237243e-06,
"loss": 0.6714,
"step": 11530
},
{
"epoch": 1.5415803887515864,
"grad_norm": 4.5,
"learning_rate": 1.146473417045151e-06,
"loss": 0.6765,
"step": 11540
},
{
"epoch": 1.542916304856055,
"grad_norm": 4.71875,
"learning_rate": 1.1431338498530591e-06,
"loss": 0.6595,
"step": 11550
},
{
"epoch": 1.5442522209605238,
"grad_norm": 4.90625,
"learning_rate": 1.1397942826609672e-06,
"loss": 0.6726,
"step": 11560
},
{
"epoch": 1.5455881370649922,
"grad_norm": 4.75,
"learning_rate": 1.1364547154688753e-06,
"loss": 0.6496,
"step": 11570
},
{
"epoch": 1.546924053169461,
"grad_norm": 4.53125,
"learning_rate": 1.1331151482767834e-06,
"loss": 0.6751,
"step": 11580
},
{
"epoch": 1.5482599692739296,
"grad_norm": 5.0,
"learning_rate": 1.1297755810846914e-06,
"loss": 0.6407,
"step": 11590
},
{
"epoch": 1.5495958853783982,
"grad_norm": 4.46875,
"learning_rate": 1.1264360138925997e-06,
"loss": 0.6579,
"step": 11600
},
{
"epoch": 1.550931801482867,
"grad_norm": 4.65625,
"learning_rate": 1.1230964467005078e-06,
"loss": 0.6686,
"step": 11610
},
{
"epoch": 1.5522677175873354,
"grad_norm": 5.21875,
"learning_rate": 1.1197568795084159e-06,
"loss": 0.6776,
"step": 11620
},
{
"epoch": 1.5536036336918042,
"grad_norm": 4.75,
"learning_rate": 1.1164173123163237e-06,
"loss": 0.6502,
"step": 11630
},
{
"epoch": 1.5549395497962728,
"grad_norm": 4.875,
"learning_rate": 1.113077745124232e-06,
"loss": 0.6772,
"step": 11640
},
{
"epoch": 1.5562754659007414,
"grad_norm": 4.625,
"learning_rate": 1.10973817793214e-06,
"loss": 0.6787,
"step": 11650
},
{
"epoch": 1.5576113820052102,
"grad_norm": 4.75,
"learning_rate": 1.1063986107400482e-06,
"loss": 0.6991,
"step": 11660
},
{
"epoch": 1.5589472981096786,
"grad_norm": 4.875,
"learning_rate": 1.1030590435479563e-06,
"loss": 0.7204,
"step": 11670
},
{
"epoch": 1.5602832142141474,
"grad_norm": 4.75,
"learning_rate": 1.0997194763558643e-06,
"loss": 0.6476,
"step": 11680
},
{
"epoch": 1.561619130318616,
"grad_norm": 4.78125,
"learning_rate": 1.0963799091637726e-06,
"loss": 0.6545,
"step": 11690
},
{
"epoch": 1.5629550464230846,
"grad_norm": 4.625,
"learning_rate": 1.0930403419716805e-06,
"loss": 0.6763,
"step": 11700
},
{
"epoch": 1.5642909625275534,
"grad_norm": 4.84375,
"learning_rate": 1.0897007747795886e-06,
"loss": 0.6731,
"step": 11710
},
{
"epoch": 1.5656268786320218,
"grad_norm": 4.84375,
"learning_rate": 1.0863612075874966e-06,
"loss": 0.6615,
"step": 11720
},
{
"epoch": 1.5669627947364906,
"grad_norm": 4.65625,
"learning_rate": 1.083021640395405e-06,
"loss": 0.6916,
"step": 11730
},
{
"epoch": 1.5682987108409592,
"grad_norm": 4.78125,
"learning_rate": 1.079682073203313e-06,
"loss": 0.6578,
"step": 11740
},
{
"epoch": 1.5696346269454278,
"grad_norm": 4.96875,
"learning_rate": 1.076342506011221e-06,
"loss": 0.6679,
"step": 11750
},
{
"epoch": 1.5696346269454278,
"eval_loss": 0.6788153648376465,
"eval_runtime": 256.0024,
"eval_samples_per_second": 25.992,
"eval_steps_per_second": 3.25,
"step": 11750
},
{
"epoch": 1.5709705430498966,
"grad_norm": 4.75,
"learning_rate": 1.0730029388191291e-06,
"loss": 0.6686,
"step": 11760
},
{
"epoch": 1.572306459154365,
"grad_norm": 4.8125,
"learning_rate": 1.0696633716270372e-06,
"loss": 0.6895,
"step": 11770
},
{
"epoch": 1.5736423752588338,
"grad_norm": 4.78125,
"learning_rate": 1.0663238044349453e-06,
"loss": 0.6539,
"step": 11780
},
{
"epoch": 1.5749782913633024,
"grad_norm": 4.5625,
"learning_rate": 1.0629842372428534e-06,
"loss": 0.6603,
"step": 11790
},
{
"epoch": 1.576314207467771,
"grad_norm": 5.0,
"learning_rate": 1.0596446700507614e-06,
"loss": 0.6454,
"step": 11800
},
{
"epoch": 1.5776501235722398,
"grad_norm": 4.9375,
"learning_rate": 1.0563051028586695e-06,
"loss": 0.6659,
"step": 11810
},
{
"epoch": 1.5789860396767081,
"grad_norm": 4.78125,
"learning_rate": 1.0529655356665778e-06,
"loss": 0.6675,
"step": 11820
},
{
"epoch": 1.580321955781177,
"grad_norm": 4.75,
"learning_rate": 1.0496259684744859e-06,
"loss": 0.6923,
"step": 11830
},
{
"epoch": 1.5816578718856455,
"grad_norm": 4.53125,
"learning_rate": 1.046286401282394e-06,
"loss": 0.6755,
"step": 11840
},
{
"epoch": 1.5829937879901141,
"grad_norm": 4.875,
"learning_rate": 1.0429468340903018e-06,
"loss": 0.679,
"step": 11850
},
{
"epoch": 1.584329704094583,
"grad_norm": 4.65625,
"learning_rate": 1.0396072668982101e-06,
"loss": 0.6385,
"step": 11860
},
{
"epoch": 1.5856656201990516,
"grad_norm": 4.84375,
"learning_rate": 1.0362676997061182e-06,
"loss": 0.6731,
"step": 11870
},
{
"epoch": 1.5870015363035201,
"grad_norm": 4.34375,
"learning_rate": 1.0329281325140263e-06,
"loss": 0.6649,
"step": 11880
},
{
"epoch": 1.5883374524079887,
"grad_norm": 4.90625,
"learning_rate": 1.0295885653219343e-06,
"loss": 0.66,
"step": 11890
},
{
"epoch": 1.5896733685124573,
"grad_norm": 4.84375,
"learning_rate": 1.0262489981298424e-06,
"loss": 0.6432,
"step": 11900
},
{
"epoch": 1.5910092846169261,
"grad_norm": 4.75,
"learning_rate": 1.0229094309377507e-06,
"loss": 0.6635,
"step": 11910
},
{
"epoch": 1.5923452007213947,
"grad_norm": 4.6875,
"learning_rate": 1.0195698637456586e-06,
"loss": 0.6655,
"step": 11920
},
{
"epoch": 1.5936811168258633,
"grad_norm": 4.65625,
"learning_rate": 1.0162302965535666e-06,
"loss": 0.6618,
"step": 11930
},
{
"epoch": 1.595017032930332,
"grad_norm": 4.84375,
"learning_rate": 1.0128907293614747e-06,
"loss": 0.676,
"step": 11940
},
{
"epoch": 1.5963529490348005,
"grad_norm": 4.9375,
"learning_rate": 1.009551162169383e-06,
"loss": 0.6924,
"step": 11950
},
{
"epoch": 1.5976888651392693,
"grad_norm": 4.875,
"learning_rate": 1.006211594977291e-06,
"loss": 0.6555,
"step": 11960
},
{
"epoch": 1.599024781243738,
"grad_norm": 4.75,
"learning_rate": 1.0028720277851991e-06,
"loss": 0.6638,
"step": 11970
},
{
"epoch": 1.6003606973482065,
"grad_norm": 5.09375,
"learning_rate": 9.995324605931072e-07,
"loss": 0.6657,
"step": 11980
},
{
"epoch": 1.6016966134526753,
"grad_norm": 4.59375,
"learning_rate": 9.961928934010153e-07,
"loss": 0.648,
"step": 11990
},
{
"epoch": 1.6030325295571437,
"grad_norm": 4.5,
"learning_rate": 9.928533262089234e-07,
"loss": 0.6535,
"step": 12000
},
{
"epoch": 1.6030325295571437,
"eval_loss": 0.678803563117981,
"eval_runtime": 253.088,
"eval_samples_per_second": 26.291,
"eval_steps_per_second": 3.287,
"step": 12000
},
{
"epoch": 1.6043684456616125,
"grad_norm": 4.71875,
"learning_rate": 9.895137590168314e-07,
"loss": 0.6668,
"step": 12010
},
{
"epoch": 1.6057043617660811,
"grad_norm": 4.65625,
"learning_rate": 9.861741918247395e-07,
"loss": 0.6635,
"step": 12020
},
{
"epoch": 1.6070402778705497,
"grad_norm": 5.0625,
"learning_rate": 9.828346246326476e-07,
"loss": 0.6648,
"step": 12030
},
{
"epoch": 1.6083761939750185,
"grad_norm": 4.46875,
"learning_rate": 9.794950574405559e-07,
"loss": 0.6608,
"step": 12040
},
{
"epoch": 1.609712110079487,
"grad_norm": 5.0625,
"learning_rate": 9.76155490248464e-07,
"loss": 0.6866,
"step": 12050
},
{
"epoch": 1.6110480261839557,
"grad_norm": 4.90625,
"learning_rate": 9.72815923056372e-07,
"loss": 0.6963,
"step": 12060
},
{
"epoch": 1.6123839422884243,
"grad_norm": 4.78125,
"learning_rate": 9.694763558642801e-07,
"loss": 0.6616,
"step": 12070
},
{
"epoch": 1.613719858392893,
"grad_norm": 4.75,
"learning_rate": 9.661367886721882e-07,
"loss": 0.6804,
"step": 12080
},
{
"epoch": 1.6150557744973617,
"grad_norm": 4.65625,
"learning_rate": 9.627972214800963e-07,
"loss": 0.6514,
"step": 12090
},
{
"epoch": 1.61639169060183,
"grad_norm": 4.9375,
"learning_rate": 9.594576542880043e-07,
"loss": 0.6579,
"step": 12100
},
{
"epoch": 1.617727606706299,
"grad_norm": 4.78125,
"learning_rate": 9.561180870959124e-07,
"loss": 0.6773,
"step": 12110
},
{
"epoch": 1.6190635228107675,
"grad_norm": 4.90625,
"learning_rate": 9.527785199038206e-07,
"loss": 0.6912,
"step": 12120
},
{
"epoch": 1.620399438915236,
"grad_norm": 4.875,
"learning_rate": 9.494389527117287e-07,
"loss": 0.6727,
"step": 12130
},
{
"epoch": 1.621735355019705,
"grad_norm": 4.9375,
"learning_rate": 9.460993855196366e-07,
"loss": 0.6412,
"step": 12140
},
{
"epoch": 1.6230712711241733,
"grad_norm": 5.0625,
"learning_rate": 9.427598183275448e-07,
"loss": 0.6669,
"step": 12150
},
{
"epoch": 1.624407187228642,
"grad_norm": 4.96875,
"learning_rate": 9.394202511354529e-07,
"loss": 0.6775,
"step": 12160
},
{
"epoch": 1.6257431033331107,
"grad_norm": 4.4375,
"learning_rate": 9.36080683943361e-07,
"loss": 0.6595,
"step": 12170
},
{
"epoch": 1.6270790194375793,
"grad_norm": 4.6875,
"learning_rate": 9.327411167512691e-07,
"loss": 0.6493,
"step": 12180
},
{
"epoch": 1.628414935542048,
"grad_norm": 5.0,
"learning_rate": 9.294015495591772e-07,
"loss": 0.6835,
"step": 12190
},
{
"epoch": 1.6297508516465165,
"grad_norm": 5.09375,
"learning_rate": 9.260619823670854e-07,
"loss": 0.692,
"step": 12200
},
{
"epoch": 1.6310867677509853,
"grad_norm": 4.59375,
"learning_rate": 9.227224151749934e-07,
"loss": 0.675,
"step": 12210
},
{
"epoch": 1.6324226838554539,
"grad_norm": 4.71875,
"learning_rate": 9.193828479829014e-07,
"loss": 0.667,
"step": 12220
},
{
"epoch": 1.6337585999599225,
"grad_norm": 5.0625,
"learning_rate": 9.160432807908095e-07,
"loss": 0.6489,
"step": 12230
},
{
"epoch": 1.6350945160643913,
"grad_norm": 4.6875,
"learning_rate": 9.127037135987177e-07,
"loss": 0.6587,
"step": 12240
},
{
"epoch": 1.6364304321688596,
"grad_norm": 4.90625,
"learning_rate": 9.093641464066258e-07,
"loss": 0.6712,
"step": 12250
},
{
"epoch": 1.6364304321688596,
"eval_loss": 0.6787741780281067,
"eval_runtime": 253.048,
"eval_samples_per_second": 26.295,
"eval_steps_per_second": 3.288,
"step": 12250
},
{
"epoch": 1.6377663482733285,
"grad_norm": 4.8125,
"learning_rate": 9.060245792145339e-07,
"loss": 0.6666,
"step": 12260
},
{
"epoch": 1.639102264377797,
"grad_norm": 4.625,
"learning_rate": 9.02685012022442e-07,
"loss": 0.6698,
"step": 12270
},
{
"epoch": 1.6404381804822656,
"grad_norm": 4.78125,
"learning_rate": 8.9934544483035e-07,
"loss": 0.6425,
"step": 12280
},
{
"epoch": 1.6417740965867345,
"grad_norm": 5.09375,
"learning_rate": 8.960058776382581e-07,
"loss": 0.6872,
"step": 12290
},
{
"epoch": 1.643110012691203,
"grad_norm": 4.59375,
"learning_rate": 8.926663104461663e-07,
"loss": 0.6683,
"step": 12300
},
{
"epoch": 1.6444459287956716,
"grad_norm": 4.90625,
"learning_rate": 8.893267432540743e-07,
"loss": 0.6616,
"step": 12310
},
{
"epoch": 1.6457818449001402,
"grad_norm": 4.46875,
"learning_rate": 8.859871760619824e-07,
"loss": 0.6728,
"step": 12320
},
{
"epoch": 1.6471177610046088,
"grad_norm": 4.71875,
"learning_rate": 8.826476088698906e-07,
"loss": 0.6931,
"step": 12330
},
{
"epoch": 1.6484536771090776,
"grad_norm": 4.5625,
"learning_rate": 8.793080416777987e-07,
"loss": 0.6653,
"step": 12340
},
{
"epoch": 1.6497895932135462,
"grad_norm": 5.0,
"learning_rate": 8.759684744857067e-07,
"loss": 0.6794,
"step": 12350
},
{
"epoch": 1.6511255093180148,
"grad_norm": 4.8125,
"learning_rate": 8.726289072936148e-07,
"loss": 0.6931,
"step": 12360
},
{
"epoch": 1.6524614254224834,
"grad_norm": 4.71875,
"learning_rate": 8.692893401015229e-07,
"loss": 0.6453,
"step": 12370
},
{
"epoch": 1.653797341526952,
"grad_norm": 4.625,
"learning_rate": 8.65949772909431e-07,
"loss": 0.6864,
"step": 12380
},
{
"epoch": 1.6551332576314208,
"grad_norm": 4.875,
"learning_rate": 8.626102057173391e-07,
"loss": 0.6689,
"step": 12390
},
{
"epoch": 1.6564691737358894,
"grad_norm": 5.59375,
"learning_rate": 8.592706385252472e-07,
"loss": 0.6754,
"step": 12400
},
{
"epoch": 1.657805089840358,
"grad_norm": 4.78125,
"learning_rate": 8.559310713331553e-07,
"loss": 0.6882,
"step": 12410
},
{
"epoch": 1.6591410059448268,
"grad_norm": 5.0,
"learning_rate": 8.525915041410635e-07,
"loss": 0.6172,
"step": 12420
},
{
"epoch": 1.6604769220492952,
"grad_norm": 4.96875,
"learning_rate": 8.492519369489714e-07,
"loss": 0.7231,
"step": 12430
},
{
"epoch": 1.661812838153764,
"grad_norm": 5.0,
"learning_rate": 8.459123697568795e-07,
"loss": 0.6635,
"step": 12440
},
{
"epoch": 1.6631487542582326,
"grad_norm": 4.46875,
"learning_rate": 8.425728025647877e-07,
"loss": 0.6489,
"step": 12450
},
{
"epoch": 1.6644846703627012,
"grad_norm": 4.84375,
"learning_rate": 8.392332353726958e-07,
"loss": 0.681,
"step": 12460
},
{
"epoch": 1.66582058646717,
"grad_norm": 4.59375,
"learning_rate": 8.358936681806039e-07,
"loss": 0.6425,
"step": 12470
},
{
"epoch": 1.6671565025716384,
"grad_norm": 4.78125,
"learning_rate": 8.32554100988512e-07,
"loss": 0.6761,
"step": 12480
},
{
"epoch": 1.6684924186761072,
"grad_norm": 4.84375,
"learning_rate": 8.292145337964201e-07,
"loss": 0.6554,
"step": 12490
},
{
"epoch": 1.6698283347805758,
"grad_norm": 4.9375,
"learning_rate": 8.258749666043281e-07,
"loss": 0.6617,
"step": 12500
},
{
"epoch": 1.6698283347805758,
"eval_loss": 0.6787148118019104,
"eval_runtime": 255.9236,
"eval_samples_per_second": 26.0,
"eval_steps_per_second": 3.251,
"step": 12500
},
{
"epoch": 1.6711642508850444,
"grad_norm": 4.6875,
"learning_rate": 8.225353994122362e-07,
"loss": 0.6687,
"step": 12510
},
{
"epoch": 1.6725001669895132,
"grad_norm": 5.125,
"learning_rate": 8.191958322201443e-07,
"loss": 0.6908,
"step": 12520
},
{
"epoch": 1.6738360830939816,
"grad_norm": 5.03125,
"learning_rate": 8.158562650280524e-07,
"loss": 0.6925,
"step": 12530
},
{
"epoch": 1.6751719991984504,
"grad_norm": 4.75,
"learning_rate": 8.125166978359606e-07,
"loss": 0.6615,
"step": 12540
},
{
"epoch": 1.676507915302919,
"grad_norm": 4.84375,
"learning_rate": 8.091771306438687e-07,
"loss": 0.6412,
"step": 12550
},
{
"epoch": 1.6778438314073876,
"grad_norm": 5.03125,
"learning_rate": 8.058375634517767e-07,
"loss": 0.6788,
"step": 12560
},
{
"epoch": 1.6791797475118564,
"grad_norm": 4.65625,
"learning_rate": 8.024979962596847e-07,
"loss": 0.6917,
"step": 12570
},
{
"epoch": 1.6805156636163248,
"grad_norm": 4.90625,
"learning_rate": 7.991584290675929e-07,
"loss": 0.6743,
"step": 12580
},
{
"epoch": 1.6818515797207936,
"grad_norm": 4.84375,
"learning_rate": 7.95818861875501e-07,
"loss": 0.6672,
"step": 12590
},
{
"epoch": 1.6831874958252622,
"grad_norm": 4.84375,
"learning_rate": 7.92479294683409e-07,
"loss": 0.6752,
"step": 12600
},
{
"epoch": 1.6845234119297308,
"grad_norm": 4.96875,
"learning_rate": 7.891397274913172e-07,
"loss": 0.6824,
"step": 12610
},
{
"epoch": 1.6858593280341996,
"grad_norm": 4.9375,
"learning_rate": 7.858001602992253e-07,
"loss": 0.6788,
"step": 12620
},
{
"epoch": 1.687195244138668,
"grad_norm": 4.84375,
"learning_rate": 7.824605931071335e-07,
"loss": 0.6724,
"step": 12630
},
{
"epoch": 1.6885311602431368,
"grad_norm": 4.59375,
"learning_rate": 7.791210259150415e-07,
"loss": 0.6868,
"step": 12640
},
{
"epoch": 1.6898670763476054,
"grad_norm": 4.90625,
"learning_rate": 7.757814587229495e-07,
"loss": 0.6614,
"step": 12650
},
{
"epoch": 1.691202992452074,
"grad_norm": 4.71875,
"learning_rate": 7.724418915308576e-07,
"loss": 0.6705,
"step": 12660
},
{
"epoch": 1.6925389085565428,
"grad_norm": 4.84375,
"learning_rate": 7.691023243387658e-07,
"loss": 0.6377,
"step": 12670
},
{
"epoch": 1.6938748246610111,
"grad_norm": 4.875,
"learning_rate": 7.657627571466739e-07,
"loss": 0.669,
"step": 12680
},
{
"epoch": 1.69521074076548,
"grad_norm": 5.0625,
"learning_rate": 7.624231899545819e-07,
"loss": 0.6525,
"step": 12690
},
{
"epoch": 1.6965466568699485,
"grad_norm": 5.1875,
"learning_rate": 7.590836227624901e-07,
"loss": 0.6686,
"step": 12700
},
{
"epoch": 1.6978825729744171,
"grad_norm": 5.125,
"learning_rate": 7.557440555703982e-07,
"loss": 0.6778,
"step": 12710
},
{
"epoch": 1.699218489078886,
"grad_norm": 4.75,
"learning_rate": 7.524044883783062e-07,
"loss": 0.6682,
"step": 12720
},
{
"epoch": 1.7005544051833543,
"grad_norm": 4.9375,
"learning_rate": 7.490649211862143e-07,
"loss": 0.6791,
"step": 12730
},
{
"epoch": 1.7018903212878231,
"grad_norm": 5.0625,
"learning_rate": 7.457253539941224e-07,
"loss": 0.6711,
"step": 12740
},
{
"epoch": 1.7032262373922917,
"grad_norm": 4.65625,
"learning_rate": 7.423857868020305e-07,
"loss": 0.6629,
"step": 12750
},
{
"epoch": 1.7032262373922917,
"eval_loss": 0.6787093281745911,
"eval_runtime": 254.9039,
"eval_samples_per_second": 26.104,
"eval_steps_per_second": 3.264,
"step": 12750
},
{
"epoch": 1.7045621534967603,
"grad_norm": 5.09375,
"learning_rate": 7.390462196099387e-07,
"loss": 0.6523,
"step": 12760
},
{
"epoch": 1.7058980696012291,
"grad_norm": 4.78125,
"learning_rate": 7.357066524178467e-07,
"loss": 0.6495,
"step": 12770
},
{
"epoch": 1.7072339857056977,
"grad_norm": 4.96875,
"learning_rate": 7.323670852257548e-07,
"loss": 0.667,
"step": 12780
},
{
"epoch": 1.7085699018101663,
"grad_norm": 4.625,
"learning_rate": 7.290275180336628e-07,
"loss": 0.6818,
"step": 12790
},
{
"epoch": 1.709905817914635,
"grad_norm": 5.28125,
"learning_rate": 7.25687950841571e-07,
"loss": 0.6514,
"step": 12800
},
{
"epoch": 1.7112417340191035,
"grad_norm": 4.8125,
"learning_rate": 7.22348383649479e-07,
"loss": 0.6689,
"step": 12810
},
{
"epoch": 1.7125776501235723,
"grad_norm": 5.15625,
"learning_rate": 7.190088164573872e-07,
"loss": 0.7045,
"step": 12820
},
{
"epoch": 1.713913566228041,
"grad_norm": 5.09375,
"learning_rate": 7.156692492652953e-07,
"loss": 0.6442,
"step": 12830
},
{
"epoch": 1.7152494823325095,
"grad_norm": 4.875,
"learning_rate": 7.123296820732034e-07,
"loss": 0.6749,
"step": 12840
},
{
"epoch": 1.716585398436978,
"grad_norm": 4.90625,
"learning_rate": 7.089901148811116e-07,
"loss": 0.6818,
"step": 12850
},
{
"epoch": 1.7179213145414467,
"grad_norm": 4.53125,
"learning_rate": 7.056505476890195e-07,
"loss": 0.6583,
"step": 12860
},
{
"epoch": 1.7192572306459155,
"grad_norm": 4.96875,
"learning_rate": 7.023109804969276e-07,
"loss": 0.6675,
"step": 12870
},
{
"epoch": 1.720593146750384,
"grad_norm": 4.8125,
"learning_rate": 6.989714133048357e-07,
"loss": 0.6602,
"step": 12880
},
{
"epoch": 1.7219290628548527,
"grad_norm": 4.71875,
"learning_rate": 6.956318461127439e-07,
"loss": 0.6858,
"step": 12890
},
{
"epoch": 1.7232649789593215,
"grad_norm": 4.875,
"learning_rate": 6.922922789206519e-07,
"loss": 0.6703,
"step": 12900
},
{
"epoch": 1.7246008950637899,
"grad_norm": 4.71875,
"learning_rate": 6.889527117285601e-07,
"loss": 0.6676,
"step": 12910
},
{
"epoch": 1.7259368111682587,
"grad_norm": 4.6875,
"learning_rate": 6.856131445364682e-07,
"loss": 0.6844,
"step": 12920
},
{
"epoch": 1.7272727272727273,
"grad_norm": 4.78125,
"learning_rate": 6.822735773443762e-07,
"loss": 0.6718,
"step": 12930
},
{
"epoch": 1.7286086433771959,
"grad_norm": 4.8125,
"learning_rate": 6.789340101522842e-07,
"loss": 0.6595,
"step": 12940
},
{
"epoch": 1.7299445594816647,
"grad_norm": 5.125,
"learning_rate": 6.755944429601924e-07,
"loss": 0.6623,
"step": 12950
},
{
"epoch": 1.731280475586133,
"grad_norm": 4.5625,
"learning_rate": 6.722548757681005e-07,
"loss": 0.6675,
"step": 12960
},
{
"epoch": 1.7326163916906019,
"grad_norm": 4.625,
"learning_rate": 6.689153085760086e-07,
"loss": 0.6788,
"step": 12970
},
{
"epoch": 1.7339523077950705,
"grad_norm": 4.375,
"learning_rate": 6.655757413839167e-07,
"loss": 0.677,
"step": 12980
},
{
"epoch": 1.735288223899539,
"grad_norm": 4.6875,
"learning_rate": 6.622361741918248e-07,
"loss": 0.6718,
"step": 12990
},
{
"epoch": 1.7366241400040079,
"grad_norm": 4.5625,
"learning_rate": 6.58896606999733e-07,
"loss": 0.6677,
"step": 13000
},
{
"epoch": 1.7366241400040079,
"eval_loss": 0.6786794066429138,
"eval_runtime": 252.3209,
"eval_samples_per_second": 26.371,
"eval_steps_per_second": 3.297,
"step": 13000
},
{
"epoch": 1.7379600561084763,
"grad_norm": 4.6875,
"learning_rate": 6.55557039807641e-07,
"loss": 0.6563,
"step": 13010
},
{
"epoch": 1.739295972212945,
"grad_norm": 4.71875,
"learning_rate": 6.52217472615549e-07,
"loss": 0.672,
"step": 13020
},
{
"epoch": 1.7406318883174137,
"grad_norm": 4.78125,
"learning_rate": 6.488779054234571e-07,
"loss": 0.6504,
"step": 13030
},
{
"epoch": 1.7419678044218823,
"grad_norm": 4.6875,
"learning_rate": 6.455383382313653e-07,
"loss": 0.6389,
"step": 13040
},
{
"epoch": 1.743303720526351,
"grad_norm": 4.75,
"learning_rate": 6.421987710392734e-07,
"loss": 0.6679,
"step": 13050
},
{
"epoch": 1.7446396366308194,
"grad_norm": 5.0,
"learning_rate": 6.388592038471815e-07,
"loss": 0.6743,
"step": 13060
},
{
"epoch": 1.7459755527352883,
"grad_norm": 4.9375,
"learning_rate": 6.355196366550896e-07,
"loss": 0.637,
"step": 13070
},
{
"epoch": 1.7473114688397569,
"grad_norm": 5.03125,
"learning_rate": 6.321800694629976e-07,
"loss": 0.7063,
"step": 13080
},
{
"epoch": 1.7486473849442254,
"grad_norm": 4.78125,
"learning_rate": 6.288405022709057e-07,
"loss": 0.6807,
"step": 13090
},
{
"epoch": 1.7499833010486943,
"grad_norm": 4.4375,
"learning_rate": 6.255009350788139e-07,
"loss": 0.6544,
"step": 13100
},
{
"epoch": 1.7513192171531626,
"grad_norm": 5.1875,
"learning_rate": 6.221613678867219e-07,
"loss": 0.6704,
"step": 13110
},
{
"epoch": 1.7526551332576314,
"grad_norm": 5.0,
"learning_rate": 6.1882180069463e-07,
"loss": 0.6743,
"step": 13120
},
{
"epoch": 1.7539910493621,
"grad_norm": 5.0,
"learning_rate": 6.154822335025381e-07,
"loss": 0.6891,
"step": 13130
},
{
"epoch": 1.7553269654665686,
"grad_norm": 5.0,
"learning_rate": 6.121426663104462e-07,
"loss": 0.6579,
"step": 13140
},
{
"epoch": 1.7566628815710374,
"grad_norm": 4.65625,
"learning_rate": 6.088030991183543e-07,
"loss": 0.6588,
"step": 13150
},
{
"epoch": 1.7579987976755058,
"grad_norm": 5.1875,
"learning_rate": 6.054635319262624e-07,
"loss": 0.6987,
"step": 13160
},
{
"epoch": 1.7593347137799746,
"grad_norm": 4.625,
"learning_rate": 6.021239647341705e-07,
"loss": 0.6779,
"step": 13170
},
{
"epoch": 1.7606706298844432,
"grad_norm": 4.53125,
"learning_rate": 5.987843975420786e-07,
"loss": 0.6367,
"step": 13180
},
{
"epoch": 1.7620065459889118,
"grad_norm": 4.90625,
"learning_rate": 5.954448303499867e-07,
"loss": 0.6906,
"step": 13190
},
{
"epoch": 1.7633424620933806,
"grad_norm": 4.8125,
"learning_rate": 5.921052631578947e-07,
"loss": 0.6699,
"step": 13200
},
{
"epoch": 1.7646783781978492,
"grad_norm": 4.75,
"learning_rate": 5.887656959658029e-07,
"loss": 0.6809,
"step": 13210
},
{
"epoch": 1.7660142943023178,
"grad_norm": 5.0625,
"learning_rate": 5.85426128773711e-07,
"loss": 0.6771,
"step": 13220
},
{
"epoch": 1.7673502104067864,
"grad_norm": 4.90625,
"learning_rate": 5.82086561581619e-07,
"loss": 0.6526,
"step": 13230
},
{
"epoch": 1.768686126511255,
"grad_norm": 5.0,
"learning_rate": 5.787469943895271e-07,
"loss": 0.6666,
"step": 13240
},
{
"epoch": 1.7700220426157238,
"grad_norm": 4.71875,
"learning_rate": 5.754074271974353e-07,
"loss": 0.685,
"step": 13250
},
{
"epoch": 1.7700220426157238,
"eval_loss": 0.6786710023880005,
"eval_runtime": 253.6238,
"eval_samples_per_second": 26.236,
"eval_steps_per_second": 3.28,
"step": 13250
},
{
"epoch": 1.7713579587201924,
"grad_norm": 4.8125,
"learning_rate": 5.720678600053434e-07,
"loss": 0.6963,
"step": 13260
},
{
"epoch": 1.772693874824661,
"grad_norm": 5.21875,
"learning_rate": 5.687282928132515e-07,
"loss": 0.7044,
"step": 13270
},
{
"epoch": 1.7740297909291296,
"grad_norm": 5.0,
"learning_rate": 5.653887256211595e-07,
"loss": 0.686,
"step": 13280
},
{
"epoch": 1.7753657070335982,
"grad_norm": 5.03125,
"learning_rate": 5.620491584290676e-07,
"loss": 0.691,
"step": 13290
},
{
"epoch": 1.776701623138067,
"grad_norm": 4.71875,
"learning_rate": 5.587095912369758e-07,
"loss": 0.6586,
"step": 13300
},
{
"epoch": 1.7780375392425356,
"grad_norm": 4.8125,
"learning_rate": 5.553700240448838e-07,
"loss": 0.674,
"step": 13310
},
{
"epoch": 1.7793734553470042,
"grad_norm": 4.875,
"learning_rate": 5.520304568527919e-07,
"loss": 0.6892,
"step": 13320
},
{
"epoch": 1.780709371451473,
"grad_norm": 5.03125,
"learning_rate": 5.486908896607e-07,
"loss": 0.6801,
"step": 13330
},
{
"epoch": 1.7820452875559414,
"grad_norm": 4.84375,
"learning_rate": 5.453513224686081e-07,
"loss": 0.6776,
"step": 13340
},
{
"epoch": 1.7833812036604102,
"grad_norm": 4.9375,
"learning_rate": 5.420117552765162e-07,
"loss": 0.6897,
"step": 13350
},
{
"epoch": 1.7847171197648788,
"grad_norm": 5.0625,
"learning_rate": 5.386721880844243e-07,
"loss": 0.6743,
"step": 13360
},
{
"epoch": 1.7860530358693474,
"grad_norm": 4.5625,
"learning_rate": 5.353326208923324e-07,
"loss": 0.6762,
"step": 13370
},
{
"epoch": 1.7873889519738162,
"grad_norm": 4.78125,
"learning_rate": 5.319930537002405e-07,
"loss": 0.6733,
"step": 13380
},
{
"epoch": 1.7887248680782846,
"grad_norm": 4.96875,
"learning_rate": 5.286534865081486e-07,
"loss": 0.6794,
"step": 13390
},
{
"epoch": 1.7900607841827534,
"grad_norm": 4.78125,
"learning_rate": 5.253139193160566e-07,
"loss": 0.6623,
"step": 13400
},
{
"epoch": 1.791396700287222,
"grad_norm": 4.875,
"learning_rate": 5.219743521239648e-07,
"loss": 0.6877,
"step": 13410
},
{
"epoch": 1.7927326163916906,
"grad_norm": 4.96875,
"learning_rate": 5.186347849318728e-07,
"loss": 0.6762,
"step": 13420
},
{
"epoch": 1.7940685324961594,
"grad_norm": 4.34375,
"learning_rate": 5.15295217739781e-07,
"loss": 0.6652,
"step": 13430
},
{
"epoch": 1.7954044486006278,
"grad_norm": 4.71875,
"learning_rate": 5.11955650547689e-07,
"loss": 0.6657,
"step": 13440
},
{
"epoch": 1.7967403647050966,
"grad_norm": 5.0625,
"learning_rate": 5.086160833555972e-07,
"loss": 0.6811,
"step": 13450
},
{
"epoch": 1.7980762808095652,
"grad_norm": 4.8125,
"learning_rate": 5.052765161635052e-07,
"loss": 0.6888,
"step": 13460
},
{
"epoch": 1.7994121969140338,
"grad_norm": 4.8125,
"learning_rate": 5.019369489714134e-07,
"loss": 0.675,
"step": 13470
},
{
"epoch": 1.8007481130185026,
"grad_norm": 4.28125,
"learning_rate": 4.985973817793215e-07,
"loss": 0.6626,
"step": 13480
},
{
"epoch": 1.802084029122971,
"grad_norm": 4.75,
"learning_rate": 4.952578145872295e-07,
"loss": 0.6797,
"step": 13490
},
{
"epoch": 1.8034199452274398,
"grad_norm": 5.0625,
"learning_rate": 4.919182473951376e-07,
"loss": 0.6536,
"step": 13500
},
{
"epoch": 1.8034199452274398,
"eval_loss": 0.6786578297615051,
"eval_runtime": 256.0307,
"eval_samples_per_second": 25.989,
"eval_steps_per_second": 3.25,
"step": 13500
}
],
"logging_steps": 10,
"max_steps": 14972,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.979723334926205e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}