{
"best_metric": 0.032429687678813934,
"best_model_checkpoint": "saves/psy-course/Llama-3.1-8B-Instruct/train/fold7/checkpoint-1850",
"epoch": 4.9961802902979375,
"eval_steps": 50,
"global_step": 3270,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015278838808250574,
"grad_norm": 4.5117974281311035,
"learning_rate": 3.0581039755351682e-06,
"loss": 1.5891,
"step": 10
},
{
"epoch": 0.030557677616501147,
"grad_norm": 4.6750054359436035,
"learning_rate": 6.1162079510703365e-06,
"loss": 1.4808,
"step": 20
},
{
"epoch": 0.04583651642475172,
"grad_norm": 6.956159591674805,
"learning_rate": 9.174311926605506e-06,
"loss": 1.2673,
"step": 30
},
{
"epoch": 0.061115355233002294,
"grad_norm": 2.385298013687134,
"learning_rate": 1.2232415902140673e-05,
"loss": 0.9327,
"step": 40
},
{
"epoch": 0.07639419404125286,
"grad_norm": 1.4368096590042114,
"learning_rate": 1.5290519877675842e-05,
"loss": 0.6247,
"step": 50
},
{
"epoch": 0.07639419404125286,
"eval_loss": 0.40179339051246643,
"eval_runtime": 154.805,
"eval_samples_per_second": 7.519,
"eval_steps_per_second": 7.519,
"step": 50
},
{
"epoch": 0.09167303284950344,
"grad_norm": 1.7330724000930786,
"learning_rate": 1.834862385321101e-05,
"loss": 0.3854,
"step": 60
},
{
"epoch": 0.10695187165775401,
"grad_norm": 0.976122260093689,
"learning_rate": 2.140672782874618e-05,
"loss": 0.2537,
"step": 70
},
{
"epoch": 0.12223071046600459,
"grad_norm": 1.08048677444458,
"learning_rate": 2.4464831804281346e-05,
"loss": 0.2023,
"step": 80
},
{
"epoch": 0.13750954927425516,
"grad_norm": 0.7643369436264038,
"learning_rate": 2.7522935779816515e-05,
"loss": 0.1196,
"step": 90
},
{
"epoch": 0.15278838808250572,
"grad_norm": 1.6970341205596924,
"learning_rate": 3.0581039755351684e-05,
"loss": 0.1081,
"step": 100
},
{
"epoch": 0.15278838808250572,
"eval_loss": 0.07779182493686676,
"eval_runtime": 154.9816,
"eval_samples_per_second": 7.511,
"eval_steps_per_second": 7.511,
"step": 100
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.7916161417961121,
"learning_rate": 3.363914373088685e-05,
"loss": 0.0779,
"step": 110
},
{
"epoch": 0.18334606569900688,
"grad_norm": 0.6653062105178833,
"learning_rate": 3.669724770642202e-05,
"loss": 0.0757,
"step": 120
},
{
"epoch": 0.19862490450725745,
"grad_norm": 0.645976185798645,
"learning_rate": 3.9755351681957185e-05,
"loss": 0.0737,
"step": 130
},
{
"epoch": 0.21390374331550802,
"grad_norm": 1.0208077430725098,
"learning_rate": 4.281345565749236e-05,
"loss": 0.0737,
"step": 140
},
{
"epoch": 0.22918258212375858,
"grad_norm": 0.8060865998268127,
"learning_rate": 4.587155963302753e-05,
"loss": 0.0687,
"step": 150
},
{
"epoch": 0.22918258212375858,
"eval_loss": 0.06143134832382202,
"eval_runtime": 154.9789,
"eval_samples_per_second": 7.511,
"eval_steps_per_second": 7.511,
"step": 150
},
{
"epoch": 0.24446142093200918,
"grad_norm": 1.4080629348754883,
"learning_rate": 4.892966360856269e-05,
"loss": 0.074,
"step": 160
},
{
"epoch": 0.2597402597402597,
"grad_norm": 0.6945374011993408,
"learning_rate": 5.1987767584097854e-05,
"loss": 0.059,
"step": 170
},
{
"epoch": 0.2750190985485103,
"grad_norm": 0.8207284212112427,
"learning_rate": 5.504587155963303e-05,
"loss": 0.0729,
"step": 180
},
{
"epoch": 0.2902979373567609,
"grad_norm": 0.9088085889816284,
"learning_rate": 5.81039755351682e-05,
"loss": 0.0748,
"step": 190
},
{
"epoch": 0.30557677616501144,
"grad_norm": 0.8979083895683289,
"learning_rate": 6.116207951070337e-05,
"loss": 0.0565,
"step": 200
},
{
"epoch": 0.30557677616501144,
"eval_loss": 0.05768084526062012,
"eval_runtime": 155.0101,
"eval_samples_per_second": 7.509,
"eval_steps_per_second": 7.509,
"step": 200
},
{
"epoch": 0.32085561497326204,
"grad_norm": 0.9637401103973389,
"learning_rate": 6.422018348623854e-05,
"loss": 0.0637,
"step": 210
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.9198577404022217,
"learning_rate": 6.72782874617737e-05,
"loss": 0.0763,
"step": 220
},
{
"epoch": 0.35141329258976317,
"grad_norm": 0.9913630485534668,
"learning_rate": 7.033639143730886e-05,
"loss": 0.0644,
"step": 230
},
{
"epoch": 0.36669213139801377,
"grad_norm": 0.35458430647850037,
"learning_rate": 7.339449541284404e-05,
"loss": 0.0554,
"step": 240
},
{
"epoch": 0.3819709702062643,
"grad_norm": 0.5968704223632812,
"learning_rate": 7.645259938837921e-05,
"loss": 0.0612,
"step": 250
},
{
"epoch": 0.3819709702062643,
"eval_loss": 0.050989557057619095,
"eval_runtime": 155.0133,
"eval_samples_per_second": 7.509,
"eval_steps_per_second": 7.509,
"step": 250
},
{
"epoch": 0.3972498090145149,
"grad_norm": 0.4042321443557739,
"learning_rate": 7.951070336391437e-05,
"loss": 0.0466,
"step": 260
},
{
"epoch": 0.4125286478227655,
"grad_norm": 0.832648515701294,
"learning_rate": 8.256880733944955e-05,
"loss": 0.071,
"step": 270
},
{
"epoch": 0.42780748663101603,
"grad_norm": 0.40596529841423035,
"learning_rate": 8.562691131498472e-05,
"loss": 0.0674,
"step": 280
},
{
"epoch": 0.4430863254392666,
"grad_norm": 0.6321857571601868,
"learning_rate": 8.868501529051988e-05,
"loss": 0.0521,
"step": 290
},
{
"epoch": 0.45836516424751717,
"grad_norm": 0.6695836186408997,
"learning_rate": 9.174311926605506e-05,
"loss": 0.0426,
"step": 300
},
{
"epoch": 0.45836516424751717,
"eval_loss": 0.048978373408317566,
"eval_runtime": 155.032,
"eval_samples_per_second": 7.508,
"eval_steps_per_second": 7.508,
"step": 300
},
{
"epoch": 0.47364400305576776,
"grad_norm": 0.3279222846031189,
"learning_rate": 9.480122324159021e-05,
"loss": 0.0402,
"step": 310
},
{
"epoch": 0.48892284186401835,
"grad_norm": 0.2997318506240845,
"learning_rate": 9.785932721712538e-05,
"loss": 0.0497,
"step": 320
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.5087282061576843,
"learning_rate": 9.999974360983129e-05,
"loss": 0.0569,
"step": 330
},
{
"epoch": 0.5194805194805194,
"grad_norm": 0.8196572065353394,
"learning_rate": 9.999518563553522e-05,
"loss": 0.0696,
"step": 340
},
{
"epoch": 0.5347593582887701,
"grad_norm": 0.5956114530563354,
"learning_rate": 9.998493069976636e-05,
"loss": 0.0657,
"step": 350
},
{
"epoch": 0.5347593582887701,
"eval_loss": 0.04885314404964447,
"eval_runtime": 155.0796,
"eval_samples_per_second": 7.506,
"eval_steps_per_second": 7.506,
"step": 350
},
{
"epoch": 0.5500381970970206,
"grad_norm": 0.540963351726532,
"learning_rate": 9.99689799710767e-05,
"loss": 0.0603,
"step": 360
},
{
"epoch": 0.5653170359052712,
"grad_norm": 0.5407795310020447,
"learning_rate": 9.994733526705501e-05,
"loss": 0.0667,
"step": 370
},
{
"epoch": 0.5805958747135218,
"grad_norm": 0.508648157119751,
"learning_rate": 9.991999905411966e-05,
"loss": 0.0486,
"step": 380
},
{
"epoch": 0.5958747135217723,
"grad_norm": 0.2861989140510559,
"learning_rate": 9.988697444723762e-05,
"loss": 0.0383,
"step": 390
},
{
"epoch": 0.6111535523300229,
"grad_norm": 0.5613613724708557,
"learning_rate": 9.984826520956949e-05,
"loss": 0.0423,
"step": 400
},
{
"epoch": 0.6111535523300229,
"eval_loss": 0.045348409563302994,
"eval_runtime": 155.1205,
"eval_samples_per_second": 7.504,
"eval_steps_per_second": 7.504,
"step": 400
},
{
"epoch": 0.6264323911382735,
"grad_norm": 0.3380930721759796,
"learning_rate": 9.980387575204072e-05,
"loss": 0.0416,
"step": 410
},
{
"epoch": 0.6417112299465241,
"grad_norm": 0.5008583664894104,
"learning_rate": 9.975381113283891e-05,
"loss": 0.0385,
"step": 420
},
{
"epoch": 0.6569900687547746,
"grad_norm": 0.3847447633743286,
"learning_rate": 9.969807705683751e-05,
"loss": 0.0487,
"step": 430
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.3583645522594452,
"learning_rate": 9.96366798749457e-05,
"loss": 0.0328,
"step": 440
},
{
"epoch": 0.6875477463712758,
"grad_norm": 0.19374260306358337,
"learning_rate": 9.956962658338473e-05,
"loss": 0.056,
"step": 450
},
{
"epoch": 0.6875477463712758,
"eval_loss": 0.04382029548287392,
"eval_runtime": 155.0426,
"eval_samples_per_second": 7.508,
"eval_steps_per_second": 7.508,
"step": 450
},
{
"epoch": 0.7028265851795263,
"grad_norm": 0.22926779091358185,
"learning_rate": 9.94969248228907e-05,
"loss": 0.0415,
"step": 460
},
{
"epoch": 0.7181054239877769,
"grad_norm": 0.19949862360954285,
"learning_rate": 9.941858287784383e-05,
"loss": 0.0505,
"step": 470
},
{
"epoch": 0.7333842627960275,
"grad_norm": 0.3268764317035675,
"learning_rate": 9.933460967532453e-05,
"loss": 0.0459,
"step": 480
},
{
"epoch": 0.7486631016042781,
"grad_norm": 0.45443981885910034,
"learning_rate": 9.924501478409618e-05,
"loss": 0.0493,
"step": 490
},
{
"epoch": 0.7639419404125286,
"grad_norm": 0.22262951731681824,
"learning_rate": 9.914980841351465e-05,
"loss": 0.0393,
"step": 500
},
{
"epoch": 0.7639419404125286,
"eval_loss": 0.03957642242312431,
"eval_runtime": 155.0637,
"eval_samples_per_second": 7.507,
"eval_steps_per_second": 7.507,
"step": 500
},
{
"epoch": 0.7792207792207793,
"grad_norm": 0.25569775700569153,
"learning_rate": 9.904900141236506e-05,
"loss": 0.0302,
"step": 510
},
{
"epoch": 0.7944996180290298,
"grad_norm": 0.6094002723693848,
"learning_rate": 9.894260526762548e-05,
"loss": 0.0484,
"step": 520
},
{
"epoch": 0.8097784568372803,
"grad_norm": 0.3290807902812958,
"learning_rate": 9.883063210315804e-05,
"loss": 0.0616,
"step": 530
},
{
"epoch": 0.825057295645531,
"grad_norm": 0.3825334906578064,
"learning_rate": 9.871309467832738e-05,
"loss": 0.0449,
"step": 540
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.30199292302131653,
"learning_rate": 9.859000638654674e-05,
"loss": 0.0321,
"step": 550
},
{
"epoch": 0.8403361344537815,
"eval_loss": 0.03870731219649315,
"eval_runtime": 155.1875,
"eval_samples_per_second": 7.501,
"eval_steps_per_second": 7.501,
"step": 550
},
{
"epoch": 0.8556149732620321,
"grad_norm": 0.34711456298828125,
"learning_rate": 9.846138125375175e-05,
"loss": 0.0488,
"step": 560
},
{
"epoch": 0.8708938120702827,
"grad_norm": 0.3055240213871002,
"learning_rate": 9.83272339368022e-05,
"loss": 0.0339,
"step": 570
},
{
"epoch": 0.8861726508785333,
"grad_norm": 0.4192282557487488,
"learning_rate": 9.818757972181191e-05,
"loss": 0.0514,
"step": 580
},
{
"epoch": 0.9014514896867838,
"grad_norm": 0.526122510433197,
"learning_rate": 9.804243452240675e-05,
"loss": 0.0368,
"step": 590
},
{
"epoch": 0.9167303284950343,
"grad_norm": 0.6222466230392456,
"learning_rate": 9.789181487791146e-05,
"loss": 0.0493,
"step": 600
},
{
"epoch": 0.9167303284950343,
"eval_loss": 0.04000305384397507,
"eval_runtime": 155.221,
"eval_samples_per_second": 7.499,
"eval_steps_per_second": 7.499,
"step": 600
},
{
"epoch": 0.932009167303285,
"grad_norm": 0.5514181852340698,
"learning_rate": 9.773573795146485e-05,
"loss": 0.0551,
"step": 610
},
{
"epoch": 0.9472880061115355,
"grad_norm": 0.31069889664649963,
"learning_rate": 9.757422152806415e-05,
"loss": 0.0324,
"step": 620
},
{
"epoch": 0.9625668449197861,
"grad_norm": 0.3158469498157501,
"learning_rate": 9.74072840125383e-05,
"loss": 0.0495,
"step": 630
},
{
"epoch": 0.9778456837280367,
"grad_norm": 0.49057844281196594,
"learning_rate": 9.723494442745085e-05,
"loss": 0.0418,
"step": 640
},
{
"epoch": 0.9931245225362872,
"grad_norm": 0.4645870625972748,
"learning_rate": 9.705722241093223e-05,
"loss": 0.0578,
"step": 650
},
{
"epoch": 0.9931245225362872,
"eval_loss": 0.0392950177192688,
"eval_runtime": 155.2341,
"eval_samples_per_second": 7.498,
"eval_steps_per_second": 7.498,
"step": 650
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.19298018515110016,
"learning_rate": 9.687413821444199e-05,
"loss": 0.045,
"step": 660
},
{
"epoch": 1.0236822001527883,
"grad_norm": 0.21009771525859833,
"learning_rate": 9.668571270046122e-05,
"loss": 0.0396,
"step": 670
},
{
"epoch": 1.0389610389610389,
"grad_norm": 0.2709486484527588,
"learning_rate": 9.649196734011519e-05,
"loss": 0.0323,
"step": 680
},
{
"epoch": 1.0542398777692896,
"grad_norm": 0.7960172891616821,
"learning_rate": 9.629292421072671e-05,
"loss": 0.0324,
"step": 690
},
{
"epoch": 1.0695187165775402,
"grad_norm": 0.19981202483177185,
"learning_rate": 9.608860599330048e-05,
"loss": 0.0371,
"step": 700
},
{
"epoch": 1.0695187165775402,
"eval_loss": 0.037576667964458466,
"eval_runtime": 155.1738,
"eval_samples_per_second": 7.501,
"eval_steps_per_second": 7.501,
"step": 700
},
{
"epoch": 1.0847975553857907,
"grad_norm": 0.26322320103645325,
"learning_rate": 9.587903596993854e-05,
"loss": 0.0343,
"step": 710
},
{
"epoch": 1.1000763941940412,
"grad_norm": 0.5381883978843689,
"learning_rate": 9.566423802118724e-05,
"loss": 0.0359,
"step": 720
},
{
"epoch": 1.1153552330022918,
"grad_norm": 0.11692004650831223,
"learning_rate": 9.544423662331612e-05,
"loss": 0.0473,
"step": 730
},
{
"epoch": 1.1306340718105423,
"grad_norm": 0.3760609030723572,
"learning_rate": 9.521905684552877e-05,
"loss": 0.0373,
"step": 740
},
{
"epoch": 1.1459129106187929,
"grad_norm": 0.2045118510723114,
"learning_rate": 9.498872434710623e-05,
"loss": 0.0228,
"step": 750
},
{
"epoch": 1.1459129106187929,
"eval_loss": 0.03641541302204132,
"eval_runtime": 155.2402,
"eval_samples_per_second": 7.498,
"eval_steps_per_second": 7.498,
"step": 750
},
{
"epoch": 1.1611917494270436,
"grad_norm": 0.2369633913040161,
"learning_rate": 9.475326537448307e-05,
"loss": 0.0307,
"step": 760
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.2560236155986786,
"learning_rate": 9.451270675825665e-05,
"loss": 0.0309,
"step": 770
},
{
"epoch": 1.1917494270435447,
"grad_norm": 0.5044377446174622,
"learning_rate": 9.426707591012976e-05,
"loss": 0.0347,
"step": 780
},
{
"epoch": 1.2070282658517952,
"grad_norm": 0.42241302132606506,
"learning_rate": 9.4016400819787e-05,
"loss": 0.0465,
"step": 790
},
{
"epoch": 1.2223071046600458,
"grad_norm": 0.5318493843078613,
"learning_rate": 9.376071005170539e-05,
"loss": 0.0301,
"step": 800
},
{
"epoch": 1.2223071046600458,
"eval_loss": 0.0387733094394207,
"eval_runtime": 155.3117,
"eval_samples_per_second": 7.495,
"eval_steps_per_second": 7.495,
"step": 800
},
{
"epoch": 1.2375859434682965,
"grad_norm": 0.26983004808425903,
"learning_rate": 9.350003274189949e-05,
"loss": 0.0398,
"step": 810
},
{
"epoch": 1.2528647822765469,
"grad_norm": 0.41667139530181885,
"learning_rate": 9.323439859460122e-05,
"loss": 0.0345,
"step": 820
},
{
"epoch": 1.2681436210847976,
"grad_norm": 0.216452956199646,
"learning_rate": 9.296383787887519e-05,
"loss": 0.0335,
"step": 830
},
{
"epoch": 1.2834224598930482,
"grad_norm": 0.2754797339439392,
"learning_rate": 9.268838142516943e-05,
"loss": 0.0456,
"step": 840
},
{
"epoch": 1.2987012987012987,
"grad_norm": 0.20336757600307465,
"learning_rate": 9.240806062180234e-05,
"loss": 0.0383,
"step": 850
},
{
"epoch": 1.2987012987012987,
"eval_loss": 0.03707316145300865,
"eval_runtime": 155.3716,
"eval_samples_per_second": 7.492,
"eval_steps_per_second": 7.492,
"step": 850
},
{
"epoch": 1.3139801375095492,
"grad_norm": 0.20497027039527893,
"learning_rate": 9.212290741138592e-05,
"loss": 0.0206,
"step": 860
},
{
"epoch": 1.3292589763177998,
"grad_norm": 0.17971740663051605,
"learning_rate": 9.183295428718592e-05,
"loss": 0.0292,
"step": 870
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.27106043696403503,
"learning_rate": 9.153823428941924e-05,
"loss": 0.0429,
"step": 880
},
{
"epoch": 1.359816653934301,
"grad_norm": 0.2713659405708313,
"learning_rate": 9.1238781001489e-05,
"loss": 0.0327,
"step": 890
},
{
"epoch": 1.3750954927425516,
"grad_norm": 0.3254333436489105,
"learning_rate": 9.093462854615766e-05,
"loss": 0.0403,
"step": 900
},
{
"epoch": 1.3750954927425516,
"eval_loss": 0.03635941818356514,
"eval_runtime": 155.3012,
"eval_samples_per_second": 7.495,
"eval_steps_per_second": 7.495,
"step": 900
},
{
"epoch": 1.3903743315508021,
"grad_norm": 0.20336714386940002,
"learning_rate": 9.062581158165876e-05,
"loss": 0.0363,
"step": 910
},
{
"epoch": 1.4056531703590527,
"grad_norm": 0.34487685561180115,
"learning_rate": 9.031236529774764e-05,
"loss": 0.0268,
"step": 920
},
{
"epoch": 1.4209320091673032,
"grad_norm": 0.18971335887908936,
"learning_rate": 8.999432541169145e-05,
"loss": 0.0341,
"step": 930
},
{
"epoch": 1.4362108479755538,
"grad_norm": 0.2900649309158325,
"learning_rate": 8.967172816419927e-05,
"loss": 0.0321,
"step": 940
},
{
"epoch": 1.4514896867838045,
"grad_norm": 0.3296191692352295,
"learning_rate": 8.934461031529242e-05,
"loss": 0.0446,
"step": 950
},
{
"epoch": 1.4514896867838045,
"eval_loss": 0.0380614809691906,
"eval_runtime": 155.3777,
"eval_samples_per_second": 7.491,
"eval_steps_per_second": 7.491,
"step": 950
},
{
"epoch": 1.466768525592055,
"grad_norm": 0.6007914543151855,
"learning_rate": 8.901300914011569e-05,
"loss": 0.0332,
"step": 960
},
{
"epoch": 1.4820473644003056,
"grad_norm": 0.14614540338516235,
"learning_rate": 8.867696242468976e-05,
"loss": 0.0261,
"step": 970
},
{
"epoch": 1.4973262032085561,
"grad_norm": 0.24442273378372192,
"learning_rate": 8.833650846160555e-05,
"loss": 0.0417,
"step": 980
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.3221365809440613,
"learning_rate": 8.79916860456607e-05,
"loss": 0.0345,
"step": 990
},
{
"epoch": 1.5278838808250574,
"grad_norm": 0.17641890048980713,
"learning_rate": 8.7642534469439e-05,
"loss": 0.0282,
"step": 1000
},
{
"epoch": 1.5278838808250574,
"eval_loss": 0.035179559141397476,
"eval_runtime": 155.2493,
"eval_samples_per_second": 7.498,
"eval_steps_per_second": 7.498,
"step": 1000
},
{
"epoch": 1.5431627196333078,
"grad_norm": 0.3965778946876526,
"learning_rate": 8.728909351883283e-05,
"loss": 0.034,
"step": 1010
},
{
"epoch": 1.5584415584415585,
"grad_norm": 0.3395179808139801,
"learning_rate": 8.693140346850975e-05,
"loss": 0.0431,
"step": 1020
},
{
"epoch": 1.573720397249809,
"grad_norm": 0.1894790530204773,
"learning_rate": 8.656950507732303e-05,
"loss": 0.028,
"step": 1030
},
{
"epoch": 1.5889992360580596,
"grad_norm": 0.205996572971344,
"learning_rate": 8.620343958366718e-05,
"loss": 0.0338,
"step": 1040
},
{
"epoch": 1.6042780748663101,
"grad_norm": 0.3414444029331207,
"learning_rate": 8.5833248700779e-05,
"loss": 0.0303,
"step": 1050
},
{
"epoch": 1.6042780748663101,
"eval_loss": 0.03627519682049751,
"eval_runtime": 155.2685,
"eval_samples_per_second": 7.497,
"eval_steps_per_second": 7.497,
"step": 1050
},
{
"epoch": 1.6195569136745607,
"grad_norm": 0.16724058985710144,
"learning_rate": 8.545897461198413e-05,
"loss": 0.0331,
"step": 1060
},
{
"epoch": 1.6348357524828114,
"grad_norm": 0.17009948194026947,
"learning_rate": 8.508065996589036e-05,
"loss": 0.0289,
"step": 1070
},
{
"epoch": 1.6501145912910617,
"grad_norm": 0.16459020972251892,
"learning_rate": 8.469834787152783e-05,
"loss": 0.0331,
"step": 1080
},
{
"epoch": 1.6653934300993125,
"grad_norm": 0.22789143025875092,
"learning_rate": 8.43120818934367e-05,
"loss": 0.0322,
"step": 1090
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.3355972170829773,
"learning_rate": 8.392190604670293e-05,
"loss": 0.0287,
"step": 1100
},
{
"epoch": 1.680672268907563,
"eval_loss": 0.04001282900571823,
"eval_runtime": 155.3209,
"eval_samples_per_second": 7.494,
"eval_steps_per_second": 7.494,
"step": 1100
},
{
"epoch": 1.6959511077158136,
"grad_norm": 0.4196670651435852,
"learning_rate": 8.352786479194288e-05,
"loss": 0.041,
"step": 1110
},
{
"epoch": 1.7112299465240641,
"grad_norm": 0.3241218030452728,
"learning_rate": 8.313000303023688e-05,
"loss": 0.0278,
"step": 1120
},
{
"epoch": 1.7265087853323147,
"grad_norm": 0.2682516574859619,
"learning_rate": 8.27283660980128e-05,
"loss": 0.0359,
"step": 1130
},
{
"epoch": 1.7417876241405654,
"grad_norm": 0.19624997675418854,
"learning_rate": 8.232299976187999e-05,
"loss": 0.0345,
"step": 1140
},
{
"epoch": 1.7570664629488157,
"grad_norm": 0.34082722663879395,
"learning_rate": 8.191395021341408e-05,
"loss": 0.0562,
"step": 1150
},
{
"epoch": 1.7570664629488157,
"eval_loss": 0.03553533926606178,
"eval_runtime": 155.2946,
"eval_samples_per_second": 7.495,
"eval_steps_per_second": 7.495,
"step": 1150
},
{
"epoch": 1.7723453017570665,
"grad_norm": 0.3745851218700409,
"learning_rate": 8.150126406389352e-05,
"loss": 0.0312,
"step": 1160
},
{
"epoch": 1.787624140565317,
"grad_norm": 0.5491358637809753,
"learning_rate": 8.108498833898815e-05,
"loss": 0.0383,
"step": 1170
},
{
"epoch": 1.8029029793735676,
"grad_norm": 0.30635517835617065,
"learning_rate": 8.066517047340066e-05,
"loss": 0.0364,
"step": 1180
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.11578669399023056,
"learning_rate": 8.02418583054614e-05,
"loss": 0.0269,
"step": 1190
},
{
"epoch": 1.8334606569900687,
"grad_norm": 0.37157660722732544,
"learning_rate": 7.981510007167719e-05,
"loss": 0.0354,
"step": 1200
},
{
"epoch": 1.8334606569900687,
"eval_loss": 0.03499121591448784,
"eval_runtime": 155.2714,
"eval_samples_per_second": 7.497,
"eval_steps_per_second": 7.497,
"step": 1200
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.2752259373664856,
"learning_rate": 7.938494440123468e-05,
"loss": 0.0304,
"step": 1210
},
{
"epoch": 1.8640183346065697,
"grad_norm": 0.1890093833208084,
"learning_rate": 7.895144031045918e-05,
"loss": 0.0345,
"step": 1220
},
{
"epoch": 1.8792971734148205,
"grad_norm": 0.19569799304008484,
"learning_rate": 7.851463719722913e-05,
"loss": 0.0254,
"step": 1230
},
{
"epoch": 1.894576012223071,
"grad_norm": 0.20889650285243988,
"learning_rate": 7.80745848353473e-05,
"loss": 0.0319,
"step": 1240
},
{
"epoch": 1.9098548510313216,
"grad_norm": 0.343009889125824,
"learning_rate": 7.763133336886892e-05,
"loss": 0.0379,
"step": 1250
},
{
"epoch": 1.9098548510313216,
"eval_loss": 0.03767581284046173,
"eval_runtime": 155.276,
"eval_samples_per_second": 7.496,
"eval_steps_per_second": 7.496,
"step": 1250
},
{
"epoch": 1.9251336898395723,
"grad_norm": 0.1667303442955017,
"learning_rate": 7.718493330638789e-05,
"loss": 0.0257,
"step": 1260
},
{
"epoch": 1.9404125286478227,
"grad_norm": 0.1682262122631073,
"learning_rate": 7.673543551528122e-05,
"loss": 0.0386,
"step": 1270
},
{
"epoch": 1.9556913674560734,
"grad_norm": 0.3651019334793091,
"learning_rate": 7.628289121591277e-05,
"loss": 0.0393,
"step": 1280
},
{
"epoch": 1.970970206264324,
"grad_norm": 0.2863140106201172,
"learning_rate": 7.582735197579656e-05,
"loss": 0.0318,
"step": 1290
},
{
"epoch": 1.9862490450725745,
"grad_norm": 0.2292311191558838,
"learning_rate": 7.536886970372078e-05,
"loss": 0.0272,
"step": 1300
},
{
"epoch": 1.9862490450725745,
"eval_loss": 0.035320527851581573,
"eval_runtime": 155.2677,
"eval_samples_per_second": 7.497,
"eval_steps_per_second": 7.497,
"step": 1300
},
{
"epoch": 2.0015278838808253,
"grad_norm": 0.1797218769788742,
"learning_rate": 7.490749664383271e-05,
"loss": 0.0418,
"step": 1310
},
{
"epoch": 2.0168067226890756,
"grad_norm": 0.12029094249010086,
"learning_rate": 7.444328536968538e-05,
"loss": 0.0276,
"step": 1320
},
{
"epoch": 2.0320855614973263,
"grad_norm": 0.9163817167282104,
"learning_rate": 7.397628877824701e-05,
"loss": 0.027,
"step": 1330
},
{
"epoch": 2.0473644003055766,
"grad_norm": 0.14729079604148865,
"learning_rate": 7.350656008387327e-05,
"loss": 0.0213,
"step": 1340
},
{
"epoch": 2.0626432391138274,
"grad_norm": 0.2564590573310852,
"learning_rate": 7.303415281224346e-05,
"loss": 0.0232,
"step": 1350
},
{
"epoch": 2.0626432391138274,
"eval_loss": 0.03556601703166962,
"eval_runtime": 155.3464,
"eval_samples_per_second": 7.493,
"eval_steps_per_second": 7.493,
"step": 1350
},
{
"epoch": 2.0779220779220777,
"grad_norm": 0.257223516702652,
"learning_rate": 7.255912079426136e-05,
"loss": 0.0183,
"step": 1360
},
{
"epoch": 2.0932009167303285,
"grad_norm": 0.11171077191829681,
"learning_rate": 7.208151815992107e-05,
"loss": 0.0216,
"step": 1370
},
{
"epoch": 2.1084797555385792,
"grad_norm": 0.14337344467639923,
"learning_rate": 7.160139933213898e-05,
"loss": 0.0301,
"step": 1380
},
{
"epoch": 2.1237585943468296,
"grad_norm": 0.35148173570632935,
"learning_rate": 7.111881902055223e-05,
"loss": 0.0231,
"step": 1390
},
{
"epoch": 2.1390374331550803,
"grad_norm": 0.17770536243915558,
"learning_rate": 7.06338322152845e-05,
"loss": 0.0203,
"step": 1400
},
{
"epoch": 2.1390374331550803,
"eval_loss": 0.03558831289410591,
"eval_runtime": 155.1936,
"eval_samples_per_second": 7.5,
"eval_steps_per_second": 7.5,
"step": 1400
},
{
"epoch": 2.1543162719633306,
"grad_norm": 0.2693638801574707,
"learning_rate": 7.014649418067994e-05,
"loss": 0.0225,
"step": 1410
},
{
"epoch": 2.1695951107715814,
"grad_norm": 0.1347169131040573,
"learning_rate": 6.965686044900577e-05,
"loss": 0.0174,
"step": 1420
},
{
"epoch": 2.184873949579832,
"grad_norm": 0.7838224172592163,
"learning_rate": 6.91649868141243e-05,
"loss": 0.0157,
"step": 1430
},
{
"epoch": 2.2001527883880825,
"grad_norm": 0.22640898823738098,
"learning_rate": 6.86709293251353e-05,
"loss": 0.0129,
"step": 1440
},
{
"epoch": 2.2154316271963332,
"grad_norm": 0.2071000039577484,
"learning_rate": 6.817474427998916e-05,
"loss": 0.0156,
"step": 1450
},
{
"epoch": 2.2154316271963332,
"eval_loss": 0.03878592699766159,
"eval_runtime": 155.1317,
"eval_samples_per_second": 7.503,
"eval_steps_per_second": 7.503,
"step": 1450
},
{
"epoch": 2.2307104660045836,
"grad_norm": 0.38917431235313416,
"learning_rate": 6.767648821907172e-05,
"loss": 0.0239,
"step": 1460
},
{
"epoch": 2.2459893048128343,
"grad_norm": 0.18601389229297638,
"learning_rate": 6.717621791876147e-05,
"loss": 0.0339,
"step": 1470
},
{
"epoch": 2.2612681436210846,
"grad_norm": 0.2055034637451172,
"learning_rate": 6.667399038495986e-05,
"loss": 0.0243,
"step": 1480
},
{
"epoch": 2.2765469824293354,
"grad_norm": 0.2515312135219574,
"learning_rate": 6.616986284659556e-05,
"loss": 0.0289,
"step": 1490
},
{
"epoch": 2.2918258212375857,
"grad_norm": 0.1224779561161995,
"learning_rate": 6.566389274910309e-05,
"loss": 0.0207,
"step": 1500
},
{
"epoch": 2.2918258212375857,
"eval_loss": 0.03438286855816841,
"eval_runtime": 155.0887,
"eval_samples_per_second": 7.505,
"eval_steps_per_second": 7.505,
"step": 1500
},
{
"epoch": 2.3071046600458365,
"grad_norm": 0.17208464443683624,
"learning_rate": 6.515613774787697e-05,
"loss": 0.0207,
"step": 1510
},
{
"epoch": 2.3223834988540872,
"grad_norm": 0.13433796167373657,
"learning_rate": 6.464665570170186e-05,
"loss": 0.0238,
"step": 1520
},
{
"epoch": 2.3376623376623376,
"grad_norm": 0.1663011610507965,
"learning_rate": 6.413550466615952e-05,
"loss": 0.0232,
"step": 1530
},
{
"epoch": 2.3529411764705883,
"grad_norm": 1.049318790435791,
"learning_rate": 6.362274288701342e-05,
"loss": 0.02,
"step": 1540
},
{
"epoch": 2.3682200152788386,
"grad_norm": 0.19288620352745056,
"learning_rate": 6.310842879357158e-05,
"loss": 0.0202,
"step": 1550
},
{
"epoch": 2.3682200152788386,
"eval_loss": 0.03451816737651825,
"eval_runtime": 155.1043,
"eval_samples_per_second": 7.505,
"eval_steps_per_second": 7.505,
"step": 1550
},
{
"epoch": 2.3834988540870894,
"grad_norm": 0.18346098065376282,
"learning_rate": 6.25926209920285e-05,
"loss": 0.0117,
"step": 1560
},
{
"epoch": 2.39877769289534,
"grad_norm": 0.3315141797065735,
"learning_rate": 6.207537825878708e-05,
"loss": 0.0176,
"step": 1570
},
{
"epoch": 2.4140565317035905,
"grad_norm": 0.12683014571666718,
"learning_rate": 6.155675953376095e-05,
"loss": 0.0315,
"step": 1580
},
{
"epoch": 2.4293353705118412,
"grad_norm": 0.19462399184703827,
"learning_rate": 6.103682391365828e-05,
"loss": 0.0242,
"step": 1590
},
{
"epoch": 2.4446142093200915,
"grad_norm": 0.17063353955745697,
"learning_rate": 6.05156306452477e-05,
"loss": 0.0196,
"step": 1600
},
{
"epoch": 2.4446142093200915,
"eval_loss": 0.03451281785964966,
"eval_runtime": 155.0665,
"eval_samples_per_second": 7.506,
"eval_steps_per_second": 7.506,
"step": 1600
},
{
"epoch": 2.4598930481283423,
"grad_norm": 0.20790407061576843,
"learning_rate": 5.9993239118607124e-05,
"loss": 0.0294,
"step": 1610
},
{
"epoch": 2.475171886936593,
"grad_norm": 0.2353222370147705,
"learning_rate": 5.9469708860356246e-05,
"loss": 0.0209,
"step": 1620
},
{
"epoch": 2.4904507257448434,
"grad_norm": 0.18481875956058502,
"learning_rate": 5.89450995268734e-05,
"loss": 0.0221,
"step": 1630
},
{
"epoch": 2.5057295645530937,
"grad_norm": 0.19238875806331635,
"learning_rate": 5.841947089749783e-05,
"loss": 0.0258,
"step": 1640
},
{
"epoch": 2.5210084033613445,
"grad_norm": 0.19955088198184967,
"learning_rate": 5.78928828677177e-05,
"loss": 0.0244,
"step": 1650
},
{
"epoch": 2.5210084033613445,
"eval_loss": 0.03445274755358696,
"eval_runtime": 155.0979,
"eval_samples_per_second": 7.505,
"eval_steps_per_second": 7.505,
"step": 1650
},
{
"epoch": 2.5362872421695952,
"grad_norm": 0.5157607793807983,
"learning_rate": 5.7365395442345085e-05,
"loss": 0.0207,
"step": 1660
},
{
"epoch": 2.5515660809778455,
"grad_norm": 0.248027041554451,
"learning_rate": 5.683706872867833e-05,
"loss": 0.0195,
"step": 1670
},
{
"epoch": 2.5668449197860963,
"grad_norm": 0.3609461784362793,
"learning_rate": 5.630796292965288e-05,
"loss": 0.0311,
"step": 1680
},
{
"epoch": 2.5821237585943466,
"grad_norm": 0.19655698537826538,
"learning_rate": 5.57781383369811e-05,
"loss": 0.0266,
"step": 1690
},
{
"epoch": 2.5974025974025974,
"grad_norm": 0.31010693311691284,
"learning_rate": 5.524765532428203e-05,
"loss": 0.0199,
"step": 1700
},
{
"epoch": 2.5974025974025974,
"eval_loss": 0.035529833287000656,
"eval_runtime": 155.1466,
"eval_samples_per_second": 7.503,
"eval_steps_per_second": 7.503,
"step": 1700
},
{
"epoch": 2.612681436210848,
"grad_norm": 0.3448195159435272,
"learning_rate": 5.471657434020182e-05,
"loss": 0.0242,
"step": 1710
},
{
"epoch": 2.6279602750190985,
"grad_norm": 0.2073138803243637,
"learning_rate": 5.418495590152557e-05,
"loss": 0.028,
"step": 1720
},
{
"epoch": 2.643239113827349,
"grad_norm": 0.20881344377994537,
"learning_rate": 5.365286058628145e-05,
"loss": 0.0229,
"step": 1730
},
{
"epoch": 2.6585179526355995,
"grad_norm": 0.3347488343715668,
"learning_rate": 5.312034902683779e-05,
"loss": 0.03,
"step": 1740
},
{
"epoch": 2.6737967914438503,
"grad_norm": 0.17304031550884247,
"learning_rate": 5.258748190299404e-05,
"loss": 0.0221,
"step": 1750
},
{
"epoch": 2.6737967914438503,
"eval_loss": 0.03376537188887596,
"eval_runtime": 155.1859,
"eval_samples_per_second": 7.501,
"eval_steps_per_second": 7.501,
"step": 1750
},
{
"epoch": 2.689075630252101,
"grad_norm": 0.216533362865448,
"learning_rate": 5.20543199350663e-05,
"loss": 0.0198,
"step": 1760
},
{
"epoch": 2.7043544690603514,
"grad_norm": 0.16479599475860596,
"learning_rate": 5.152092387696821e-05,
"loss": 0.0243,
"step": 1770
},
{
"epoch": 2.719633307868602,
"grad_norm": 0.624372124671936,
"learning_rate": 5.0987354509287985e-05,
"loss": 0.0217,
"step": 1780
},
{
"epoch": 2.7349121466768525,
"grad_norm": 0.21548637747764587,
"learning_rate": 5.045367263236257e-05,
"loss": 0.0173,
"step": 1790
},
{
"epoch": 2.750190985485103,
"grad_norm": 0.15954096615314484,
"learning_rate": 4.991993905934931e-05,
"loss": 0.0271,
"step": 1800
},
{
"epoch": 2.750190985485103,
"eval_loss": 0.03243241831660271,
"eval_runtime": 155.1503,
"eval_samples_per_second": 7.502,
"eval_steps_per_second": 7.502,
"step": 1800
},
{
"epoch": 2.765469824293354,
"grad_norm": 0.4924725890159607,
"learning_rate": 4.938621460929639e-05,
"loss": 0.0214,
"step": 1810
},
{
"epoch": 2.7807486631016043,
"grad_norm": 0.25770097970962524,
"learning_rate": 4.885256010021233e-05,
"loss": 0.0207,
"step": 1820
},
{
"epoch": 2.7960275019098546,
"grad_norm": 0.30344194173812866,
"learning_rate": 4.831903634213599e-05,
"loss": 0.0231,
"step": 1830
},
{
"epoch": 2.8113063407181054,
"grad_norm": 0.07679025083780289,
"learning_rate": 4.778570413020702e-05,
"loss": 0.0244,
"step": 1840
},
{
"epoch": 2.826585179526356,
"grad_norm": 0.38201385736465454,
"learning_rate": 4.725262423773838e-05,
"loss": 0.0265,
"step": 1850
},
{
"epoch": 2.826585179526356,
"eval_loss": 0.032429687678813934,
"eval_runtime": 155.0754,
"eval_samples_per_second": 7.506,
"eval_steps_per_second": 7.506,
"step": 1850
},
{
"epoch": 2.8418640183346064,
"grad_norm": 0.2612603008747101,
"learning_rate": 4.671985740929123e-05,
"loss": 0.0166,
"step": 1860
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.10188362002372742,
"learning_rate": 4.618746435375295e-05,
"loss": 0.0234,
"step": 1870
},
{
"epoch": 2.8724216959511075,
"grad_norm": 0.24049559235572815,
"learning_rate": 4.565550573741942e-05,
"loss": 0.0206,
"step": 1880
},
{
"epoch": 2.8877005347593583,
"grad_norm": 0.6570667624473572,
"learning_rate": 4.512404217708217e-05,
"loss": 0.0268,
"step": 1890
},
{
"epoch": 2.902979373567609,
"grad_norm": 0.23941820859909058,
"learning_rate": 4.45931342331209e-05,
"loss": 0.0214,
"step": 1900
},
{
"epoch": 2.902979373567609,
"eval_loss": 0.03430779278278351,
"eval_runtime": 155.0875,
"eval_samples_per_second": 7.505,
"eval_steps_per_second": 7.505,
"step": 1900
},
{
"epoch": 2.9182582123758594,
"grad_norm": 0.2971397638320923,
"learning_rate": 4.406284240260278e-05,
"loss": 0.0282,
"step": 1910
},
{
"epoch": 2.93353705118411,
"grad_norm": 0.11608521640300751,
"learning_rate": 4.3533227112388694e-05,
"loss": 0.0167,
"step": 1920
},
{
"epoch": 2.9488158899923604,
"grad_norm": 0.23668737709522247,
"learning_rate": 4.300434871224763e-05,
"loss": 0.0304,
"step": 1930
},
{
"epoch": 2.964094728800611,
"grad_norm": 0.34499678015708923,
"learning_rate": 4.247626746797983e-05,
"loss": 0.0254,
"step": 1940
},
{
"epoch": 2.979373567608862,
"grad_norm": 0.20990602672100067,
"learning_rate": 4.1949043554549406e-05,
"loss": 0.0214,
"step": 1950
},
{
"epoch": 2.979373567608862,
"eval_loss": 0.03535965830087662,
"eval_runtime": 155.1651,
"eval_samples_per_second": 7.502,
"eval_steps_per_second": 7.502,
"step": 1950
},
{
"epoch": 2.9946524064171123,
"grad_norm": 0.27059274911880493,
"learning_rate": 4.14227370492275e-05,
"loss": 0.0261,
"step": 1960
},
{
"epoch": 3.009931245225363,
"grad_norm": 0.11485490947961807,
"learning_rate": 4.08974079247464e-05,
"loss": 0.0173,
"step": 1970
},
{
"epoch": 3.0252100840336134,
"grad_norm": 0.031539060175418854,
"learning_rate": 4.037311604246565e-05,
"loss": 0.0143,
"step": 1980
},
{
"epoch": 3.040488922841864,
"grad_norm": 0.2541743814945221,
"learning_rate": 3.9849921145550805e-05,
"loss": 0.0137,
"step": 1990
},
{
"epoch": 3.0557677616501144,
"grad_norm": 0.04028066247701645,
"learning_rate": 3.9327882852165795e-05,
"loss": 0.0159,
"step": 2000
},
{
"epoch": 3.0557677616501144,
"eval_loss": 0.035116177052259445,
"eval_runtime": 155.2618,
"eval_samples_per_second": 7.497,
"eval_steps_per_second": 7.497,
"step": 2000
},
{
"epoch": 3.071046600458365,
"grad_norm": 0.19185902178287506,
"learning_rate": 3.880706064867926e-05,
"loss": 0.0145,
"step": 2010
},
{
"epoch": 3.0863254392666155,
"grad_norm": 0.25667238235473633,
"learning_rate": 3.8287513882886196e-05,
"loss": 0.0106,
"step": 2020
},
{
"epoch": 3.1016042780748663,
"grad_norm": 0.1860036551952362,
"learning_rate": 3.776930175724521e-05,
"loss": 0.0107,
"step": 2030
},
{
"epoch": 3.116883116883117,
"grad_norm": 0.3873313069343567,
"learning_rate": 3.7252483322132386e-05,
"loss": 0.0182,
"step": 2040
},
{
"epoch": 3.1321619556913673,
"grad_norm": 0.17588308453559875,
"learning_rate": 3.673711746911252e-05,
"loss": 0.0147,
"step": 2050
},
{
"epoch": 3.1321619556913673,
"eval_loss": 0.03641332685947418,
"eval_runtime": 155.3277,
"eval_samples_per_second": 7.494,
"eval_steps_per_second": 7.494,
"step": 2050
},
{
"epoch": 3.147440794499618,
"grad_norm": 0.13042625784873962,
"learning_rate": 3.6223262924228344e-05,
"loss": 0.0086,
"step": 2060
},
{
"epoch": 3.1627196333078684,
"grad_norm": 0.2611735165119171,
"learning_rate": 3.5710978241308733e-05,
"loss": 0.0127,
"step": 2070
},
{
"epoch": 3.177998472116119,
"grad_norm": 0.3763399124145508,
"learning_rate": 3.520032179529652e-05,
"loss": 0.0133,
"step": 2080
},
{
"epoch": 3.19327731092437,
"grad_norm": 0.3212707042694092,
"learning_rate": 3.4691351775596564e-05,
"loss": 0.0114,
"step": 2090
},
{
"epoch": 3.2085561497326203,
"grad_norm": 0.2076113373041153,
"learning_rate": 3.41841261794451e-05,
"loss": 0.0125,
"step": 2100
},
{
"epoch": 3.2085561497326203,
"eval_loss": 0.038192324340343475,
"eval_runtime": 155.3842,
"eval_samples_per_second": 7.491,
"eval_steps_per_second": 7.491,
"step": 2100
},
{
"epoch": 3.223834988540871,
"grad_norm": 0.20385810732841492,
"learning_rate": 3.367870280530101e-05,
"loss": 0.016,
"step": 2110
},
{
"epoch": 3.2391138273491213,
"grad_norm": 0.369327187538147,
"learning_rate": 3.3175139246259536e-05,
"loss": 0.0114,
"step": 2120
},
{
"epoch": 3.254392666157372,
"grad_norm": 0.13626205921173096,
"learning_rate": 3.2673492883489696e-05,
"loss": 0.0101,
"step": 2130
},
{
"epoch": 3.2696715049656224,
"grad_norm": 0.6658885478973389,
"learning_rate": 3.2173820879695535e-05,
"loss": 0.0118,
"step": 2140
},
{
"epoch": 3.284950343773873,
"grad_norm": 0.24886025488376617,
"learning_rate": 3.1676180172602525e-05,
"loss": 0.0135,
"step": 2150
},
{
"epoch": 3.284950343773873,
"eval_loss": 0.03980547562241554,
"eval_runtime": 155.2824,
"eval_samples_per_second": 7.496,
"eval_steps_per_second": 7.496,
"step": 2150
},
{
"epoch": 3.300229182582124,
"grad_norm": 0.10821767151355743,
"learning_rate": 3.11806274684695e-05,
"loss": 0.0093,
"step": 2160
},
{
"epoch": 3.3155080213903743,
"grad_norm": 0.3106374442577362,
"learning_rate": 3.068721923562688e-05,
"loss": 0.0063,
"step": 2170
},
{
"epoch": 3.330786860198625,
"grad_norm": 0.3591933846473694,
"learning_rate": 3.019601169804216e-05,
"loss": 0.0137,
"step": 2180
},
{
"epoch": 3.3460656990068753,
"grad_norm": 0.338277131319046,
"learning_rate": 2.9707060828913225e-05,
"loss": 0.0121,
"step": 2190
},
{
"epoch": 3.361344537815126,
"grad_norm": 0.30847465991973877,
"learning_rate": 2.9220422344290056e-05,
"loss": 0.0138,
"step": 2200
},
{
"epoch": 3.361344537815126,
"eval_loss": 0.040571607649326324,
"eval_runtime": 155.4873,
"eval_samples_per_second": 7.486,
"eval_steps_per_second": 7.486,
"step": 2200
},
{
"epoch": 3.3766233766233764,
"grad_norm": 0.40585702657699585,
"learning_rate": 2.873615169672601e-05,
"loss": 0.0103,
"step": 2210
},
{
"epoch": 3.391902215431627,
"grad_norm": 0.1804322749376297,
"learning_rate": 2.8254304068958927e-05,
"loss": 0.0106,
"step": 2220
},
{
"epoch": 3.407181054239878,
"grad_norm": 0.0771070197224617,
"learning_rate": 2.7774934367622996e-05,
"loss": 0.0157,
"step": 2230
},
{
"epoch": 3.4224598930481283,
"grad_norm": 0.2759353816509247,
"learning_rate": 2.7298097216992284e-05,
"loss": 0.01,
"step": 2240
},
{
"epoch": 3.437738731856379,
"grad_norm": 0.2682913541793823,
"learning_rate": 2.6823846952756125e-05,
"loss": 0.0143,
"step": 2250
},
{
"epoch": 3.437738731856379,
"eval_loss": 0.0449955128133297,
"eval_runtime": 155.4284,
"eval_samples_per_second": 7.489,
"eval_steps_per_second": 7.489,
"step": 2250
},
{
"epoch": 3.4530175706646293,
"grad_norm": 0.249994695186615,
"learning_rate": 2.6352237615827636e-05,
"loss": 0.009,
"step": 2260
},
{
"epoch": 3.46829640947288,
"grad_norm": 0.3084390163421631,
"learning_rate": 2.5883322946185777e-05,
"loss": 0.0135,
"step": 2270
},
{
"epoch": 3.483575248281131,
"grad_norm": 0.32062187790870667,
"learning_rate": 2.5417156376751562e-05,
"loss": 0.0146,
"step": 2280
},
{
"epoch": 3.498854087089381,
"grad_norm": 0.19855211675167084,
"learning_rate": 2.4953791027299506e-05,
"loss": 0.006,
"step": 2290
},
{
"epoch": 3.514132925897632,
"grad_norm": 0.07996582239866257,
"learning_rate": 2.4493279698404493e-05,
"loss": 0.0082,
"step": 2300
},
{
"epoch": 3.514132925897632,
"eval_loss": 0.043550413101911545,
"eval_runtime": 155.4017,
"eval_samples_per_second": 7.49,
"eval_steps_per_second": 7.49,
"step": 2300
},
{
"epoch": 3.5294117647058822,
"grad_norm": 0.4253939092159271,
"learning_rate": 2.403567486542518e-05,
"loss": 0.0133,
"step": 2310
},
{
"epoch": 3.544690603514133,
"grad_norm": 0.28725898265838623,
"learning_rate": 2.3581028672524485e-05,
"loss": 0.0149,
"step": 2320
},
{
"epoch": 3.5599694423223838,
"grad_norm": 0.17743922770023346,
"learning_rate": 2.312939292672765e-05,
"loss": 0.0077,
"step": 2330
},
{
"epoch": 3.575248281130634,
"grad_norm": 0.5599276423454285,
"learning_rate": 2.268081909201885e-05,
"loss": 0.0071,
"step": 2340
},
{
"epoch": 3.5905271199388844,
"grad_norm": 0.17478495836257935,
"learning_rate": 2.2235358283476936e-05,
"loss": 0.0177,
"step": 2350
},
{
"epoch": 3.5905271199388844,
"eval_loss": 0.042605265974998474,
"eval_runtime": 155.3525,
"eval_samples_per_second": 7.493,
"eval_steps_per_second": 7.493,
"step": 2350
},
{
"epoch": 3.605805958747135,
"grad_norm": 0.10272977501153946,
"learning_rate": 2.179306126145075e-05,
"loss": 0.0132,
"step": 2360
},
{
"epoch": 3.621084797555386,
"grad_norm": 0.3142433166503906,
"learning_rate": 2.1353978425775008e-05,
"loss": 0.011,
"step": 2370
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.26462051272392273,
"learning_rate": 2.091815981002731e-05,
"loss": 0.0112,
"step": 2380
},
{
"epoch": 3.651642475171887,
"grad_norm": 0.2536456286907196,
"learning_rate": 2.0485655075826667e-05,
"loss": 0.0166,
"step": 2390
},
{
"epoch": 3.6669213139801373,
"grad_norm": 1.0847595930099487,
"learning_rate": 2.0056513507174685e-05,
"loss": 0.0119,
"step": 2400
},
{
"epoch": 3.6669213139801373,
"eval_loss": 0.03961669281125069,
"eval_runtime": 155.3287,
"eval_samples_per_second": 7.494,
"eval_steps_per_second": 7.494,
"step": 2400
},
{
"epoch": 3.682200152788388,
"grad_norm": 0.19118262827396393,
"learning_rate": 1.963078400483953e-05,
"loss": 0.0103,
"step": 2410
},
{
"epoch": 3.697478991596639,
"grad_norm": 0.28924936056137085,
"learning_rate": 1.9208515080783723e-05,
"loss": 0.0119,
"step": 2420
},
{
"epoch": 3.712757830404889,
"grad_norm": 0.12338971346616745,
"learning_rate": 1.8789754852636245e-05,
"loss": 0.0076,
"step": 2430
},
{
"epoch": 3.72803666921314,
"grad_norm": 0.4825515151023865,
"learning_rate": 1.837455103820942e-05,
"loss": 0.0182,
"step": 2440
},
{
"epoch": 3.7433155080213902,
"grad_norm": 0.1710490584373474,
"learning_rate": 1.7962950950061502e-05,
"loss": 0.0061,
"step": 2450
},
{
"epoch": 3.7433155080213902,
"eval_loss": 0.040671028196811676,
"eval_runtime": 155.3708,
"eval_samples_per_second": 7.492,
"eval_steps_per_second": 7.492,
"step": 2450
},
{
"epoch": 3.758594346829641,
"grad_norm": 0.07451760768890381,
"learning_rate": 1.7555001490105488e-05,
"loss": 0.0093,
"step": 2460
},
{
"epoch": 3.7738731856378918,
"grad_norm": 0.23963361978530884,
"learning_rate": 1.7150749144264462e-05,
"loss": 0.0113,
"step": 2470
},
{
"epoch": 3.789152024446142,
"grad_norm": 0.2488386332988739,
"learning_rate": 1.6750239977174682e-05,
"loss": 0.0083,
"step": 2480
},
{
"epoch": 3.8044308632543924,
"grad_norm": 0.5135478377342224,
"learning_rate": 1.6353519626936397e-05,
"loss": 0.0168,
"step": 2490
},
{
"epoch": 3.819709702062643,
"grad_norm": 0.05055965110659599,
"learning_rate": 1.596063329991341e-05,
"loss": 0.0101,
"step": 2500
},
{
"epoch": 3.819709702062643,
"eval_loss": 0.0403166227042675,
"eval_runtime": 155.429,
"eval_samples_per_second": 7.489,
"eval_steps_per_second": 7.489,
"step": 2500
},
{
"epoch": 3.834988540870894,
"grad_norm": 0.1947387158870697,
"learning_rate": 1.5571625765581832e-05,
"loss": 0.0086,
"step": 2510
},
{
"epoch": 3.8502673796791442,
"grad_norm": 0.4964490532875061,
"learning_rate": 1.5186541351428545e-05,
"loss": 0.0121,
"step": 2520
},
{
"epoch": 3.865546218487395,
"grad_norm": 0.36925700306892395,
"learning_rate": 1.4805423937900087e-05,
"loss": 0.014,
"step": 2530
},
{
"epoch": 3.8808250572956453,
"grad_norm": 0.66943359375,
"learning_rate": 1.4428316953402526e-05,
"loss": 0.0103,
"step": 2540
},
{
"epoch": 3.896103896103896,
"grad_norm": 0.3197585642337799,
"learning_rate": 1.4055263369352672e-05,
"loss": 0.0105,
"step": 2550
},
{
"epoch": 3.896103896103896,
"eval_loss": 0.03982643038034439,
"eval_runtime": 155.2993,
"eval_samples_per_second": 7.495,
"eval_steps_per_second": 7.495,
"step": 2550
},
{
"epoch": 3.911382734912147,
"grad_norm": 0.274338960647583,
"learning_rate": 1.3686305695281559e-05,
"loss": 0.0137,
"step": 2560
},
{
"epoch": 3.926661573720397,
"grad_norm": 0.10365447402000427,
"learning_rate": 1.3321485973990494e-05,
"loss": 0.0124,
"step": 2570
},
{
"epoch": 3.941940412528648,
"grad_norm": 0.41343265771865845,
"learning_rate": 1.2960845776760156e-05,
"loss": 0.0149,
"step": 2580
},
{
"epoch": 3.9572192513368982,
"grad_norm": 0.2558101713657379,
"learning_rate": 1.2604426198613688e-05,
"loss": 0.0109,
"step": 2590
},
{
"epoch": 3.972498090145149,
"grad_norm": 0.34950628876686096,
"learning_rate": 1.2252267853633798e-05,
"loss": 0.0087,
"step": 2600
},
{
"epoch": 3.972498090145149,
"eval_loss": 0.03903589025139809,
"eval_runtime": 155.2401,
"eval_samples_per_second": 7.498,
"eval_steps_per_second": 7.498,
"step": 2600
},
{
"epoch": 3.9877769289533997,
"grad_norm": 0.20983736217021942,
"learning_rate": 1.1904410870334803e-05,
"loss": 0.0126,
"step": 2610
},
{
"epoch": 4.0030557677616505,
"grad_norm": 0.1424865871667862,
"learning_rate": 1.1560894887090052e-05,
"loss": 0.0123,
"step": 2620
},
{
"epoch": 4.0183346065699,
"grad_norm": 0.38797080516815186,
"learning_rate": 1.1221759047615004e-05,
"loss": 0.0087,
"step": 2630
},
{
"epoch": 4.033613445378151,
"grad_norm": 0.07197237014770508,
"learning_rate": 1.0887041996506859e-05,
"loss": 0.0039,
"step": 2640
},
{
"epoch": 4.048892284186402,
"grad_norm": 0.09156972169876099,
"learning_rate": 1.0556781874841027e-05,
"loss": 0.0058,
"step": 2650
},
{
"epoch": 4.048892284186402,
"eval_loss": 0.0419425331056118,
"eval_runtime": 155.2402,
"eval_samples_per_second": 7.498,
"eval_steps_per_second": 7.498,
"step": 2650
},
{
"epoch": 4.064171122994653,
"grad_norm": 0.08017122000455856,
"learning_rate": 1.0231016315824875e-05,
"loss": 0.0051,
"step": 2660
},
{
"epoch": 4.0794499618029025,
"grad_norm": 0.0580967552959919,
"learning_rate": 9.909782440509491e-06,
"loss": 0.0062,
"step": 2670
},
{
"epoch": 4.094728800611153,
"grad_norm": 0.34293055534362793,
"learning_rate": 9.593116853559648e-06,
"loss": 0.0083,
"step": 2680
},
{
"epoch": 4.110007639419404,
"grad_norm": 0.054443735629320145,
"learning_rate": 9.281055639082747e-06,
"loss": 0.0057,
"step": 2690
},
{
"epoch": 4.125286478227655,
"grad_norm": 0.13537727296352386,
"learning_rate": 8.973634356517063e-06,
"loss": 0.0065,
"step": 2700
},
{
"epoch": 4.125286478227655,
"eval_loss": 0.04612136632204056,
"eval_runtime": 155.2931,
"eval_samples_per_second": 7.496,
"eval_steps_per_second": 7.496,
"step": 2700
},
{
"epoch": 4.140565317035906,
"grad_norm": 0.3425973653793335,
"learning_rate": 8.670888036579639e-06,
"loss": 0.0056,
"step": 2710
},
{
"epoch": 4.1558441558441555,
"grad_norm": 0.016841163858771324,
"learning_rate": 8.372851177274604e-06,
"loss": 0.0051,
"step": 2720
},
{
"epoch": 4.171122994652406,
"grad_norm": 0.2846859395503998,
"learning_rate": 8.079557739962128e-06,
"loss": 0.0059,
"step": 2730
},
{
"epoch": 4.186401833460657,
"grad_norm": 0.06579844653606415,
"learning_rate": 7.791041145488454e-06,
"loss": 0.0068,
"step": 2740
},
{
"epoch": 4.201680672268908,
"grad_norm": 0.17928680777549744,
"learning_rate": 7.507334270377619e-06,
"loss": 0.0072,
"step": 2750
},
{
"epoch": 4.201680672268908,
"eval_loss": 0.04907457157969475,
"eval_runtime": 155.2924,
"eval_samples_per_second": 7.496,
"eval_steps_per_second": 7.496,
"step": 2750
},
{
"epoch": 4.2169595110771585,
"grad_norm": 0.10718922317028046,
"learning_rate": 7.228469443085206e-06,
"loss": 0.0033,
"step": 2760
},
{
"epoch": 4.232238349885408,
"grad_norm": 0.4315052628517151,
"learning_rate": 6.954478440314427e-06,
"loss": 0.0052,
"step": 2770
},
{
"epoch": 4.247517188693659,
"grad_norm": 0.2148362547159195,
"learning_rate": 6.685392483395259e-06,
"loss": 0.0022,
"step": 2780
},
{
"epoch": 4.26279602750191,
"grad_norm": 0.28928646445274353,
"learning_rate": 6.421242234726682e-06,
"loss": 0.0052,
"step": 2790
},
{
"epoch": 4.278074866310161,
"grad_norm": 0.33966100215911865,
"learning_rate": 6.1620577942827166e-06,
"loss": 0.0018,
"step": 2800
},
{
"epoch": 4.278074866310161,
"eval_loss": 0.05084596574306488,
"eval_runtime": 155.1856,
"eval_samples_per_second": 7.501,
"eval_steps_per_second": 7.501,
"step": 2800
},
{
"epoch": 4.293353705118411,
"grad_norm": 0.5706417560577393,
"learning_rate": 5.907868696182584e-06,
"loss": 0.0079,
"step": 2810
},
{
"epoch": 4.308632543926661,
"grad_norm": 0.024796945974230766,
"learning_rate": 5.658703905325186e-06,
"loss": 0.0046,
"step": 2820
},
{
"epoch": 4.323911382734912,
"grad_norm": 0.08715485036373138,
"learning_rate": 5.414591814088627e-06,
"loss": 0.0072,
"step": 2830
},
{
"epoch": 4.339190221543163,
"grad_norm": 0.3748434782028198,
"learning_rate": 5.17556023909489e-06,
"loss": 0.0046,
"step": 2840
},
{
"epoch": 4.354469060351414,
"grad_norm": 0.41014212369918823,
"learning_rate": 4.941636418040058e-06,
"loss": 0.0053,
"step": 2850
},
{
"epoch": 4.354469060351414,
"eval_loss": 0.050783995538949966,
"eval_runtime": 155.0265,
"eval_samples_per_second": 7.508,
"eval_steps_per_second": 7.508,
"step": 2850
},
{
"epoch": 4.369747899159664,
"grad_norm": 0.38217148184776306,
"learning_rate": 4.7128470065906925e-06,
"loss": 0.0057,
"step": 2860
},
{
"epoch": 4.385026737967914,
"grad_norm": 0.4754278063774109,
"learning_rate": 4.4892180753462744e-06,
"loss": 0.0048,
"step": 2870
},
{
"epoch": 4.400305576776165,
"grad_norm": 0.19204643368721008,
"learning_rate": 4.270775106868586e-06,
"loss": 0.0036,
"step": 2880
},
{
"epoch": 4.415584415584416,
"grad_norm": 0.08977972716093063,
"learning_rate": 4.057542992777868e-06,
"loss": 0.004,
"step": 2890
},
{
"epoch": 4.4308632543926665,
"grad_norm": 0.09212498366832733,
"learning_rate": 3.849546030916473e-06,
"loss": 0.0024,
"step": 2900
},
{
"epoch": 4.4308632543926665,
"eval_loss": 0.05243577063083649,
"eval_runtime": 155.0799,
"eval_samples_per_second": 7.506,
"eval_steps_per_second": 7.506,
"step": 2900
},
{
"epoch": 4.446142093200916,
"grad_norm": 0.13009680807590485,
"learning_rate": 3.646807922580098e-06,
"loss": 0.0021,
"step": 2910
},
{
"epoch": 4.461420932009167,
"grad_norm": 0.3079231381416321,
"learning_rate": 3.4493517698170164e-06,
"loss": 0.0083,
"step": 2920
},
{
"epoch": 4.476699770817418,
"grad_norm": 0.09543947130441666,
"learning_rate": 3.2572000727956186e-06,
"loss": 0.0043,
"step": 2930
},
{
"epoch": 4.491978609625669,
"grad_norm": 1.1745598316192627,
"learning_rate": 3.070374727240466e-06,
"loss": 0.0047,
"step": 2940
},
{
"epoch": 4.507257448433919,
"grad_norm": 0.24490894377231598,
"learning_rate": 2.8888970219373314e-06,
"loss": 0.0042,
"step": 2950
},
{
"epoch": 4.507257448433919,
"eval_loss": 0.053425174206495285,
"eval_runtime": 154.9883,
"eval_samples_per_second": 7.51,
"eval_steps_per_second": 7.51,
"step": 2950
},
{
"epoch": 4.522536287242169,
"grad_norm": 0.01050595659762621,
"learning_rate": 2.7127876363072736e-06,
"loss": 0.0035,
"step": 2960
},
{
"epoch": 4.53781512605042,
"grad_norm": 0.3492041826248169,
"learning_rate": 2.54206663805025e-06,
"loss": 0.007,
"step": 2970
},
{
"epoch": 4.553093964858671,
"grad_norm": 0.203172966837883,
"learning_rate": 2.3767534808584125e-06,
"loss": 0.0016,
"step": 2980
},
{
"epoch": 4.5683728036669216,
"grad_norm": 0.11281214654445648,
"learning_rate": 2.2168670021993075e-06,
"loss": 0.0035,
"step": 2990
},
{
"epoch": 4.583651642475171,
"grad_norm": 0.7499719262123108,
"learning_rate": 2.0624254211693894e-06,
"loss": 0.0056,
"step": 3000
},
{
"epoch": 4.583651642475171,
"eval_loss": 0.05354822427034378,
"eval_runtime": 154.9731,
"eval_samples_per_second": 7.511,
"eval_steps_per_second": 7.511,
"step": 3000
},
{
"epoch": 4.598930481283422,
"grad_norm": 0.059386882930994034,
"learning_rate": 1.9134463364179177e-06,
"loss": 0.0044,
"step": 3010
},
{
"epoch": 4.614209320091673,
"grad_norm": 0.5271286368370056,
"learning_rate": 1.7699467241416024e-06,
"loss": 0.0063,
"step": 3020
},
{
"epoch": 4.629488158899924,
"grad_norm": 0.4666330814361572,
"learning_rate": 1.6319429361501714e-06,
"loss": 0.004,
"step": 3030
},
{
"epoch": 4.6447669977081745,
"grad_norm": 0.0444776713848114,
"learning_rate": 1.4994506980030577e-06,
"loss": 0.0014,
"step": 3040
},
{
"epoch": 4.660045836516424,
"grad_norm": 0.1632346361875534,
"learning_rate": 1.3724851072174917e-06,
"loss": 0.0023,
"step": 3050
},
{
"epoch": 4.660045836516424,
"eval_loss": 0.054052937775850296,
"eval_runtime": 154.9518,
"eval_samples_per_second": 7.512,
"eval_steps_per_second": 7.512,
"step": 3050
},
{
"epoch": 4.675324675324675,
"grad_norm": 0.12274482846260071,
"learning_rate": 1.251060631548112e-06,
"loss": 0.0099,
"step": 3060
},
{
"epoch": 4.690603514132926,
"grad_norm": 0.272321492433548,
"learning_rate": 1.135191107338368e-06,
"loss": 0.0033,
"step": 3070
},
{
"epoch": 4.705882352941177,
"grad_norm": 0.41429057717323303,
"learning_rate": 1.0248897379438904e-06,
"loss": 0.0055,
"step": 3080
},
{
"epoch": 4.721161191749427,
"grad_norm": 0.06488903611898422,
"learning_rate": 9.201690922279405e-07,
"loss": 0.0052,
"step": 3090
},
{
"epoch": 4.736440030557677,
"grad_norm": 0.07039007544517517,
"learning_rate": 8.210411031291776e-07,
"loss": 0.0028,
"step": 3100
},
{
"epoch": 4.736440030557677,
"eval_loss": 0.0541132427752018,
"eval_runtime": 154.9537,
"eval_samples_per_second": 7.512,
"eval_steps_per_second": 7.512,
"step": 3100
},
{
"epoch": 4.751718869365928,
"grad_norm": 0.10857018083333969,
"learning_rate": 7.275170663019415e-07,
"loss": 0.0046,
"step": 3110
},
{
"epoch": 4.766997708174179,
"grad_norm": 0.8763617873191833,
"learning_rate": 6.396076388290484e-07,
"loss": 0.0053,
"step": 3120
},
{
"epoch": 4.7822765469824295,
"grad_norm": 0.6102765798568726,
"learning_rate": 5.573228380074736e-07,
"loss": 0.0047,
"step": 3130
},
{
"epoch": 4.79755538579068,
"grad_norm": 0.010208429768681526,
"learning_rate": 4.806720402068477e-07,
"loss": 0.0035,
"step": 3140
},
{
"epoch": 4.81283422459893,
"grad_norm": 0.060984183102846146,
"learning_rate": 4.0966397980100604e-07,
"loss": 0.0063,
"step": 3150
},
{
"epoch": 4.81283422459893,
"eval_loss": 0.053843092173337936,
"eval_runtime": 154.8435,
"eval_samples_per_second": 7.517,
"eval_steps_per_second": 7.517,
"step": 3150
},
{
"epoch": 4.828113063407181,
"grad_norm": 0.1609184741973877,
"learning_rate": 3.4430674817274575e-07,
"loss": 0.0025,
"step": 3160
},
{
"epoch": 4.843391902215432,
"grad_norm": 0.3119584023952484,
"learning_rate": 2.8460779279176896e-07,
"loss": 0.0081,
"step": 3170
},
{
"epoch": 4.8586707410236825,
"grad_norm": 0.07525625079870224,
"learning_rate": 2.3057391636606696e-07,
"loss": 0.0029,
"step": 3180
},
{
"epoch": 4.873949579831933,
"grad_norm": 0.4541275203227997,
"learning_rate": 1.8221127606674605e-07,
"loss": 0.0096,
"step": 3190
},
{
"epoch": 4.889228418640183,
"grad_norm": 0.07138187438249588,
"learning_rate": 1.3952538282639982e-07,
"loss": 0.0034,
"step": 3200
},
{
"epoch": 4.889228418640183,
"eval_loss": 0.053439512848854065,
"eval_runtime": 154.7923,
"eval_samples_per_second": 7.52,
"eval_steps_per_second": 7.52,
"step": 3200
},
{
"epoch": 4.904507257448434,
"grad_norm": 0.14377787709236145,
"learning_rate": 1.025211007111615e-07,
"loss": 0.0018,
"step": 3210
},
{
"epoch": 4.919786096256685,
"grad_norm": 0.014497518539428711,
"learning_rate": 7.120264636643615e-08,
"loss": 0.002,
"step": 3220
},
{
"epoch": 4.935064935064935,
"grad_norm": 0.037886813282966614,
"learning_rate": 4.5573588536407254e-08,
"loss": 0.0054,
"step": 3230
},
{
"epoch": 4.950343773873186,
"grad_norm": 0.521568775177002,
"learning_rate": 2.5636847657367623e-08,
"loss": 0.0079,
"step": 3240
},
{
"epoch": 4.965622612681436,
"grad_norm": 0.35444819927215576,
"learning_rate": 1.1394695524963306e-08,
"loss": 0.0077,
"step": 3250
},
{
"epoch": 4.965622612681436,
"eval_loss": 0.05358424782752991,
"eval_runtime": 154.8927,
"eval_samples_per_second": 7.515,
"eval_steps_per_second": 7.515,
"step": 3250
},
{
"epoch": 4.980901451489687,
"grad_norm": 0.15779346227645874,
"learning_rate": 2.8487550352951363e-09,
"loss": 0.0056,
"step": 3260
},
{
"epoch": 4.9961802902979375,
"grad_norm": 0.05626541003584862,
"learning_rate": 0.0,
"loss": 0.0048,
"step": 3270
},
{
"epoch": 4.9961802902979375,
"step": 3270,
"total_flos": 8.213792003390177e+17,
"train_loss": 0.0453332782948008,
"train_runtime": 35234.3821,
"train_samples_per_second": 1.486,
"train_steps_per_second": 0.093
}
],
"logging_steps": 10,
"max_steps": 3270,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.213792003390177e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}