{
"best_global_step": 82000,
"best_metric": 3.5338425636291504,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/resemble_to_push_frequency_1032/checkpoint-30000",
"epoch": 29.703052190121156,
"eval_steps": 1000,
"global_step": 102000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014561975768872321,
"grad_norm": 1.1883279085159302,
"learning_rate": 0.000294,
"loss": 8.4738,
"step": 50
},
{
"epoch": 0.029123951537744643,
"grad_norm": 0.5355018973350525,
"learning_rate": 0.0005939999999999999,
"loss": 6.7177,
"step": 100
},
{
"epoch": 0.04368592730661696,
"grad_norm": 0.5124205946922302,
"learning_rate": 0.0005998286713286713,
"loss": 6.3373,
"step": 150
},
{
"epoch": 0.058247903075489285,
"grad_norm": 0.5119701027870178,
"learning_rate": 0.0005996538461538461,
"loss": 6.1315,
"step": 200
},
{
"epoch": 0.0728098788443616,
"grad_norm": 0.5244374871253967,
"learning_rate": 0.0005994790209790209,
"loss": 5.989,
"step": 250
},
{
"epoch": 0.08737185461323392,
"grad_norm": 0.4514008164405823,
"learning_rate": 0.0005993041958041958,
"loss": 5.855,
"step": 300
},
{
"epoch": 0.10193383038210624,
"grad_norm": 0.41654127836227417,
"learning_rate": 0.0005991293706293705,
"loss": 5.7174,
"step": 350
},
{
"epoch": 0.11649580615097857,
"grad_norm": 0.4382358491420746,
"learning_rate": 0.0005989545454545454,
"loss": 5.5956,
"step": 400
},
{
"epoch": 0.13105778191985087,
"grad_norm": 0.549221932888031,
"learning_rate": 0.0005987797202797202,
"loss": 5.4909,
"step": 450
},
{
"epoch": 0.1456197576887232,
"grad_norm": 0.4766194224357605,
"learning_rate": 0.000598604895104895,
"loss": 5.4,
"step": 500
},
{
"epoch": 0.16018173345759554,
"grad_norm": 0.5269848704338074,
"learning_rate": 0.0005984300699300698,
"loss": 5.3285,
"step": 550
},
{
"epoch": 0.17474370922646784,
"grad_norm": 0.4068600535392761,
"learning_rate": 0.0005982552447552447,
"loss": 5.2405,
"step": 600
},
{
"epoch": 0.18930568499534017,
"grad_norm": 0.47763192653656006,
"learning_rate": 0.0005980804195804195,
"loss": 5.1842,
"step": 650
},
{
"epoch": 0.20386766076421248,
"grad_norm": 0.4301474392414093,
"learning_rate": 0.0005979055944055943,
"loss": 5.128,
"step": 700
},
{
"epoch": 0.2184296365330848,
"grad_norm": 0.39097562432289124,
"learning_rate": 0.0005977307692307691,
"loss": 5.0549,
"step": 750
},
{
"epoch": 0.23299161230195714,
"grad_norm": 0.4334748685359955,
"learning_rate": 0.000597555944055944,
"loss": 5.0117,
"step": 800
},
{
"epoch": 0.24755358807082944,
"grad_norm": 0.48352745175361633,
"learning_rate": 0.0005973811188811188,
"loss": 4.9778,
"step": 850
},
{
"epoch": 0.26211556383970175,
"grad_norm": 0.5069511532783508,
"learning_rate": 0.0005972062937062936,
"loss": 4.929,
"step": 900
},
{
"epoch": 0.2766775396085741,
"grad_norm": 0.47534990310668945,
"learning_rate": 0.0005970314685314685,
"loss": 4.8689,
"step": 950
},
{
"epoch": 0.2912395153774464,
"grad_norm": 0.41896557807922363,
"learning_rate": 0.0005968566433566433,
"loss": 4.8077,
"step": 1000
},
{
"epoch": 0.2912395153774464,
"eval_accuracy": 0.2569420831117012,
"eval_loss": 4.736781120300293,
"eval_runtime": 54.012,
"eval_samples_per_second": 308.154,
"eval_steps_per_second": 19.273,
"step": 1000
},
{
"epoch": 0.3058014911463187,
"grad_norm": 0.4391483664512634,
"learning_rate": 0.0005966818181818181,
"loss": 4.7635,
"step": 1050
},
{
"epoch": 0.3203634669151911,
"grad_norm": 0.42747387290000916,
"learning_rate": 0.0005965069930069929,
"loss": 4.7166,
"step": 1100
},
{
"epoch": 0.3349254426840634,
"grad_norm": 0.45990902185440063,
"learning_rate": 0.0005963321678321677,
"loss": 4.6916,
"step": 1150
},
{
"epoch": 0.3494874184529357,
"grad_norm": 0.4686427414417267,
"learning_rate": 0.0005961573426573425,
"loss": 4.6507,
"step": 1200
},
{
"epoch": 0.36404939422180804,
"grad_norm": 0.4218248128890991,
"learning_rate": 0.0005959825174825174,
"loss": 4.6193,
"step": 1250
},
{
"epoch": 0.37861136999068035,
"grad_norm": 0.4387086033821106,
"learning_rate": 0.0005958076923076922,
"loss": 4.5909,
"step": 1300
},
{
"epoch": 0.39317334575955265,
"grad_norm": 0.4904707372188568,
"learning_rate": 0.000595632867132867,
"loss": 4.5681,
"step": 1350
},
{
"epoch": 0.40773532152842495,
"grad_norm": 0.4466973543167114,
"learning_rate": 0.0005954580419580418,
"loss": 4.5442,
"step": 1400
},
{
"epoch": 0.4222972972972973,
"grad_norm": 0.39577051997184753,
"learning_rate": 0.0005952832167832168,
"loss": 4.5246,
"step": 1450
},
{
"epoch": 0.4368592730661696,
"grad_norm": 0.4266783595085144,
"learning_rate": 0.0005951083916083916,
"loss": 4.4966,
"step": 1500
},
{
"epoch": 0.4514212488350419,
"grad_norm": 0.44683218002319336,
"learning_rate": 0.0005949335664335664,
"loss": 4.4784,
"step": 1550
},
{
"epoch": 0.4659832246039143,
"grad_norm": 0.4334483742713928,
"learning_rate": 0.0005947587412587413,
"loss": 4.4545,
"step": 1600
},
{
"epoch": 0.4805452003727866,
"grad_norm": 0.4043181538581848,
"learning_rate": 0.0005945839160839161,
"loss": 4.4341,
"step": 1650
},
{
"epoch": 0.4951071761416589,
"grad_norm": 0.39420944452285767,
"learning_rate": 0.0005944090909090909,
"loss": 4.4209,
"step": 1700
},
{
"epoch": 0.5096691519105312,
"grad_norm": 0.38630977272987366,
"learning_rate": 0.0005942342657342657,
"loss": 4.3946,
"step": 1750
},
{
"epoch": 0.5242311276794035,
"grad_norm": 0.41390445828437805,
"learning_rate": 0.0005940594405594406,
"loss": 4.3856,
"step": 1800
},
{
"epoch": 0.5387931034482759,
"grad_norm": 0.42289671301841736,
"learning_rate": 0.0005938846153846153,
"loss": 4.3664,
"step": 1850
},
{
"epoch": 0.5533550792171482,
"grad_norm": 0.4519951641559601,
"learning_rate": 0.0005937097902097902,
"loss": 4.3621,
"step": 1900
},
{
"epoch": 0.5679170549860205,
"grad_norm": 0.39452847838401794,
"learning_rate": 0.000593534965034965,
"loss": 4.3481,
"step": 1950
},
{
"epoch": 0.5824790307548928,
"grad_norm": 0.42720577120780945,
"learning_rate": 0.0005933601398601398,
"loss": 4.3355,
"step": 2000
},
{
"epoch": 0.5824790307548928,
"eval_accuracy": 0.3001815149621097,
"eval_loss": 4.278709411621094,
"eval_runtime": 53.5915,
"eval_samples_per_second": 310.572,
"eval_steps_per_second": 19.425,
"step": 2000
},
{
"epoch": 0.5970410065237651,
"grad_norm": 0.41133683919906616,
"learning_rate": 0.0005931853146853146,
"loss": 4.3078,
"step": 2050
},
{
"epoch": 0.6116029822926374,
"grad_norm": 0.4028146266937256,
"learning_rate": 0.0005930104895104895,
"loss": 4.2918,
"step": 2100
},
{
"epoch": 0.6261649580615097,
"grad_norm": 0.39694148302078247,
"learning_rate": 0.0005928356643356643,
"loss": 4.3041,
"step": 2150
},
{
"epoch": 0.6407269338303822,
"grad_norm": 0.3814305365085602,
"learning_rate": 0.0005926608391608391,
"loss": 4.2923,
"step": 2200
},
{
"epoch": 0.6552889095992545,
"grad_norm": 0.3657163679599762,
"learning_rate": 0.000592486013986014,
"loss": 4.2723,
"step": 2250
},
{
"epoch": 0.6698508853681268,
"grad_norm": 0.38016071915626526,
"learning_rate": 0.0005923111888111888,
"loss": 4.2675,
"step": 2300
},
{
"epoch": 0.6844128611369991,
"grad_norm": 0.36959293484687805,
"learning_rate": 0.0005921363636363636,
"loss": 4.2591,
"step": 2350
},
{
"epoch": 0.6989748369058714,
"grad_norm": 0.3834407925605774,
"learning_rate": 0.0005919615384615384,
"loss": 4.2461,
"step": 2400
},
{
"epoch": 0.7135368126747437,
"grad_norm": 0.4075358510017395,
"learning_rate": 0.0005917867132867133,
"loss": 4.2657,
"step": 2450
},
{
"epoch": 0.7280987884436161,
"grad_norm": 0.3681492805480957,
"learning_rate": 0.0005916118881118881,
"loss": 4.2323,
"step": 2500
},
{
"epoch": 0.7426607642124884,
"grad_norm": 0.3669564425945282,
"learning_rate": 0.0005914370629370629,
"loss": 4.228,
"step": 2550
},
{
"epoch": 0.7572227399813607,
"grad_norm": 0.4404616057872772,
"learning_rate": 0.0005912622377622377,
"loss": 4.2063,
"step": 2600
},
{
"epoch": 0.771784715750233,
"grad_norm": 0.3834710717201233,
"learning_rate": 0.0005910874125874125,
"loss": 4.1905,
"step": 2650
},
{
"epoch": 0.7863466915191053,
"grad_norm": 0.3446391820907593,
"learning_rate": 0.0005909125874125873,
"loss": 4.189,
"step": 2700
},
{
"epoch": 0.8009086672879776,
"grad_norm": 0.38983553647994995,
"learning_rate": 0.0005907377622377622,
"loss": 4.1879,
"step": 2750
},
{
"epoch": 0.8154706430568499,
"grad_norm": 0.36041370034217834,
"learning_rate": 0.000590562937062937,
"loss": 4.1847,
"step": 2800
},
{
"epoch": 0.8300326188257223,
"grad_norm": 0.37385591864585876,
"learning_rate": 0.0005903881118881118,
"loss": 4.1689,
"step": 2850
},
{
"epoch": 0.8445945945945946,
"grad_norm": 0.389721542596817,
"learning_rate": 0.0005902132867132867,
"loss": 4.17,
"step": 2900
},
{
"epoch": 0.8591565703634669,
"grad_norm": 0.33258622884750366,
"learning_rate": 0.0005900384615384615,
"loss": 4.1634,
"step": 2950
},
{
"epoch": 0.8737185461323392,
"grad_norm": 0.37621429562568665,
"learning_rate": 0.0005898636363636363,
"loss": 4.1543,
"step": 3000
},
{
"epoch": 0.8737185461323392,
"eval_accuracy": 0.31520899734793917,
"eval_loss": 4.0961079597473145,
"eval_runtime": 53.531,
"eval_samples_per_second": 310.923,
"eval_steps_per_second": 19.447,
"step": 3000
},
{
"epoch": 0.8882805219012115,
"grad_norm": 0.3883044421672821,
"learning_rate": 0.0005896888111888111,
"loss": 4.1492,
"step": 3050
},
{
"epoch": 0.9028424976700838,
"grad_norm": 0.35951223969459534,
"learning_rate": 0.000589513986013986,
"loss": 4.1353,
"step": 3100
},
{
"epoch": 0.9174044734389561,
"grad_norm": 0.37769070267677307,
"learning_rate": 0.0005893391608391608,
"loss": 4.1325,
"step": 3150
},
{
"epoch": 0.9319664492078286,
"grad_norm": 0.3636488616466522,
"learning_rate": 0.0005891643356643356,
"loss": 4.1155,
"step": 3200
},
{
"epoch": 0.9465284249767009,
"grad_norm": 0.343472957611084,
"learning_rate": 0.0005889895104895104,
"loss": 4.1136,
"step": 3250
},
{
"epoch": 0.9610904007455732,
"grad_norm": 0.3756231367588043,
"learning_rate": 0.0005888146853146853,
"loss": 4.1242,
"step": 3300
},
{
"epoch": 0.9756523765144455,
"grad_norm": 0.3466884195804596,
"learning_rate": 0.00058863986013986,
"loss": 4.1025,
"step": 3350
},
{
"epoch": 0.9902143522833178,
"grad_norm": 0.34539276361465454,
"learning_rate": 0.0005884650349650349,
"loss": 4.0973,
"step": 3400
},
{
"epoch": 1.0046598322460392,
"grad_norm": 0.35815510153770447,
"learning_rate": 0.0005882902097902097,
"loss": 4.0526,
"step": 3450
},
{
"epoch": 1.0192218080149114,
"grad_norm": 0.35195082426071167,
"learning_rate": 0.0005881153846153845,
"loss": 4.0031,
"step": 3500
},
{
"epoch": 1.0337837837837838,
"grad_norm": 0.3461218774318695,
"learning_rate": 0.0005879405594405594,
"loss": 4.006,
"step": 3550
},
{
"epoch": 1.0483457595526562,
"grad_norm": 0.39880794286727905,
"learning_rate": 0.0005877657342657342,
"loss": 4.0192,
"step": 3600
},
{
"epoch": 1.0629077353215284,
"grad_norm": 0.34905946254730225,
"learning_rate": 0.000587590909090909,
"loss": 4.0175,
"step": 3650
},
{
"epoch": 1.0774697110904008,
"grad_norm": 0.3371531367301941,
"learning_rate": 0.0005874160839160838,
"loss": 4.0141,
"step": 3700
},
{
"epoch": 1.092031686859273,
"grad_norm": 0.3369714915752411,
"learning_rate": 0.0005872412587412587,
"loss": 3.9991,
"step": 3750
},
{
"epoch": 1.1065936626281454,
"grad_norm": 0.3621531128883362,
"learning_rate": 0.0005870664335664335,
"loss": 3.9857,
"step": 3800
},
{
"epoch": 1.1211556383970178,
"grad_norm": 0.34609004855155945,
"learning_rate": 0.0005868916083916083,
"loss": 4.0016,
"step": 3850
},
{
"epoch": 1.13571761416589,
"grad_norm": 0.34326431155204773,
"learning_rate": 0.0005867167832167831,
"loss": 4.005,
"step": 3900
},
{
"epoch": 1.1502795899347624,
"grad_norm": 0.3505842387676239,
"learning_rate": 0.000586541958041958,
"loss": 4.0129,
"step": 3950
},
{
"epoch": 1.1648415657036346,
"grad_norm": 0.3398914635181427,
"learning_rate": 0.0005863671328671328,
"loss": 3.9969,
"step": 4000
},
{
"epoch": 1.1648415657036346,
"eval_accuracy": 0.3254560448785691,
"eval_loss": 3.988481283187866,
"eval_runtime": 53.593,
"eval_samples_per_second": 310.563,
"eval_steps_per_second": 19.424,
"step": 4000
},
{
"epoch": 1.179403541472507,
"grad_norm": 0.33963337540626526,
"learning_rate": 0.0005861923076923076,
"loss": 3.9891,
"step": 4050
},
{
"epoch": 1.1939655172413792,
"grad_norm": 0.3412471115589142,
"learning_rate": 0.0005860174825174824,
"loss": 3.9894,
"step": 4100
},
{
"epoch": 1.2085274930102516,
"grad_norm": 0.3301082253456116,
"learning_rate": 0.0005858426573426573,
"loss": 3.9921,
"step": 4150
},
{
"epoch": 1.2230894687791238,
"grad_norm": 0.32301121950149536,
"learning_rate": 0.000585667832167832,
"loss": 3.9788,
"step": 4200
},
{
"epoch": 1.2376514445479962,
"grad_norm": 0.32878032326698303,
"learning_rate": 0.000585493006993007,
"loss": 3.9755,
"step": 4250
},
{
"epoch": 1.2522134203168687,
"grad_norm": 0.3783838748931885,
"learning_rate": 0.0005853181818181817,
"loss": 3.9738,
"step": 4300
},
{
"epoch": 1.2667753960857409,
"grad_norm": 0.3759077191352844,
"learning_rate": 0.0005851433566433565,
"loss": 3.9662,
"step": 4350
},
{
"epoch": 1.2813373718546133,
"grad_norm": 0.34620216488838196,
"learning_rate": 0.0005849685314685315,
"loss": 3.9784,
"step": 4400
},
{
"epoch": 1.2958993476234855,
"grad_norm": 0.3216020166873932,
"learning_rate": 0.0005847937062937063,
"loss": 3.9534,
"step": 4450
},
{
"epoch": 1.3104613233923579,
"grad_norm": 0.3530759811401367,
"learning_rate": 0.0005846188811188811,
"loss": 3.9602,
"step": 4500
},
{
"epoch": 1.3250232991612303,
"grad_norm": 0.34322479367256165,
"learning_rate": 0.0005844440559440559,
"loss": 3.9551,
"step": 4550
},
{
"epoch": 1.3395852749301025,
"grad_norm": 0.3483957350254059,
"learning_rate": 0.0005842692307692308,
"loss": 3.9348,
"step": 4600
},
{
"epoch": 1.354147250698975,
"grad_norm": 0.33279702067375183,
"learning_rate": 0.0005840944055944056,
"loss": 3.9417,
"step": 4650
},
{
"epoch": 1.368709226467847,
"grad_norm": 0.3175128698348999,
"learning_rate": 0.0005839195804195804,
"loss": 3.9446,
"step": 4700
},
{
"epoch": 1.3832712022367195,
"grad_norm": 0.3718789219856262,
"learning_rate": 0.0005837447552447552,
"loss": 3.9483,
"step": 4750
},
{
"epoch": 1.397833178005592,
"grad_norm": 0.33285394310951233,
"learning_rate": 0.0005835699300699301,
"loss": 3.9436,
"step": 4800
},
{
"epoch": 1.4123951537744641,
"grad_norm": 0.3199831247329712,
"learning_rate": 0.0005833951048951048,
"loss": 3.9369,
"step": 4850
},
{
"epoch": 1.4269571295433363,
"grad_norm": 0.3186275064945221,
"learning_rate": 0.0005832202797202797,
"loss": 3.9344,
"step": 4900
},
{
"epoch": 1.4415191053122087,
"grad_norm": 0.34162044525146484,
"learning_rate": 0.0005830454545454546,
"loss": 3.9271,
"step": 4950
},
{
"epoch": 1.4560810810810811,
"grad_norm": 0.32524988055229187,
"learning_rate": 0.0005828706293706293,
"loss": 3.9235,
"step": 5000
},
{
"epoch": 1.4560810810810811,
"eval_accuracy": 0.33175510083145565,
"eval_loss": 3.9142301082611084,
"eval_runtime": 53.6459,
"eval_samples_per_second": 310.257,
"eval_steps_per_second": 19.405,
"step": 5000
},
{
"epoch": 1.4706430568499533,
"grad_norm": 0.3223293721675873,
"learning_rate": 0.0005826958041958042,
"loss": 3.9109,
"step": 5050
},
{
"epoch": 1.4852050326188257,
"grad_norm": 0.334349662065506,
"learning_rate": 0.000582520979020979,
"loss": 3.932,
"step": 5100
},
{
"epoch": 1.499767008387698,
"grad_norm": 0.33222952485084534,
"learning_rate": 0.0005823461538461538,
"loss": 3.9152,
"step": 5150
},
{
"epoch": 1.5143289841565704,
"grad_norm": 0.3161272406578064,
"learning_rate": 0.0005821713286713286,
"loss": 3.932,
"step": 5200
},
{
"epoch": 1.5288909599254428,
"grad_norm": 0.3443957269191742,
"learning_rate": 0.0005819965034965035,
"loss": 3.9341,
"step": 5250
},
{
"epoch": 1.543452935694315,
"grad_norm": 0.35085344314575195,
"learning_rate": 0.0005818216783216783,
"loss": 3.9098,
"step": 5300
},
{
"epoch": 1.5580149114631874,
"grad_norm": 0.31371259689331055,
"learning_rate": 0.0005816468531468531,
"loss": 3.9094,
"step": 5350
},
{
"epoch": 1.5725768872320596,
"grad_norm": 0.32282862067222595,
"learning_rate": 0.0005814720279720279,
"loss": 3.9211,
"step": 5400
},
{
"epoch": 1.587138863000932,
"grad_norm": 0.3314749598503113,
"learning_rate": 0.0005812972027972028,
"loss": 3.9103,
"step": 5450
},
{
"epoch": 1.6017008387698044,
"grad_norm": 0.3175921142101288,
"learning_rate": 0.0005811223776223776,
"loss": 3.901,
"step": 5500
},
{
"epoch": 1.6162628145386766,
"grad_norm": 0.3080521523952484,
"learning_rate": 0.0005809475524475524,
"loss": 3.8964,
"step": 5550
},
{
"epoch": 1.6308247903075488,
"grad_norm": 0.3255755305290222,
"learning_rate": 0.0005807727272727272,
"loss": 3.8989,
"step": 5600
},
{
"epoch": 1.6453867660764212,
"grad_norm": 0.3227233290672302,
"learning_rate": 0.0005805979020979021,
"loss": 3.9009,
"step": 5650
},
{
"epoch": 1.6599487418452936,
"grad_norm": 0.3223280906677246,
"learning_rate": 0.0005804230769230769,
"loss": 3.9062,
"step": 5700
},
{
"epoch": 1.674510717614166,
"grad_norm": 0.32244929671287537,
"learning_rate": 0.0005802482517482517,
"loss": 3.8922,
"step": 5750
},
{
"epoch": 1.6890726933830382,
"grad_norm": 0.3163047432899475,
"learning_rate": 0.0005800734265734265,
"loss": 3.894,
"step": 5800
},
{
"epoch": 1.7036346691519104,
"grad_norm": 0.31790420413017273,
"learning_rate": 0.0005798986013986013,
"loss": 3.8831,
"step": 5850
},
{
"epoch": 1.7181966449207828,
"grad_norm": 0.323431134223938,
"learning_rate": 0.0005797237762237762,
"loss": 3.8919,
"step": 5900
},
{
"epoch": 1.7327586206896552,
"grad_norm": 0.3332827687263489,
"learning_rate": 0.000579548951048951,
"loss": 3.8789,
"step": 5950
},
{
"epoch": 1.7473205964585274,
"grad_norm": 0.31257835030555725,
"learning_rate": 0.0005793741258741258,
"loss": 3.8897,
"step": 6000
},
{
"epoch": 1.7473205964585274,
"eval_accuracy": 0.336940117228707,
"eval_loss": 3.8567962646484375,
"eval_runtime": 53.6231,
"eval_samples_per_second": 310.389,
"eval_steps_per_second": 19.413,
"step": 6000
},
{
"epoch": 1.7618825722273999,
"grad_norm": 0.31113606691360474,
"learning_rate": 0.0005791993006993006,
"loss": 3.8798,
"step": 6050
},
{
"epoch": 1.776444547996272,
"grad_norm": 0.34054034948349,
"learning_rate": 0.0005790244755244755,
"loss": 3.8707,
"step": 6100
},
{
"epoch": 1.7910065237651445,
"grad_norm": 0.32504695653915405,
"learning_rate": 0.0005788496503496503,
"loss": 3.8798,
"step": 6150
},
{
"epoch": 1.8055684995340169,
"grad_norm": 0.3181167244911194,
"learning_rate": 0.0005786748251748251,
"loss": 3.8682,
"step": 6200
},
{
"epoch": 1.820130475302889,
"grad_norm": 0.33279845118522644,
"learning_rate": 0.0005784999999999999,
"loss": 3.8627,
"step": 6250
},
{
"epoch": 1.8346924510717613,
"grad_norm": 0.3395768702030182,
"learning_rate": 0.0005783251748251748,
"loss": 3.8649,
"step": 6300
},
{
"epoch": 1.8492544268406337,
"grad_norm": 0.34127599000930786,
"learning_rate": 0.0005781503496503496,
"loss": 3.8617,
"step": 6350
},
{
"epoch": 1.863816402609506,
"grad_norm": 0.3437989354133606,
"learning_rate": 0.0005779755244755244,
"loss": 3.8601,
"step": 6400
},
{
"epoch": 1.8783783783783785,
"grad_norm": 0.32382577657699585,
"learning_rate": 0.0005778006993006993,
"loss": 3.8475,
"step": 6450
},
{
"epoch": 1.8929403541472507,
"grad_norm": 0.3303312659263611,
"learning_rate": 0.000577625874125874,
"loss": 3.8543,
"step": 6500
},
{
"epoch": 1.907502329916123,
"grad_norm": 0.32527676224708557,
"learning_rate": 0.0005774510489510489,
"loss": 3.8602,
"step": 6550
},
{
"epoch": 1.9220643056849953,
"grad_norm": 0.30908825993537903,
"learning_rate": 0.0005772762237762237,
"loss": 3.8526,
"step": 6600
},
{
"epoch": 1.9366262814538677,
"grad_norm": 0.3196081817150116,
"learning_rate": 0.0005771013986013985,
"loss": 3.8611,
"step": 6650
},
{
"epoch": 1.9511882572227401,
"grad_norm": 0.32832983136177063,
"learning_rate": 0.0005769265734265733,
"loss": 3.8493,
"step": 6700
},
{
"epoch": 1.9657502329916123,
"grad_norm": 0.3023565411567688,
"learning_rate": 0.0005767517482517482,
"loss": 3.8493,
"step": 6750
},
{
"epoch": 1.9803122087604845,
"grad_norm": 0.3191196024417877,
"learning_rate": 0.000576576923076923,
"loss": 3.8488,
"step": 6800
},
{
"epoch": 1.994874184529357,
"grad_norm": 0.31798142194747925,
"learning_rate": 0.0005764020979020978,
"loss": 3.8442,
"step": 6850
},
{
"epoch": 2.0093196644920783,
"grad_norm": 0.33970579504966736,
"learning_rate": 0.0005762272727272726,
"loss": 3.7736,
"step": 6900
},
{
"epoch": 2.0238816402609507,
"grad_norm": 0.3101087808609009,
"learning_rate": 0.0005760524475524475,
"loss": 3.7549,
"step": 6950
},
{
"epoch": 2.0384436160298227,
"grad_norm": 0.32137158513069153,
"learning_rate": 0.0005758776223776223,
"loss": 3.7427,
"step": 7000
},
{
"epoch": 2.0384436160298227,
"eval_accuracy": 0.34147023121699915,
"eval_loss": 3.8138842582702637,
"eval_runtime": 53.7752,
"eval_samples_per_second": 309.511,
"eval_steps_per_second": 19.358,
"step": 7000
},
{
"epoch": 2.053005591798695,
"grad_norm": 0.3288806974887848,
"learning_rate": 0.0005757027972027971,
"loss": 3.7408,
"step": 7050
},
{
"epoch": 2.0675675675675675,
"grad_norm": 0.3187062740325928,
"learning_rate": 0.000575527972027972,
"loss": 3.7595,
"step": 7100
},
{
"epoch": 2.08212954333644,
"grad_norm": 0.31951916217803955,
"learning_rate": 0.0005753531468531468,
"loss": 3.7586,
"step": 7150
},
{
"epoch": 2.0966915191053124,
"grad_norm": 0.31987684965133667,
"learning_rate": 0.0005751783216783216,
"loss": 3.7384,
"step": 7200
},
{
"epoch": 2.1112534948741843,
"grad_norm": 0.3017788529396057,
"learning_rate": 0.0005750034965034964,
"loss": 3.7678,
"step": 7250
},
{
"epoch": 2.1258154706430568,
"grad_norm": 0.32916632294654846,
"learning_rate": 0.0005748286713286712,
"loss": 3.7579,
"step": 7300
},
{
"epoch": 2.140377446411929,
"grad_norm": 0.3227897882461548,
"learning_rate": 0.000574653846153846,
"loss": 3.7496,
"step": 7350
},
{
"epoch": 2.1549394221808016,
"grad_norm": 0.30625689029693604,
"learning_rate": 0.000574479020979021,
"loss": 3.7517,
"step": 7400
},
{
"epoch": 2.169501397949674,
"grad_norm": 0.3217352032661438,
"learning_rate": 0.0005743041958041958,
"loss": 3.741,
"step": 7450
},
{
"epoch": 2.184063373718546,
"grad_norm": 0.32221558690071106,
"learning_rate": 0.0005741293706293706,
"loss": 3.7483,
"step": 7500
},
{
"epoch": 2.1986253494874184,
"grad_norm": 0.31215184926986694,
"learning_rate": 0.0005739545454545454,
"loss": 3.7611,
"step": 7550
},
{
"epoch": 2.213187325256291,
"grad_norm": 0.3266601264476776,
"learning_rate": 0.0005737797202797203,
"loss": 3.7547,
"step": 7600
},
{
"epoch": 2.227749301025163,
"grad_norm": 0.3010008931159973,
"learning_rate": 0.0005736048951048951,
"loss": 3.7511,
"step": 7650
},
{
"epoch": 2.2423112767940356,
"grad_norm": 0.3031994700431824,
"learning_rate": 0.0005734300699300699,
"loss": 3.76,
"step": 7700
},
{
"epoch": 2.2568732525629076,
"grad_norm": 0.32502564787864685,
"learning_rate": 0.0005732552447552448,
"loss": 3.7579,
"step": 7750
},
{
"epoch": 2.27143522833178,
"grad_norm": 0.3163962960243225,
"learning_rate": 0.0005730804195804196,
"loss": 3.7531,
"step": 7800
},
{
"epoch": 2.2859972041006524,
"grad_norm": 0.3225083649158478,
"learning_rate": 0.0005729055944055944,
"loss": 3.7468,
"step": 7850
},
{
"epoch": 2.300559179869525,
"grad_norm": 0.3353878855705261,
"learning_rate": 0.0005727307692307692,
"loss": 3.7372,
"step": 7900
},
{
"epoch": 2.315121155638397,
"grad_norm": 0.3147525191307068,
"learning_rate": 0.0005725559440559441,
"loss": 3.7397,
"step": 7950
},
{
"epoch": 2.3296831314072692,
"grad_norm": 0.31996214389801025,
"learning_rate": 0.0005723811188811188,
"loss": 3.7463,
"step": 8000
},
{
"epoch": 2.3296831314072692,
"eval_accuracy": 0.3446897173502343,
"eval_loss": 3.7829577922821045,
"eval_runtime": 53.5949,
"eval_samples_per_second": 310.552,
"eval_steps_per_second": 19.423,
"step": 8000
},
{
"epoch": 2.3442451071761417,
"grad_norm": 0.329949289560318,
"learning_rate": 0.0005722062937062937,
"loss": 3.7594,
"step": 8050
},
{
"epoch": 2.358807082945014,
"grad_norm": 0.3147149384021759,
"learning_rate": 0.0005720314685314685,
"loss": 3.7547,
"step": 8100
},
{
"epoch": 2.3733690587138865,
"grad_norm": 0.299905925989151,
"learning_rate": 0.0005718566433566433,
"loss": 3.7459,
"step": 8150
},
{
"epoch": 2.3879310344827585,
"grad_norm": 0.33270028233528137,
"learning_rate": 0.0005716818181818181,
"loss": 3.7605,
"step": 8200
},
{
"epoch": 2.402493010251631,
"grad_norm": 0.34615278244018555,
"learning_rate": 0.000571506993006993,
"loss": 3.7527,
"step": 8250
},
{
"epoch": 2.4170549860205033,
"grad_norm": 0.31800028681755066,
"learning_rate": 0.0005713321678321678,
"loss": 3.7431,
"step": 8300
},
{
"epoch": 2.4316169617893757,
"grad_norm": 0.3355848789215088,
"learning_rate": 0.0005711573426573426,
"loss": 3.7401,
"step": 8350
},
{
"epoch": 2.4461789375582477,
"grad_norm": 0.3147391080856323,
"learning_rate": 0.0005709825174825175,
"loss": 3.753,
"step": 8400
},
{
"epoch": 2.46074091332712,
"grad_norm": 0.33956778049468994,
"learning_rate": 0.0005708076923076923,
"loss": 3.7509,
"step": 8450
},
{
"epoch": 2.4753028890959925,
"grad_norm": 0.3316870927810669,
"learning_rate": 0.0005706328671328671,
"loss": 3.7459,
"step": 8500
},
{
"epoch": 2.489864864864865,
"grad_norm": 0.3233761787414551,
"learning_rate": 0.0005704580419580419,
"loss": 3.7415,
"step": 8550
},
{
"epoch": 2.5044268406337373,
"grad_norm": 0.31071245670318604,
"learning_rate": 0.0005702832167832168,
"loss": 3.7364,
"step": 8600
},
{
"epoch": 2.5189888164026097,
"grad_norm": 0.3265255093574524,
"learning_rate": 0.0005701083916083916,
"loss": 3.7471,
"step": 8650
},
{
"epoch": 2.5335507921714817,
"grad_norm": 0.3139096200466156,
"learning_rate": 0.0005699335664335664,
"loss": 3.7485,
"step": 8700
},
{
"epoch": 2.548112767940354,
"grad_norm": 0.2998340427875519,
"learning_rate": 0.0005697587412587412,
"loss": 3.7417,
"step": 8750
},
{
"epoch": 2.5626747437092265,
"grad_norm": 0.29732733964920044,
"learning_rate": 0.000569583916083916,
"loss": 3.7367,
"step": 8800
},
{
"epoch": 2.5772367194780985,
"grad_norm": 0.3112543821334839,
"learning_rate": 0.0005694090909090908,
"loss": 3.7397,
"step": 8850
},
{
"epoch": 2.591798695246971,
"grad_norm": 0.3016383647918701,
"learning_rate": 0.0005692342657342657,
"loss": 3.7537,
"step": 8900
},
{
"epoch": 2.6063606710158433,
"grad_norm": 0.32809779047966003,
"learning_rate": 0.0005690594405594405,
"loss": 3.7348,
"step": 8950
},
{
"epoch": 2.6209226467847158,
"grad_norm": 0.319830060005188,
"learning_rate": 0.0005688846153846153,
"loss": 3.7415,
"step": 9000
},
{
"epoch": 2.6209226467847158,
"eval_accuracy": 0.3471662361006664,
"eval_loss": 3.7527623176574707,
"eval_runtime": 53.7475,
"eval_samples_per_second": 309.67,
"eval_steps_per_second": 19.368,
"step": 9000
},
{
"epoch": 2.635484622553588,
"grad_norm": 0.32060742378234863,
"learning_rate": 0.0005687097902097901,
"loss": 3.7446,
"step": 9050
},
{
"epoch": 2.6500465983224606,
"grad_norm": 0.3294517993927002,
"learning_rate": 0.000568534965034965,
"loss": 3.7421,
"step": 9100
},
{
"epoch": 2.6646085740913326,
"grad_norm": 0.30391988158226013,
"learning_rate": 0.0005683601398601398,
"loss": 3.7342,
"step": 9150
},
{
"epoch": 2.679170549860205,
"grad_norm": 0.3142305910587311,
"learning_rate": 0.0005681853146853146,
"loss": 3.7423,
"step": 9200
},
{
"epoch": 2.6937325256290774,
"grad_norm": 0.3378458023071289,
"learning_rate": 0.0005680104895104895,
"loss": 3.7332,
"step": 9250
},
{
"epoch": 2.70829450139795,
"grad_norm": 0.3309178054332733,
"learning_rate": 0.0005678356643356643,
"loss": 3.7415,
"step": 9300
},
{
"epoch": 2.7228564771668218,
"grad_norm": 0.30197566747665405,
"learning_rate": 0.0005676608391608391,
"loss": 3.7353,
"step": 9350
},
{
"epoch": 2.737418452935694,
"grad_norm": 0.2995157837867737,
"learning_rate": 0.0005674860139860139,
"loss": 3.7242,
"step": 9400
},
{
"epoch": 2.7519804287045666,
"grad_norm": 0.3359026312828064,
"learning_rate": 0.0005673111888111888,
"loss": 3.7318,
"step": 9450
},
{
"epoch": 2.766542404473439,
"grad_norm": 0.32080361247062683,
"learning_rate": 0.0005671363636363635,
"loss": 3.7254,
"step": 9500
},
{
"epoch": 2.7811043802423114,
"grad_norm": 0.3272884786128998,
"learning_rate": 0.0005669615384615384,
"loss": 3.7415,
"step": 9550
},
{
"epoch": 2.795666356011184,
"grad_norm": 0.31400012969970703,
"learning_rate": 0.0005667867132867132,
"loss": 3.7273,
"step": 9600
},
{
"epoch": 2.810228331780056,
"grad_norm": 0.30123645067214966,
"learning_rate": 0.000566611888111888,
"loss": 3.7345,
"step": 9650
},
{
"epoch": 2.8247903075489282,
"grad_norm": 0.33027133345603943,
"learning_rate": 0.0005664370629370628,
"loss": 3.7361,
"step": 9700
},
{
"epoch": 2.8393522833178007,
"grad_norm": 0.30711743235588074,
"learning_rate": 0.0005662622377622377,
"loss": 3.7336,
"step": 9750
},
{
"epoch": 2.8539142590866726,
"grad_norm": 0.31669116020202637,
"learning_rate": 0.0005660874125874125,
"loss": 3.7419,
"step": 9800
},
{
"epoch": 2.868476234855545,
"grad_norm": 0.3051603138446808,
"learning_rate": 0.0005659125874125873,
"loss": 3.7307,
"step": 9850
},
{
"epoch": 2.8830382106244175,
"grad_norm": 0.3133176267147064,
"learning_rate": 0.0005657377622377622,
"loss": 3.7137,
"step": 9900
},
{
"epoch": 2.89760018639329,
"grad_norm": 0.3297509253025055,
"learning_rate": 0.000565562937062937,
"loss": 3.7195,
"step": 9950
},
{
"epoch": 2.9121621621621623,
"grad_norm": 0.30900847911834717,
"learning_rate": 0.0005653881118881118,
"loss": 3.7289,
"step": 10000
},
{
"epoch": 2.9121621621621623,
"eval_accuracy": 0.3495230617357806,
"eval_loss": 3.728936195373535,
"eval_runtime": 53.587,
"eval_samples_per_second": 310.597,
"eval_steps_per_second": 19.426,
"step": 10000
},
{
"epoch": 2.9267241379310347,
"grad_norm": 0.318390816450119,
"learning_rate": 0.0005652132867132866,
"loss": 3.7109,
"step": 10050
},
{
"epoch": 2.9412861136999067,
"grad_norm": 0.3189872205257416,
"learning_rate": 0.0005650384615384615,
"loss": 3.7119,
"step": 10100
},
{
"epoch": 2.955848089468779,
"grad_norm": 0.30972564220428467,
"learning_rate": 0.0005648636363636363,
"loss": 3.7249,
"step": 10150
},
{
"epoch": 2.9704100652376515,
"grad_norm": 0.31740841269493103,
"learning_rate": 0.0005646888111888111,
"loss": 3.716,
"step": 10200
},
{
"epoch": 2.984972041006524,
"grad_norm": 0.3237694501876831,
"learning_rate": 0.000564513986013986,
"loss": 3.7168,
"step": 10250
},
{
"epoch": 2.999534016775396,
"grad_norm": 0.3141046464443207,
"learning_rate": 0.0005643391608391607,
"loss": 3.7109,
"step": 10300
},
{
"epoch": 3.0139794967381173,
"grad_norm": 0.3134516775608063,
"learning_rate": 0.0005641643356643355,
"loss": 3.6157,
"step": 10350
},
{
"epoch": 3.0285414725069897,
"grad_norm": 0.32396963238716125,
"learning_rate": 0.0005639895104895105,
"loss": 3.6185,
"step": 10400
},
{
"epoch": 3.043103448275862,
"grad_norm": 0.32163459062576294,
"learning_rate": 0.0005638146853146853,
"loss": 3.6242,
"step": 10450
},
{
"epoch": 3.0576654240447345,
"grad_norm": 0.3513883054256439,
"learning_rate": 0.0005636398601398601,
"loss": 3.6147,
"step": 10500
},
{
"epoch": 3.072227399813607,
"grad_norm": 0.3110412359237671,
"learning_rate": 0.000563465034965035,
"loss": 3.6164,
"step": 10550
},
{
"epoch": 3.086789375582479,
"grad_norm": 0.3234538733959198,
"learning_rate": 0.0005632902097902098,
"loss": 3.6387,
"step": 10600
},
{
"epoch": 3.1013513513513513,
"grad_norm": 0.3188924491405487,
"learning_rate": 0.0005631153846153846,
"loss": 3.6259,
"step": 10650
},
{
"epoch": 3.1159133271202237,
"grad_norm": 0.32610708475112915,
"learning_rate": 0.0005629405594405594,
"loss": 3.6288,
"step": 10700
},
{
"epoch": 3.130475302889096,
"grad_norm": 0.31207531690597534,
"learning_rate": 0.0005627657342657343,
"loss": 3.6214,
"step": 10750
},
{
"epoch": 3.145037278657968,
"grad_norm": 0.30246466398239136,
"learning_rate": 0.0005625909090909091,
"loss": 3.6177,
"step": 10800
},
{
"epoch": 3.1595992544268405,
"grad_norm": 0.3239559531211853,
"learning_rate": 0.0005624160839160839,
"loss": 3.6264,
"step": 10850
},
{
"epoch": 3.174161230195713,
"grad_norm": 0.29902079701423645,
"learning_rate": 0.0005622412587412587,
"loss": 3.6267,
"step": 10900
},
{
"epoch": 3.1887232059645854,
"grad_norm": 0.31506234407424927,
"learning_rate": 0.0005620664335664336,
"loss": 3.6461,
"step": 10950
},
{
"epoch": 3.2032851817334578,
"grad_norm": 0.3308972418308258,
"learning_rate": 0.0005618916083916083,
"loss": 3.6325,
"step": 11000
},
{
"epoch": 3.2032851817334578,
"eval_accuracy": 0.35166636802176204,
"eval_loss": 3.715419292449951,
"eval_runtime": 53.6089,
"eval_samples_per_second": 310.471,
"eval_steps_per_second": 19.418,
"step": 11000
},
{
"epoch": 3.2178471575023297,
"grad_norm": 0.3090171217918396,
"learning_rate": 0.0005617167832167832,
"loss": 3.6222,
"step": 11050
},
{
"epoch": 3.232409133271202,
"grad_norm": 0.3381396234035492,
"learning_rate": 0.000561541958041958,
"loss": 3.6412,
"step": 11100
},
{
"epoch": 3.2469711090400746,
"grad_norm": 0.31572040915489197,
"learning_rate": 0.0005613671328671328,
"loss": 3.6387,
"step": 11150
},
{
"epoch": 3.261533084808947,
"grad_norm": 0.3284361660480499,
"learning_rate": 0.0005611923076923077,
"loss": 3.6426,
"step": 11200
},
{
"epoch": 3.276095060577819,
"grad_norm": 0.30892351269721985,
"learning_rate": 0.0005610174825174825,
"loss": 3.6434,
"step": 11250
},
{
"epoch": 3.2906570363466914,
"grad_norm": 0.31443294882774353,
"learning_rate": 0.0005608426573426573,
"loss": 3.6351,
"step": 11300
},
{
"epoch": 3.305219012115564,
"grad_norm": 0.32290583848953247,
"learning_rate": 0.0005606678321678321,
"loss": 3.6447,
"step": 11350
},
{
"epoch": 3.319780987884436,
"grad_norm": 0.30862802267074585,
"learning_rate": 0.000560493006993007,
"loss": 3.634,
"step": 11400
},
{
"epoch": 3.3343429636533086,
"grad_norm": 0.31837892532348633,
"learning_rate": 0.0005603181818181818,
"loss": 3.635,
"step": 11450
},
{
"epoch": 3.348904939422181,
"grad_norm": 0.3183481693267822,
"learning_rate": 0.0005601433566433566,
"loss": 3.6532,
"step": 11500
},
{
"epoch": 3.363466915191053,
"grad_norm": 0.334343284368515,
"learning_rate": 0.0005599685314685314,
"loss": 3.6394,
"step": 11550
},
{
"epoch": 3.3780288909599254,
"grad_norm": 0.2964753806591034,
"learning_rate": 0.0005597937062937063,
"loss": 3.6397,
"step": 11600
},
{
"epoch": 3.392590866728798,
"grad_norm": 0.34382757544517517,
"learning_rate": 0.0005596188811188811,
"loss": 3.6302,
"step": 11650
},
{
"epoch": 3.4071528424976703,
"grad_norm": 0.31933602690696716,
"learning_rate": 0.0005594440559440559,
"loss": 3.631,
"step": 11700
},
{
"epoch": 3.4217148182665422,
"grad_norm": 0.31780973076820374,
"learning_rate": 0.0005592692307692307,
"loss": 3.6434,
"step": 11750
},
{
"epoch": 3.4362767940354146,
"grad_norm": 0.31436842679977417,
"learning_rate": 0.0005590944055944055,
"loss": 3.6434,
"step": 11800
},
{
"epoch": 3.450838769804287,
"grad_norm": 0.3354842960834503,
"learning_rate": 0.0005589195804195803,
"loss": 3.6543,
"step": 11850
},
{
"epoch": 3.4654007455731595,
"grad_norm": 0.3190697431564331,
"learning_rate": 0.0005587447552447552,
"loss": 3.6459,
"step": 11900
},
{
"epoch": 3.479962721342032,
"grad_norm": 0.3107091188430786,
"learning_rate": 0.00055856993006993,
"loss": 3.637,
"step": 11950
},
{
"epoch": 3.494524697110904,
"grad_norm": 0.30985352396965027,
"learning_rate": 0.0005583951048951048,
"loss": 3.6268,
"step": 12000
},
{
"epoch": 3.494524697110904,
"eval_accuracy": 0.35349927172970896,
"eval_loss": 3.69624400138855,
"eval_runtime": 53.5399,
"eval_samples_per_second": 310.871,
"eval_steps_per_second": 19.443,
"step": 12000
},
{
"epoch": 3.5090866728797763,
"grad_norm": 0.3227110505104065,
"learning_rate": 0.0005582202797202797,
"loss": 3.6509,
"step": 12050
},
{
"epoch": 3.5236486486486487,
"grad_norm": 0.3478952944278717,
"learning_rate": 0.0005580454545454545,
"loss": 3.6505,
"step": 12100
},
{
"epoch": 3.538210624417521,
"grad_norm": 0.30305859446525574,
"learning_rate": 0.0005578706293706293,
"loss": 3.6373,
"step": 12150
},
{
"epoch": 3.552772600186393,
"grad_norm": 0.33472204208374023,
"learning_rate": 0.0005576958041958041,
"loss": 3.6524,
"step": 12200
},
{
"epoch": 3.5673345759552655,
"grad_norm": 0.32605162262916565,
"learning_rate": 0.000557520979020979,
"loss": 3.6371,
"step": 12250
},
{
"epoch": 3.581896551724138,
"grad_norm": 0.31411874294281006,
"learning_rate": 0.0005573461538461538,
"loss": 3.6274,
"step": 12300
},
{
"epoch": 3.5964585274930103,
"grad_norm": 0.34198397397994995,
"learning_rate": 0.0005571713286713286,
"loss": 3.6404,
"step": 12350
},
{
"epoch": 3.6110205032618827,
"grad_norm": 0.3327263295650482,
"learning_rate": 0.0005569965034965034,
"loss": 3.66,
"step": 12400
},
{
"epoch": 3.625582479030755,
"grad_norm": 0.322231650352478,
"learning_rate": 0.0005568216783216783,
"loss": 3.6466,
"step": 12450
},
{
"epoch": 3.640144454799627,
"grad_norm": 0.3001907765865326,
"learning_rate": 0.000556646853146853,
"loss": 3.6305,
"step": 12500
},
{
"epoch": 3.6547064305684995,
"grad_norm": 0.3235301971435547,
"learning_rate": 0.0005564720279720279,
"loss": 3.6436,
"step": 12550
},
{
"epoch": 3.669268406337372,
"grad_norm": 0.31615960597991943,
"learning_rate": 0.0005562972027972027,
"loss": 3.6461,
"step": 12600
},
{
"epoch": 3.683830382106244,
"grad_norm": 0.31696444749832153,
"learning_rate": 0.0005561223776223775,
"loss": 3.6492,
"step": 12650
},
{
"epoch": 3.6983923578751163,
"grad_norm": 0.33829542994499207,
"learning_rate": 0.0005559475524475524,
"loss": 3.6339,
"step": 12700
},
{
"epoch": 3.7129543336439887,
"grad_norm": 0.3388734459877014,
"learning_rate": 0.0005557727272727272,
"loss": 3.6437,
"step": 12750
},
{
"epoch": 3.727516309412861,
"grad_norm": 0.33483371138572693,
"learning_rate": 0.000555597902097902,
"loss": 3.6441,
"step": 12800
},
{
"epoch": 3.7420782851817336,
"grad_norm": 0.3170323967933655,
"learning_rate": 0.0005554230769230768,
"loss": 3.653,
"step": 12850
},
{
"epoch": 3.756640260950606,
"grad_norm": 0.3122214674949646,
"learning_rate": 0.0005552482517482517,
"loss": 3.6459,
"step": 12900
},
{
"epoch": 3.771202236719478,
"grad_norm": 0.3186436891555786,
"learning_rate": 0.0005550734265734265,
"loss": 3.6363,
"step": 12950
},
{
"epoch": 3.7857642124883504,
"grad_norm": 0.30622565746307373,
"learning_rate": 0.0005548986013986013,
"loss": 3.6435,
"step": 13000
},
{
"epoch": 3.7857642124883504,
"eval_accuracy": 0.35488867599661567,
"eval_loss": 3.6783316135406494,
"eval_runtime": 53.7191,
"eval_samples_per_second": 309.834,
"eval_steps_per_second": 19.379,
"step": 13000
},
{
"epoch": 3.800326188257223,
"grad_norm": 0.3129367232322693,
"learning_rate": 0.0005547237762237761,
"loss": 3.6207,
"step": 13050
},
{
"epoch": 3.814888164026095,
"grad_norm": 0.32513627409935,
"learning_rate": 0.000554548951048951,
"loss": 3.6369,
"step": 13100
},
{
"epoch": 3.829450139794967,
"grad_norm": 0.30402615666389465,
"learning_rate": 0.0005543741258741258,
"loss": 3.6543,
"step": 13150
},
{
"epoch": 3.8440121155638396,
"grad_norm": 0.3050006330013275,
"learning_rate": 0.0005541993006993006,
"loss": 3.6357,
"step": 13200
},
{
"epoch": 3.858574091332712,
"grad_norm": 0.2974061071872711,
"learning_rate": 0.0005540244755244756,
"loss": 3.6411,
"step": 13250
},
{
"epoch": 3.8731360671015844,
"grad_norm": 0.3141818046569824,
"learning_rate": 0.0005538496503496502,
"loss": 3.6407,
"step": 13300
},
{
"epoch": 3.887698042870457,
"grad_norm": 0.3138577938079834,
"learning_rate": 0.0005536748251748252,
"loss": 3.6346,
"step": 13350
},
{
"epoch": 3.9022600186393293,
"grad_norm": 0.3083747625350952,
"learning_rate": 0.0005535,
"loss": 3.6563,
"step": 13400
},
{
"epoch": 3.9168219944082012,
"grad_norm": 0.3096838593482971,
"learning_rate": 0.0005533251748251748,
"loss": 3.6473,
"step": 13450
},
{
"epoch": 3.9313839701770736,
"grad_norm": 0.30454158782958984,
"learning_rate": 0.0005531503496503496,
"loss": 3.6444,
"step": 13500
},
{
"epoch": 3.945945945945946,
"grad_norm": 0.3204196095466614,
"learning_rate": 0.0005529755244755245,
"loss": 3.6318,
"step": 13550
},
{
"epoch": 3.960507921714818,
"grad_norm": 0.32237592339515686,
"learning_rate": 0.0005528006993006993,
"loss": 3.6528,
"step": 13600
},
{
"epoch": 3.9750698974836904,
"grad_norm": 0.3079224228858948,
"learning_rate": 0.0005526258741258741,
"loss": 3.6209,
"step": 13650
},
{
"epoch": 3.989631873252563,
"grad_norm": 0.31364694237709045,
"learning_rate": 0.0005524510489510489,
"loss": 3.6271,
"step": 13700
},
{
"epoch": 4.004077353215284,
"grad_norm": 0.3244830369949341,
"learning_rate": 0.0005522762237762238,
"loss": 3.6119,
"step": 13750
},
{
"epoch": 4.018639328984157,
"grad_norm": 0.3311712443828583,
"learning_rate": 0.0005521013986013986,
"loss": 3.5161,
"step": 13800
},
{
"epoch": 4.033201304753029,
"grad_norm": 0.323055237531662,
"learning_rate": 0.0005519265734265734,
"loss": 3.5338,
"step": 13850
},
{
"epoch": 4.0477632805219015,
"grad_norm": 0.3451452851295471,
"learning_rate": 0.0005517517482517482,
"loss": 3.5432,
"step": 13900
},
{
"epoch": 4.062325256290774,
"grad_norm": 0.3319436311721802,
"learning_rate": 0.0005515769230769231,
"loss": 3.5317,
"step": 13950
},
{
"epoch": 4.076887232059645,
"grad_norm": 0.311636745929718,
"learning_rate": 0.0005514020979020979,
"loss": 3.5409,
"step": 14000
},
{
"epoch": 4.076887232059645,
"eval_accuracy": 0.3559829626609214,
"eval_loss": 3.6699917316436768,
"eval_runtime": 53.6105,
"eval_samples_per_second": 310.462,
"eval_steps_per_second": 19.418,
"step": 14000
},
{
"epoch": 4.091449207828518,
"grad_norm": 0.32985007762908936,
"learning_rate": 0.0005512272727272727,
"loss": 3.5268,
"step": 14050
},
{
"epoch": 4.10601118359739,
"grad_norm": 0.32016435265541077,
"learning_rate": 0.0005510524475524475,
"loss": 3.5445,
"step": 14100
},
{
"epoch": 4.120573159366263,
"grad_norm": 0.3203654885292053,
"learning_rate": 0.0005508776223776223,
"loss": 3.5486,
"step": 14150
},
{
"epoch": 4.135135135135135,
"grad_norm": 0.33023160696029663,
"learning_rate": 0.0005507027972027972,
"loss": 3.5481,
"step": 14200
},
{
"epoch": 4.1496971109040075,
"grad_norm": 0.3284917175769806,
"learning_rate": 0.000550527972027972,
"loss": 3.537,
"step": 14250
},
{
"epoch": 4.16425908667288,
"grad_norm": 0.3125667870044708,
"learning_rate": 0.0005503531468531468,
"loss": 3.5498,
"step": 14300
},
{
"epoch": 4.178821062441752,
"grad_norm": 0.35497283935546875,
"learning_rate": 0.0005501783216783216,
"loss": 3.5459,
"step": 14350
},
{
"epoch": 4.193383038210625,
"grad_norm": 0.3228631913661957,
"learning_rate": 0.0005500034965034965,
"loss": 3.5677,
"step": 14400
},
{
"epoch": 4.207945013979497,
"grad_norm": 0.3228815197944641,
"learning_rate": 0.0005498286713286713,
"loss": 3.57,
"step": 14450
},
{
"epoch": 4.222506989748369,
"grad_norm": 0.32745644450187683,
"learning_rate": 0.0005496538461538461,
"loss": 3.5578,
"step": 14500
},
{
"epoch": 4.237068965517241,
"grad_norm": 0.3445773124694824,
"learning_rate": 0.0005494790209790209,
"loss": 3.5642,
"step": 14550
},
{
"epoch": 4.2516309412861135,
"grad_norm": 0.3111281991004944,
"learning_rate": 0.0005493041958041958,
"loss": 3.5579,
"step": 14600
},
{
"epoch": 4.266192917054986,
"grad_norm": 0.2998022139072418,
"learning_rate": 0.0005491293706293706,
"loss": 3.5598,
"step": 14650
},
{
"epoch": 4.280754892823858,
"grad_norm": 0.30128976702690125,
"learning_rate": 0.0005489545454545454,
"loss": 3.5697,
"step": 14700
},
{
"epoch": 4.295316868592731,
"grad_norm": 0.3182757794857025,
"learning_rate": 0.0005487797202797203,
"loss": 3.562,
"step": 14750
},
{
"epoch": 4.309878844361603,
"grad_norm": 0.30610358715057373,
"learning_rate": 0.000548604895104895,
"loss": 3.5624,
"step": 14800
},
{
"epoch": 4.324440820130476,
"grad_norm": 0.3180147409439087,
"learning_rate": 0.0005484300699300699,
"loss": 3.5558,
"step": 14850
},
{
"epoch": 4.339002795899348,
"grad_norm": 0.31451013684272766,
"learning_rate": 0.0005482552447552447,
"loss": 3.5662,
"step": 14900
},
{
"epoch": 4.3535647716682195,
"grad_norm": 0.3290238678455353,
"learning_rate": 0.0005480804195804195,
"loss": 3.5619,
"step": 14950
},
{
"epoch": 4.368126747437092,
"grad_norm": 0.33237355947494507,
"learning_rate": 0.0005479055944055943,
"loss": 3.5812,
"step": 15000
},
{
"epoch": 4.368126747437092,
"eval_accuracy": 0.35728536014459117,
"eval_loss": 3.6612935066223145,
"eval_runtime": 53.7257,
"eval_samples_per_second": 309.796,
"eval_steps_per_second": 19.376,
"step": 15000
},
{
"epoch": 4.382688723205964,
"grad_norm": 0.32442939281463623,
"learning_rate": 0.0005477307692307692,
"loss": 3.576,
"step": 15050
},
{
"epoch": 4.397250698974837,
"grad_norm": 0.31378301978111267,
"learning_rate": 0.000547555944055944,
"loss": 3.5708,
"step": 15100
},
{
"epoch": 4.411812674743709,
"grad_norm": 0.32099053263664246,
"learning_rate": 0.0005473811188811188,
"loss": 3.5733,
"step": 15150
},
{
"epoch": 4.426374650512582,
"grad_norm": 0.34932631254196167,
"learning_rate": 0.0005472062937062936,
"loss": 3.5663,
"step": 15200
},
{
"epoch": 4.440936626281454,
"grad_norm": 0.31164199113845825,
"learning_rate": 0.0005470314685314685,
"loss": 3.5757,
"step": 15250
},
{
"epoch": 4.455498602050326,
"grad_norm": 0.3159792721271515,
"learning_rate": 0.0005468566433566433,
"loss": 3.5567,
"step": 15300
},
{
"epoch": 4.470060577819199,
"grad_norm": 0.34067076444625854,
"learning_rate": 0.0005466818181818181,
"loss": 3.5713,
"step": 15350
},
{
"epoch": 4.484622553588071,
"grad_norm": 0.32717546820640564,
"learning_rate": 0.000546506993006993,
"loss": 3.5756,
"step": 15400
},
{
"epoch": 4.499184529356943,
"grad_norm": 0.32357490062713623,
"learning_rate": 0.0005463321678321678,
"loss": 3.5765,
"step": 15450
},
{
"epoch": 4.513746505125815,
"grad_norm": 0.3132951855659485,
"learning_rate": 0.0005461573426573426,
"loss": 3.5823,
"step": 15500
},
{
"epoch": 4.528308480894688,
"grad_norm": 0.31881314516067505,
"learning_rate": 0.0005459825174825174,
"loss": 3.5746,
"step": 15550
},
{
"epoch": 4.54287045666356,
"grad_norm": 0.3236841857433319,
"learning_rate": 0.0005458076923076922,
"loss": 3.5769,
"step": 15600
},
{
"epoch": 4.5574324324324325,
"grad_norm": 0.31546154618263245,
"learning_rate": 0.000545632867132867,
"loss": 3.573,
"step": 15650
},
{
"epoch": 4.571994408201305,
"grad_norm": 0.3234204053878784,
"learning_rate": 0.0005454580419580419,
"loss": 3.5809,
"step": 15700
},
{
"epoch": 4.586556383970177,
"grad_norm": 0.3184894621372223,
"learning_rate": 0.0005452832167832167,
"loss": 3.5782,
"step": 15750
},
{
"epoch": 4.60111835973905,
"grad_norm": 0.3175009787082672,
"learning_rate": 0.0005451083916083915,
"loss": 3.5773,
"step": 15800
},
{
"epoch": 4.615680335507921,
"grad_norm": 0.3093114197254181,
"learning_rate": 0.0005449335664335663,
"loss": 3.5604,
"step": 15850
},
{
"epoch": 4.630242311276794,
"grad_norm": 0.30245575308799744,
"learning_rate": 0.0005447587412587412,
"loss": 3.5744,
"step": 15900
},
{
"epoch": 4.644804287045666,
"grad_norm": 0.31706503033638,
"learning_rate": 0.000544583916083916,
"loss": 3.5589,
"step": 15950
},
{
"epoch": 4.6593662628145385,
"grad_norm": 0.31915611028671265,
"learning_rate": 0.0005444090909090908,
"loss": 3.5711,
"step": 16000
},
{
"epoch": 4.6593662628145385,
"eval_accuracy": 0.35867029649560195,
"eval_loss": 3.646385431289673,
"eval_runtime": 53.6836,
"eval_samples_per_second": 310.039,
"eval_steps_per_second": 19.391,
"step": 16000
},
{
"epoch": 4.673928238583411,
"grad_norm": 0.3159019649028778,
"learning_rate": 0.0005442342657342657,
"loss": 3.5764,
"step": 16050
},
{
"epoch": 4.688490214352283,
"grad_norm": 0.33272585272789,
"learning_rate": 0.0005440594405594405,
"loss": 3.5754,
"step": 16100
},
{
"epoch": 4.703052190121156,
"grad_norm": 0.3211807310581207,
"learning_rate": 0.0005438846153846153,
"loss": 3.573,
"step": 16150
},
{
"epoch": 4.717614165890028,
"grad_norm": 0.3272148072719574,
"learning_rate": 0.0005437097902097901,
"loss": 3.5809,
"step": 16200
},
{
"epoch": 4.7321761416589005,
"grad_norm": 0.33305129408836365,
"learning_rate": 0.0005435349650349651,
"loss": 3.5727,
"step": 16250
},
{
"epoch": 4.746738117427773,
"grad_norm": 0.3019855320453644,
"learning_rate": 0.0005433601398601397,
"loss": 3.5914,
"step": 16300
},
{
"epoch": 4.761300093196645,
"grad_norm": 0.313298761844635,
"learning_rate": 0.0005431853146853147,
"loss": 3.5713,
"step": 16350
},
{
"epoch": 4.775862068965517,
"grad_norm": 0.31281420588493347,
"learning_rate": 0.0005430104895104895,
"loss": 3.5721,
"step": 16400
},
{
"epoch": 4.790424044734389,
"grad_norm": 0.31018730998039246,
"learning_rate": 0.0005428356643356643,
"loss": 3.5829,
"step": 16450
},
{
"epoch": 4.804986020503262,
"grad_norm": 0.3040141463279724,
"learning_rate": 0.0005426608391608391,
"loss": 3.5814,
"step": 16500
},
{
"epoch": 4.819547996272134,
"grad_norm": 0.3142719268798828,
"learning_rate": 0.000542486013986014,
"loss": 3.5669,
"step": 16550
},
{
"epoch": 4.834109972041007,
"grad_norm": 0.31050989031791687,
"learning_rate": 0.0005423111888111888,
"loss": 3.5895,
"step": 16600
},
{
"epoch": 4.848671947809879,
"grad_norm": 0.31738415360450745,
"learning_rate": 0.0005421363636363636,
"loss": 3.5803,
"step": 16650
},
{
"epoch": 4.863233923578751,
"grad_norm": 0.33238860964775085,
"learning_rate": 0.0005419615384615385,
"loss": 3.5689,
"step": 16700
},
{
"epoch": 4.877795899347624,
"grad_norm": 0.3258155584335327,
"learning_rate": 0.0005417867132867133,
"loss": 3.5834,
"step": 16750
},
{
"epoch": 4.892357875116495,
"grad_norm": 0.34824496507644653,
"learning_rate": 0.0005416118881118881,
"loss": 3.5846,
"step": 16800
},
{
"epoch": 4.906919850885368,
"grad_norm": 0.3015121519565582,
"learning_rate": 0.0005414370629370629,
"loss": 3.5757,
"step": 16850
},
{
"epoch": 4.92148182665424,
"grad_norm": 0.3203749358654022,
"learning_rate": 0.0005412622377622378,
"loss": 3.5841,
"step": 16900
},
{
"epoch": 4.936043802423113,
"grad_norm": 0.2977421283721924,
"learning_rate": 0.0005410874125874126,
"loss": 3.5783,
"step": 16950
},
{
"epoch": 4.950605778191985,
"grad_norm": 0.318941205739975,
"learning_rate": 0.0005409125874125874,
"loss": 3.5748,
"step": 17000
},
{
"epoch": 4.950605778191985,
"eval_accuracy": 0.359580693147769,
"eval_loss": 3.63466477394104,
"eval_runtime": 53.7564,
"eval_samples_per_second": 309.619,
"eval_steps_per_second": 19.365,
"step": 17000
},
{
"epoch": 4.965167753960857,
"grad_norm": 0.31237950921058655,
"learning_rate": 0.0005407377622377622,
"loss": 3.5748,
"step": 17050
},
{
"epoch": 4.97972972972973,
"grad_norm": 0.3048698306083679,
"learning_rate": 0.000540562937062937,
"loss": 3.5694,
"step": 17100
},
{
"epoch": 4.994291705498602,
"grad_norm": 0.31058716773986816,
"learning_rate": 0.0005403881118881118,
"loss": 3.5876,
"step": 17150
},
{
"epoch": 5.008737185461324,
"grad_norm": 0.3176633417606354,
"learning_rate": 0.0005402132867132867,
"loss": 3.519,
"step": 17200
},
{
"epoch": 5.023299161230196,
"grad_norm": 0.32644349336624146,
"learning_rate": 0.0005400384615384615,
"loss": 3.464,
"step": 17250
},
{
"epoch": 5.0378611369990685,
"grad_norm": 0.31298401951789856,
"learning_rate": 0.0005398636363636363,
"loss": 3.4716,
"step": 17300
},
{
"epoch": 5.05242311276794,
"grad_norm": 0.33144500851631165,
"learning_rate": 0.0005396888111888111,
"loss": 3.4736,
"step": 17350
},
{
"epoch": 5.066985088536812,
"grad_norm": 0.3371279537677765,
"learning_rate": 0.000539513986013986,
"loss": 3.4803,
"step": 17400
},
{
"epoch": 5.081547064305685,
"grad_norm": 0.30339860916137695,
"learning_rate": 0.0005393391608391608,
"loss": 3.4797,
"step": 17450
},
{
"epoch": 5.096109040074557,
"grad_norm": 0.3512382209300995,
"learning_rate": 0.0005391643356643356,
"loss": 3.4856,
"step": 17500
},
{
"epoch": 5.11067101584343,
"grad_norm": 0.32866692543029785,
"learning_rate": 0.0005389895104895105,
"loss": 3.4985,
"step": 17550
},
{
"epoch": 5.125232991612302,
"grad_norm": 0.3049773871898651,
"learning_rate": 0.0005388146853146853,
"loss": 3.4957,
"step": 17600
},
{
"epoch": 5.1397949673811745,
"grad_norm": 0.3084123730659485,
"learning_rate": 0.0005386398601398601,
"loss": 3.4944,
"step": 17650
},
{
"epoch": 5.154356943150047,
"grad_norm": 0.3339555263519287,
"learning_rate": 0.0005384650349650349,
"loss": 3.5047,
"step": 17700
},
{
"epoch": 5.168918918918919,
"grad_norm": 0.32437944412231445,
"learning_rate": 0.0005382902097902098,
"loss": 3.4957,
"step": 17750
},
{
"epoch": 5.183480894687791,
"grad_norm": 0.32889071106910706,
"learning_rate": 0.0005381153846153845,
"loss": 3.5004,
"step": 17800
},
{
"epoch": 5.198042870456663,
"grad_norm": 0.3491978943347931,
"learning_rate": 0.0005379405594405594,
"loss": 3.4958,
"step": 17850
},
{
"epoch": 5.212604846225536,
"grad_norm": 0.3071553409099579,
"learning_rate": 0.0005377657342657342,
"loss": 3.4968,
"step": 17900
},
{
"epoch": 5.227166821994408,
"grad_norm": 0.3152475953102112,
"learning_rate": 0.000537590909090909,
"loss": 3.4914,
"step": 17950
},
{
"epoch": 5.2417287977632805,
"grad_norm": 0.3133900761604309,
"learning_rate": 0.0005374160839160838,
"loss": 3.5007,
"step": 18000
},
{
"epoch": 5.2417287977632805,
"eval_accuracy": 0.3599717533653989,
"eval_loss": 3.636350393295288,
"eval_runtime": 53.8447,
"eval_samples_per_second": 309.111,
"eval_steps_per_second": 19.333,
"step": 18000
},
{
"epoch": 5.256290773532153,
"grad_norm": 0.3083953261375427,
"learning_rate": 0.0005372412587412587,
"loss": 3.5021,
"step": 18050
},
{
"epoch": 5.270852749301025,
"grad_norm": 0.34414270520210266,
"learning_rate": 0.0005370664335664335,
"loss": 3.516,
"step": 18100
},
{
"epoch": 5.285414725069898,
"grad_norm": 0.3216901123523712,
"learning_rate": 0.0005368916083916083,
"loss": 3.5161,
"step": 18150
},
{
"epoch": 5.29997670083877,
"grad_norm": 0.3427264392375946,
"learning_rate": 0.0005367167832167832,
"loss": 3.5075,
"step": 18200
},
{
"epoch": 5.314538676607642,
"grad_norm": 0.3271310031414032,
"learning_rate": 0.000536541958041958,
"loss": 3.506,
"step": 18250
},
{
"epoch": 5.329100652376514,
"grad_norm": 0.32501789927482605,
"learning_rate": 0.0005363671328671328,
"loss": 3.5099,
"step": 18300
},
{
"epoch": 5.3436626281453865,
"grad_norm": 0.3126375079154968,
"learning_rate": 0.0005361923076923076,
"loss": 3.516,
"step": 18350
},
{
"epoch": 5.358224603914259,
"grad_norm": 0.3329120874404907,
"learning_rate": 0.0005360174825174825,
"loss": 3.5009,
"step": 18400
},
{
"epoch": 5.372786579683131,
"grad_norm": 0.34138381481170654,
"learning_rate": 0.0005358426573426573,
"loss": 3.5186,
"step": 18450
},
{
"epoch": 5.387348555452004,
"grad_norm": 0.3538558781147003,
"learning_rate": 0.0005356678321678321,
"loss": 3.5156,
"step": 18500
},
{
"epoch": 5.401910531220876,
"grad_norm": 0.3556719422340393,
"learning_rate": 0.0005354930069930069,
"loss": 3.513,
"step": 18550
},
{
"epoch": 5.416472506989749,
"grad_norm": 0.32848823070526123,
"learning_rate": 0.0005353181818181817,
"loss": 3.5206,
"step": 18600
},
{
"epoch": 5.431034482758621,
"grad_norm": 0.31033194065093994,
"learning_rate": 0.0005351433566433565,
"loss": 3.5212,
"step": 18650
},
{
"epoch": 5.445596458527493,
"grad_norm": 0.32916679978370667,
"learning_rate": 0.0005349685314685314,
"loss": 3.5122,
"step": 18700
},
{
"epoch": 5.460158434296365,
"grad_norm": 0.3248915672302246,
"learning_rate": 0.0005347937062937062,
"loss": 3.5116,
"step": 18750
},
{
"epoch": 5.474720410065237,
"grad_norm": 0.316643089056015,
"learning_rate": 0.000534618881118881,
"loss": 3.5131,
"step": 18800
},
{
"epoch": 5.48928238583411,
"grad_norm": 0.3165883719921112,
"learning_rate": 0.0005344440559440559,
"loss": 3.5206,
"step": 18850
},
{
"epoch": 5.503844361602982,
"grad_norm": 0.31263893842697144,
"learning_rate": 0.0005342692307692307,
"loss": 3.515,
"step": 18900
},
{
"epoch": 5.518406337371855,
"grad_norm": 0.31926199793815613,
"learning_rate": 0.0005340944055944055,
"loss": 3.5238,
"step": 18950
},
{
"epoch": 5.532968313140727,
"grad_norm": 0.33692145347595215,
"learning_rate": 0.0005339195804195803,
"loss": 3.5337,
"step": 19000
},
{
"epoch": 5.532968313140727,
"eval_accuracy": 0.36116574510022476,
"eval_loss": 3.6262943744659424,
"eval_runtime": 53.8294,
"eval_samples_per_second": 309.199,
"eval_steps_per_second": 19.339,
"step": 19000
},
{
"epoch": 5.547530288909599,
"grad_norm": 0.3566485345363617,
"learning_rate": 0.0005337447552447552,
"loss": 3.5197,
"step": 19050
},
{
"epoch": 5.562092264678472,
"grad_norm": 0.3133227825164795,
"learning_rate": 0.00053356993006993,
"loss": 3.523,
"step": 19100
},
{
"epoch": 5.576654240447344,
"grad_norm": 0.31186652183532715,
"learning_rate": 0.0005333951048951048,
"loss": 3.5221,
"step": 19150
},
{
"epoch": 5.591216216216216,
"grad_norm": 0.3436189591884613,
"learning_rate": 0.0005332202797202796,
"loss": 3.5373,
"step": 19200
},
{
"epoch": 5.605778191985088,
"grad_norm": 0.3185917139053345,
"learning_rate": 0.0005330454545454546,
"loss": 3.5197,
"step": 19250
},
{
"epoch": 5.620340167753961,
"grad_norm": 0.3546905517578125,
"learning_rate": 0.0005328706293706292,
"loss": 3.5303,
"step": 19300
},
{
"epoch": 5.634902143522833,
"grad_norm": 0.33124232292175293,
"learning_rate": 0.0005326958041958042,
"loss": 3.5286,
"step": 19350
},
{
"epoch": 5.649464119291705,
"grad_norm": 0.30169352889060974,
"learning_rate": 0.000532520979020979,
"loss": 3.519,
"step": 19400
},
{
"epoch": 5.664026095060578,
"grad_norm": 0.3323482871055603,
"learning_rate": 0.0005323461538461538,
"loss": 3.5369,
"step": 19450
},
{
"epoch": 5.67858807082945,
"grad_norm": 0.31857866048812866,
"learning_rate": 0.0005321713286713287,
"loss": 3.535,
"step": 19500
},
{
"epoch": 5.693150046598323,
"grad_norm": 0.3306872248649597,
"learning_rate": 0.0005319965034965035,
"loss": 3.5327,
"step": 19550
},
{
"epoch": 5.707712022367195,
"grad_norm": 0.3332788050174713,
"learning_rate": 0.0005318216783216783,
"loss": 3.5268,
"step": 19600
},
{
"epoch": 5.7222739981360675,
"grad_norm": 0.3307408392429352,
"learning_rate": 0.0005316468531468531,
"loss": 3.5251,
"step": 19650
},
{
"epoch": 5.736835973904939,
"grad_norm": 0.3280176818370819,
"learning_rate": 0.000531472027972028,
"loss": 3.5273,
"step": 19700
},
{
"epoch": 5.7513979496738115,
"grad_norm": 0.33305320143699646,
"learning_rate": 0.0005312972027972028,
"loss": 3.522,
"step": 19750
},
{
"epoch": 5.765959925442684,
"grad_norm": 0.29962897300720215,
"learning_rate": 0.0005311223776223776,
"loss": 3.5329,
"step": 19800
},
{
"epoch": 5.780521901211556,
"grad_norm": 0.3109263479709625,
"learning_rate": 0.0005309475524475524,
"loss": 3.5426,
"step": 19850
},
{
"epoch": 5.795083876980429,
"grad_norm": 0.3340068757534027,
"learning_rate": 0.0005307727272727273,
"loss": 3.5345,
"step": 19900
},
{
"epoch": 5.809645852749301,
"grad_norm": 0.32446378469467163,
"learning_rate": 0.0005305979020979021,
"loss": 3.5408,
"step": 19950
},
{
"epoch": 5.8242078285181735,
"grad_norm": 0.32898879051208496,
"learning_rate": 0.0005304230769230769,
"loss": 3.5421,
"step": 20000
},
{
"epoch": 5.8242078285181735,
"eval_accuracy": 0.3620535670194439,
"eval_loss": 3.6150898933410645,
"eval_runtime": 53.8523,
"eval_samples_per_second": 309.067,
"eval_steps_per_second": 19.331,
"step": 20000
},
{
"epoch": 5.838769804287046,
"grad_norm": 0.3129737377166748,
"learning_rate": 0.0005302482517482517,
"loss": 3.5318,
"step": 20050
},
{
"epoch": 5.853331780055918,
"grad_norm": 0.3082195222377777,
"learning_rate": 0.0005300734265734265,
"loss": 3.5251,
"step": 20100
},
{
"epoch": 5.86789375582479,
"grad_norm": 0.30648788809776306,
"learning_rate": 0.0005298986013986013,
"loss": 3.5338,
"step": 20150
},
{
"epoch": 5.882455731593662,
"grad_norm": 0.3134007453918457,
"learning_rate": 0.0005297237762237762,
"loss": 3.5169,
"step": 20200
},
{
"epoch": 5.897017707362535,
"grad_norm": 0.3222978115081787,
"learning_rate": 0.000529548951048951,
"loss": 3.5412,
"step": 20250
},
{
"epoch": 5.911579683131407,
"grad_norm": 0.3065994381904602,
"learning_rate": 0.0005293741258741258,
"loss": 3.5197,
"step": 20300
},
{
"epoch": 5.9261416589002796,
"grad_norm": 0.3118910789489746,
"learning_rate": 0.0005291993006993007,
"loss": 3.519,
"step": 20350
},
{
"epoch": 5.940703634669152,
"grad_norm": 0.3327983021736145,
"learning_rate": 0.0005290244755244755,
"loss": 3.5199,
"step": 20400
},
{
"epoch": 5.955265610438024,
"grad_norm": 0.33203497529029846,
"learning_rate": 0.0005288496503496503,
"loss": 3.5236,
"step": 20450
},
{
"epoch": 5.969827586206897,
"grad_norm": 0.3465798497200012,
"learning_rate": 0.0005286748251748251,
"loss": 3.524,
"step": 20500
},
{
"epoch": 5.984389561975769,
"grad_norm": 0.31439685821533203,
"learning_rate": 0.0005285,
"loss": 3.5324,
"step": 20550
},
{
"epoch": 5.998951537744642,
"grad_norm": 0.32758834958076477,
"learning_rate": 0.0005283251748251748,
"loss": 3.5191,
"step": 20600
},
{
"epoch": 6.013397017707362,
"grad_norm": 0.3180631995201111,
"learning_rate": 0.0005281503496503496,
"loss": 3.4261,
"step": 20650
},
{
"epoch": 6.0279589934762345,
"grad_norm": 0.3253958225250244,
"learning_rate": 0.0005279755244755244,
"loss": 3.4226,
"step": 20700
},
{
"epoch": 6.042520969245107,
"grad_norm": 0.31996259093284607,
"learning_rate": 0.0005278006993006993,
"loss": 3.4179,
"step": 20750
},
{
"epoch": 6.057082945013979,
"grad_norm": 0.349447101354599,
"learning_rate": 0.000527625874125874,
"loss": 3.4317,
"step": 20800
},
{
"epoch": 6.071644920782852,
"grad_norm": 0.3400956392288208,
"learning_rate": 0.0005274510489510489,
"loss": 3.4253,
"step": 20850
},
{
"epoch": 6.086206896551724,
"grad_norm": 0.31208309531211853,
"learning_rate": 0.0005272762237762238,
"loss": 3.4351,
"step": 20900
},
{
"epoch": 6.100768872320597,
"grad_norm": 0.3292507231235504,
"learning_rate": 0.0005271013986013985,
"loss": 3.4399,
"step": 20950
},
{
"epoch": 6.115330848089469,
"grad_norm": 0.338582307100296,
"learning_rate": 0.0005269265734265734,
"loss": 3.4317,
"step": 21000
},
{
"epoch": 6.115330848089469,
"eval_accuracy": 0.362383840065542,
"eval_loss": 3.6184234619140625,
"eval_runtime": 53.6626,
"eval_samples_per_second": 310.16,
"eval_steps_per_second": 19.399,
"step": 21000
},
{
"epoch": 6.129892823858341,
"grad_norm": 0.3549116849899292,
"learning_rate": 0.0005267517482517482,
"loss": 3.448,
"step": 21050
},
{
"epoch": 6.144454799627214,
"grad_norm": 0.3466671407222748,
"learning_rate": 0.000526576923076923,
"loss": 3.4531,
"step": 21100
},
{
"epoch": 6.159016775396085,
"grad_norm": 0.33880236744880676,
"learning_rate": 0.0005264020979020978,
"loss": 3.4532,
"step": 21150
},
{
"epoch": 6.173578751164958,
"grad_norm": 0.3429170846939087,
"learning_rate": 0.0005262272727272727,
"loss": 3.4504,
"step": 21200
},
{
"epoch": 6.18814072693383,
"grad_norm": 0.3074339032173157,
"learning_rate": 0.0005260524475524475,
"loss": 3.4359,
"step": 21250
},
{
"epoch": 6.202702702702703,
"grad_norm": 0.3143917918205261,
"learning_rate": 0.0005258776223776223,
"loss": 3.4509,
"step": 21300
},
{
"epoch": 6.217264678471575,
"grad_norm": 0.3545181155204773,
"learning_rate": 0.0005257027972027971,
"loss": 3.4566,
"step": 21350
},
{
"epoch": 6.2318266542404475,
"grad_norm": 0.322401762008667,
"learning_rate": 0.000525527972027972,
"loss": 3.4592,
"step": 21400
},
{
"epoch": 6.24638863000932,
"grad_norm": 0.33117911219596863,
"learning_rate": 0.0005253531468531468,
"loss": 3.4598,
"step": 21450
},
{
"epoch": 6.260950605778192,
"grad_norm": 0.34444600343704224,
"learning_rate": 0.0005251783216783216,
"loss": 3.4582,
"step": 21500
},
{
"epoch": 6.275512581547065,
"grad_norm": 0.3065207302570343,
"learning_rate": 0.0005250034965034965,
"loss": 3.47,
"step": 21550
},
{
"epoch": 6.290074557315936,
"grad_norm": 0.34848421812057495,
"learning_rate": 0.0005248286713286712,
"loss": 3.4732,
"step": 21600
},
{
"epoch": 6.304636533084809,
"grad_norm": 0.3056810796260834,
"learning_rate": 0.0005246538461538461,
"loss": 3.4529,
"step": 21650
},
{
"epoch": 6.319198508853681,
"grad_norm": 0.33891788125038147,
"learning_rate": 0.0005244790209790209,
"loss": 3.4678,
"step": 21700
},
{
"epoch": 6.3337604846225535,
"grad_norm": 0.3220604956150055,
"learning_rate": 0.0005243041958041957,
"loss": 3.4709,
"step": 21750
},
{
"epoch": 6.348322460391426,
"grad_norm": 0.30562639236450195,
"learning_rate": 0.0005241293706293705,
"loss": 3.4767,
"step": 21800
},
{
"epoch": 6.362884436160298,
"grad_norm": 0.32998815178871155,
"learning_rate": 0.0005239545454545454,
"loss": 3.4829,
"step": 21850
},
{
"epoch": 6.377446411929171,
"grad_norm": 0.34016919136047363,
"learning_rate": 0.0005237797202797202,
"loss": 3.4699,
"step": 21900
},
{
"epoch": 6.392008387698043,
"grad_norm": 0.3256104290485382,
"learning_rate": 0.000523604895104895,
"loss": 3.4741,
"step": 21950
},
{
"epoch": 6.4065703634669156,
"grad_norm": 0.33402830362319946,
"learning_rate": 0.0005234300699300698,
"loss": 3.4691,
"step": 22000
},
{
"epoch": 6.4065703634669156,
"eval_accuracy": 0.3631490294510907,
"eval_loss": 3.6099016666412354,
"eval_runtime": 53.7802,
"eval_samples_per_second": 309.482,
"eval_steps_per_second": 19.357,
"step": 22000
},
{
"epoch": 6.421132339235788,
"grad_norm": 0.32157817482948303,
"learning_rate": 0.0005232552447552447,
"loss": 3.4586,
"step": 22050
},
{
"epoch": 6.4356943150046595,
"grad_norm": 0.307766854763031,
"learning_rate": 0.0005230804195804195,
"loss": 3.4756,
"step": 22100
},
{
"epoch": 6.450256290773532,
"grad_norm": 0.33260318636894226,
"learning_rate": 0.0005229055944055943,
"loss": 3.4764,
"step": 22150
},
{
"epoch": 6.464818266542404,
"grad_norm": 0.3042699098587036,
"learning_rate": 0.0005227307692307691,
"loss": 3.4875,
"step": 22200
},
{
"epoch": 6.479380242311277,
"grad_norm": 0.31879618763923645,
"learning_rate": 0.0005225559440559441,
"loss": 3.4849,
"step": 22250
},
{
"epoch": 6.493942218080149,
"grad_norm": 0.33056700229644775,
"learning_rate": 0.0005223811188811189,
"loss": 3.4723,
"step": 22300
},
{
"epoch": 6.508504193849022,
"grad_norm": 0.3173615336418152,
"learning_rate": 0.0005222062937062937,
"loss": 3.4772,
"step": 22350
},
{
"epoch": 6.523066169617894,
"grad_norm": 0.33287325501441956,
"learning_rate": 0.0005220314685314686,
"loss": 3.4774,
"step": 22400
},
{
"epoch": 6.537628145386766,
"grad_norm": 0.32694077491760254,
"learning_rate": 0.0005218566433566433,
"loss": 3.4902,
"step": 22450
},
{
"epoch": 6.552190121155638,
"grad_norm": 0.35262542963027954,
"learning_rate": 0.0005216818181818182,
"loss": 3.4779,
"step": 22500
},
{
"epoch": 6.56675209692451,
"grad_norm": 0.3070398271083832,
"learning_rate": 0.000521506993006993,
"loss": 3.4911,
"step": 22550
},
{
"epoch": 6.581314072693383,
"grad_norm": 0.33605894446372986,
"learning_rate": 0.0005213321678321678,
"loss": 3.4823,
"step": 22600
},
{
"epoch": 6.595876048462255,
"grad_norm": 0.3233272135257721,
"learning_rate": 0.0005211573426573426,
"loss": 3.4815,
"step": 22650
},
{
"epoch": 6.610438024231128,
"grad_norm": 0.3428344428539276,
"learning_rate": 0.0005209825174825175,
"loss": 3.4917,
"step": 22700
},
{
"epoch": 6.625,
"grad_norm": 0.3113822340965271,
"learning_rate": 0.0005208076923076923,
"loss": 3.484,
"step": 22750
},
{
"epoch": 6.639561975768872,
"grad_norm": 0.31913211941719055,
"learning_rate": 0.0005206328671328671,
"loss": 3.4862,
"step": 22800
},
{
"epoch": 6.654123951537745,
"grad_norm": 0.3375007212162018,
"learning_rate": 0.0005204580419580419,
"loss": 3.4915,
"step": 22850
},
{
"epoch": 6.668685927306617,
"grad_norm": 0.3295234143733978,
"learning_rate": 0.0005202832167832168,
"loss": 3.4898,
"step": 22900
},
{
"epoch": 6.68324790307549,
"grad_norm": 0.32532602548599243,
"learning_rate": 0.0005201083916083916,
"loss": 3.4951,
"step": 22950
},
{
"epoch": 6.697809878844362,
"grad_norm": 0.3587338328361511,
"learning_rate": 0.0005199335664335664,
"loss": 3.4836,
"step": 23000
},
{
"epoch": 6.697809878844362,
"eval_accuracy": 0.3637184535743562,
"eval_loss": 3.599292755126953,
"eval_runtime": 53.6472,
"eval_samples_per_second": 310.249,
"eval_steps_per_second": 19.405,
"step": 23000
},
{
"epoch": 6.712371854613234,
"grad_norm": 0.32595306634902954,
"learning_rate": 0.0005197587412587413,
"loss": 3.4882,
"step": 23050
},
{
"epoch": 6.726933830382106,
"grad_norm": 0.337817907333374,
"learning_rate": 0.0005195839160839161,
"loss": 3.4846,
"step": 23100
},
{
"epoch": 6.741495806150978,
"grad_norm": 0.3214098811149597,
"learning_rate": 0.0005194090909090909,
"loss": 3.4951,
"step": 23150
},
{
"epoch": 6.756057781919851,
"grad_norm": 0.31647759675979614,
"learning_rate": 0.0005192342657342657,
"loss": 3.4814,
"step": 23200
},
{
"epoch": 6.770619757688723,
"grad_norm": 0.31576162576675415,
"learning_rate": 0.0005190594405594405,
"loss": 3.4945,
"step": 23250
},
{
"epoch": 6.785181733457596,
"grad_norm": 0.32426100969314575,
"learning_rate": 0.0005188846153846153,
"loss": 3.4875,
"step": 23300
},
{
"epoch": 6.799743709226468,
"grad_norm": 0.32514530420303345,
"learning_rate": 0.0005187097902097902,
"loss": 3.491,
"step": 23350
},
{
"epoch": 6.8143056849953405,
"grad_norm": 0.3201660215854645,
"learning_rate": 0.000518534965034965,
"loss": 3.4882,
"step": 23400
},
{
"epoch": 6.828867660764212,
"grad_norm": 0.32947874069213867,
"learning_rate": 0.0005183601398601398,
"loss": 3.4996,
"step": 23450
},
{
"epoch": 6.8434296365330844,
"grad_norm": 0.34106481075286865,
"learning_rate": 0.0005181853146853146,
"loss": 3.4961,
"step": 23500
},
{
"epoch": 6.857991612301957,
"grad_norm": 0.36824700236320496,
"learning_rate": 0.0005180104895104895,
"loss": 3.4847,
"step": 23550
},
{
"epoch": 6.872553588070829,
"grad_norm": 0.3059179186820984,
"learning_rate": 0.0005178356643356643,
"loss": 3.5004,
"step": 23600
},
{
"epoch": 6.887115563839702,
"grad_norm": 0.31658226251602173,
"learning_rate": 0.0005176608391608391,
"loss": 3.4822,
"step": 23650
},
{
"epoch": 6.901677539608574,
"grad_norm": 0.3481607437133789,
"learning_rate": 0.000517486013986014,
"loss": 3.4975,
"step": 23700
},
{
"epoch": 6.9162395153774465,
"grad_norm": 0.340072900056839,
"learning_rate": 0.0005173111888111888,
"loss": 3.4979,
"step": 23750
},
{
"epoch": 6.930801491146319,
"grad_norm": 0.3230604827404022,
"learning_rate": 0.0005171363636363636,
"loss": 3.4972,
"step": 23800
},
{
"epoch": 6.945363466915191,
"grad_norm": 0.32332488894462585,
"learning_rate": 0.0005169615384615384,
"loss": 3.4911,
"step": 23850
},
{
"epoch": 6.959925442684064,
"grad_norm": 0.3132401406764984,
"learning_rate": 0.0005167867132867133,
"loss": 3.4979,
"step": 23900
},
{
"epoch": 6.974487418452936,
"grad_norm": 0.30957671999931335,
"learning_rate": 0.000516611888111888,
"loss": 3.4941,
"step": 23950
},
{
"epoch": 6.989049394221808,
"grad_norm": 0.32135453820228577,
"learning_rate": 0.0005164370629370629,
"loss": 3.4874,
"step": 24000
},
{
"epoch": 6.989049394221808,
"eval_accuracy": 0.36465554014516494,
"eval_loss": 3.590451240539551,
"eval_runtime": 53.7639,
"eval_samples_per_second": 309.576,
"eval_steps_per_second": 19.362,
"step": 24000
},
{
"epoch": 7.003494874184529,
"grad_norm": 0.3334214985370636,
"learning_rate": 0.0005162622377622377,
"loss": 3.4636,
"step": 24050
},
{
"epoch": 7.0180568499534015,
"grad_norm": 0.3495422899723053,
"learning_rate": 0.0005160874125874125,
"loss": 3.3781,
"step": 24100
},
{
"epoch": 7.032618825722274,
"grad_norm": 0.3541470468044281,
"learning_rate": 0.0005159125874125873,
"loss": 3.38,
"step": 24150
},
{
"epoch": 7.047180801491146,
"grad_norm": 0.333109974861145,
"learning_rate": 0.0005157377622377622,
"loss": 3.3921,
"step": 24200
},
{
"epoch": 7.061742777260019,
"grad_norm": 0.31906047463417053,
"learning_rate": 0.000515562937062937,
"loss": 3.405,
"step": 24250
},
{
"epoch": 7.076304753028891,
"grad_norm": 0.3586280345916748,
"learning_rate": 0.0005153881118881118,
"loss": 3.3985,
"step": 24300
},
{
"epoch": 7.090866728797764,
"grad_norm": 0.3500705659389496,
"learning_rate": 0.0005152132867132867,
"loss": 3.4048,
"step": 24350
},
{
"epoch": 7.105428704566636,
"grad_norm": 0.33212336897850037,
"learning_rate": 0.0005150384615384615,
"loss": 3.4119,
"step": 24400
},
{
"epoch": 7.1199906803355075,
"grad_norm": 0.3508271276950836,
"learning_rate": 0.0005148636363636363,
"loss": 3.4001,
"step": 24450
},
{
"epoch": 7.13455265610438,
"grad_norm": 0.3221181035041809,
"learning_rate": 0.0005146888111888111,
"loss": 3.393,
"step": 24500
},
{
"epoch": 7.149114631873252,
"grad_norm": 0.3389952480792999,
"learning_rate": 0.000514513986013986,
"loss": 3.4187,
"step": 24550
},
{
"epoch": 7.163676607642125,
"grad_norm": 0.34659498929977417,
"learning_rate": 0.0005143391608391608,
"loss": 3.4227,
"step": 24600
},
{
"epoch": 7.178238583410997,
"grad_norm": 0.3916986286640167,
"learning_rate": 0.0005141643356643356,
"loss": 3.414,
"step": 24650
},
{
"epoch": 7.19280055917987,
"grad_norm": 0.31232357025146484,
"learning_rate": 0.0005139895104895104,
"loss": 3.4161,
"step": 24700
},
{
"epoch": 7.207362534948742,
"grad_norm": 0.315341979265213,
"learning_rate": 0.0005138146853146852,
"loss": 3.4145,
"step": 24750
},
{
"epoch": 7.221924510717614,
"grad_norm": 0.32361841201782227,
"learning_rate": 0.00051363986013986,
"loss": 3.4221,
"step": 24800
},
{
"epoch": 7.236486486486487,
"grad_norm": 0.3259839415550232,
"learning_rate": 0.0005134650349650349,
"loss": 3.4284,
"step": 24850
},
{
"epoch": 7.251048462255358,
"grad_norm": 0.34261107444763184,
"learning_rate": 0.0005132902097902097,
"loss": 3.4087,
"step": 24900
},
{
"epoch": 7.265610438024231,
"grad_norm": 0.3521381914615631,
"learning_rate": 0.0005131153846153845,
"loss": 3.4345,
"step": 24950
},
{
"epoch": 7.280172413793103,
"grad_norm": 0.32308435440063477,
"learning_rate": 0.0005129405594405594,
"loss": 3.4321,
"step": 25000
},
{
"epoch": 7.280172413793103,
"eval_accuracy": 0.3645800558818702,
"eval_loss": 3.598625898361206,
"eval_runtime": 53.8267,
"eval_samples_per_second": 309.215,
"eval_steps_per_second": 19.34,
"step": 25000
},
{
"epoch": 7.294734389561976,
"grad_norm": 0.3033270239830017,
"learning_rate": 0.0005127657342657342,
"loss": 3.4391,
"step": 25050
},
{
"epoch": 7.309296365330848,
"grad_norm": 0.332119882106781,
"learning_rate": 0.000512590909090909,
"loss": 3.4345,
"step": 25100
},
{
"epoch": 7.3238583410997204,
"grad_norm": 0.3105907738208771,
"learning_rate": 0.0005124160839160838,
"loss": 3.4386,
"step": 25150
},
{
"epoch": 7.338420316868593,
"grad_norm": 0.33971258997917175,
"learning_rate": 0.0005122412587412588,
"loss": 3.424,
"step": 25200
},
{
"epoch": 7.352982292637465,
"grad_norm": 0.34061726927757263,
"learning_rate": 0.0005120664335664336,
"loss": 3.4281,
"step": 25250
},
{
"epoch": 7.367544268406338,
"grad_norm": 0.3410240709781647,
"learning_rate": 0.0005118916083916084,
"loss": 3.4452,
"step": 25300
},
{
"epoch": 7.38210624417521,
"grad_norm": 0.3339107036590576,
"learning_rate": 0.0005117167832167832,
"loss": 3.4366,
"step": 25350
},
{
"epoch": 7.396668219944082,
"grad_norm": 0.34466513991355896,
"learning_rate": 0.0005115419580419581,
"loss": 3.4326,
"step": 25400
},
{
"epoch": 7.411230195712954,
"grad_norm": 0.32572296261787415,
"learning_rate": 0.0005113671328671328,
"loss": 3.4348,
"step": 25450
},
{
"epoch": 7.4257921714818265,
"grad_norm": 0.33211272954940796,
"learning_rate": 0.0005111923076923077,
"loss": 3.4413,
"step": 25500
},
{
"epoch": 7.440354147250699,
"grad_norm": 0.331061452627182,
"learning_rate": 0.0005110174825174825,
"loss": 3.4274,
"step": 25550
},
{
"epoch": 7.454916123019571,
"grad_norm": 0.3294326961040497,
"learning_rate": 0.0005108426573426573,
"loss": 3.4472,
"step": 25600
},
{
"epoch": 7.469478098788444,
"grad_norm": 0.32842373847961426,
"learning_rate": 0.0005106678321678321,
"loss": 3.4361,
"step": 25650
},
{
"epoch": 7.484040074557316,
"grad_norm": 0.32948267459869385,
"learning_rate": 0.000510493006993007,
"loss": 3.4361,
"step": 25700
},
{
"epoch": 7.4986020503261885,
"grad_norm": 0.3331805467605591,
"learning_rate": 0.0005103181818181818,
"loss": 3.4412,
"step": 25750
},
{
"epoch": 7.513164026095061,
"grad_norm": 0.3543056845664978,
"learning_rate": 0.0005101433566433566,
"loss": 3.4501,
"step": 25800
},
{
"epoch": 7.5277260018639325,
"grad_norm": 0.31350502371788025,
"learning_rate": 0.0005099685314685315,
"loss": 3.4454,
"step": 25850
},
{
"epoch": 7.542287977632805,
"grad_norm": 0.3373096287250519,
"learning_rate": 0.0005097937062937063,
"loss": 3.453,
"step": 25900
},
{
"epoch": 7.556849953401677,
"grad_norm": 0.33655133843421936,
"learning_rate": 0.0005096188811188811,
"loss": 3.4609,
"step": 25950
},
{
"epoch": 7.57141192917055,
"grad_norm": 0.31745684146881104,
"learning_rate": 0.0005094440559440559,
"loss": 3.4369,
"step": 26000
},
{
"epoch": 7.57141192917055,
"eval_accuracy": 0.3652601197119276,
"eval_loss": 3.591218948364258,
"eval_runtime": 53.6938,
"eval_samples_per_second": 309.98,
"eval_steps_per_second": 19.388,
"step": 26000
},
{
"epoch": 7.585973904939422,
"grad_norm": 0.35504648089408875,
"learning_rate": 0.0005092692307692308,
"loss": 3.4682,
"step": 26050
},
{
"epoch": 7.600535880708295,
"grad_norm": 0.33637967705726624,
"learning_rate": 0.0005090944055944056,
"loss": 3.4509,
"step": 26100
},
{
"epoch": 7.615097856477167,
"grad_norm": 0.33597350120544434,
"learning_rate": 0.0005089195804195804,
"loss": 3.4583,
"step": 26150
},
{
"epoch": 7.629659832246039,
"grad_norm": 0.3214302361011505,
"learning_rate": 0.0005087447552447552,
"loss": 3.4457,
"step": 26200
},
{
"epoch": 7.644221808014912,
"grad_norm": 0.365751177072525,
"learning_rate": 0.00050856993006993,
"loss": 3.4516,
"step": 26250
},
{
"epoch": 7.658783783783784,
"grad_norm": 0.34353286027908325,
"learning_rate": 0.0005083951048951048,
"loss": 3.4569,
"step": 26300
},
{
"epoch": 7.673345759552656,
"grad_norm": 0.3380644917488098,
"learning_rate": 0.0005082202797202797,
"loss": 3.4491,
"step": 26350
},
{
"epoch": 7.687907735321528,
"grad_norm": 0.33947882056236267,
"learning_rate": 0.0005080454545454545,
"loss": 3.4516,
"step": 26400
},
{
"epoch": 7.702469711090401,
"grad_norm": 0.31728804111480713,
"learning_rate": 0.0005078706293706293,
"loss": 3.454,
"step": 26450
},
{
"epoch": 7.717031686859273,
"grad_norm": 0.3451788127422333,
"learning_rate": 0.0005076958041958042,
"loss": 3.457,
"step": 26500
},
{
"epoch": 7.731593662628145,
"grad_norm": 0.38292965292930603,
"learning_rate": 0.000507520979020979,
"loss": 3.4644,
"step": 26550
},
{
"epoch": 7.746155638397018,
"grad_norm": 0.3145500421524048,
"learning_rate": 0.0005073461538461538,
"loss": 3.4548,
"step": 26600
},
{
"epoch": 7.76071761416589,
"grad_norm": 0.34711846709251404,
"learning_rate": 0.0005071713286713286,
"loss": 3.4643,
"step": 26650
},
{
"epoch": 7.775279589934763,
"grad_norm": 0.3363344371318817,
"learning_rate": 0.0005069965034965035,
"loss": 3.4566,
"step": 26700
},
{
"epoch": 7.789841565703634,
"grad_norm": 0.31343647837638855,
"learning_rate": 0.0005068216783216783,
"loss": 3.4551,
"step": 26750
},
{
"epoch": 7.804403541472507,
"grad_norm": 0.31084564328193665,
"learning_rate": 0.0005066468531468531,
"loss": 3.4569,
"step": 26800
},
{
"epoch": 7.818965517241379,
"grad_norm": 0.3759641647338867,
"learning_rate": 0.0005064720279720279,
"loss": 3.4614,
"step": 26850
},
{
"epoch": 7.833527493010251,
"grad_norm": 0.3450261652469635,
"learning_rate": 0.0005062972027972028,
"loss": 3.4614,
"step": 26900
},
{
"epoch": 7.848089468779124,
"grad_norm": 0.32384663820266724,
"learning_rate": 0.0005061223776223775,
"loss": 3.4693,
"step": 26950
},
{
"epoch": 7.862651444547996,
"grad_norm": 0.3452761471271515,
"learning_rate": 0.0005059475524475524,
"loss": 3.4537,
"step": 27000
},
{
"epoch": 7.862651444547996,
"eval_accuracy": 0.3661786291587479,
"eval_loss": 3.5794222354888916,
"eval_runtime": 53.6328,
"eval_samples_per_second": 310.333,
"eval_steps_per_second": 19.41,
"step": 27000
},
{
"epoch": 7.877213420316869,
"grad_norm": 0.315171480178833,
"learning_rate": 0.0005057727272727272,
"loss": 3.4653,
"step": 27050
},
{
"epoch": 7.891775396085741,
"grad_norm": 0.31368204951286316,
"learning_rate": 0.000505597902097902,
"loss": 3.4675,
"step": 27100
},
{
"epoch": 7.9063373718546135,
"grad_norm": 0.3297800123691559,
"learning_rate": 0.0005054230769230769,
"loss": 3.4458,
"step": 27150
},
{
"epoch": 7.920899347623486,
"grad_norm": 0.3278055489063263,
"learning_rate": 0.0005052482517482517,
"loss": 3.4738,
"step": 27200
},
{
"epoch": 7.935461323392358,
"grad_norm": 0.3137778639793396,
"learning_rate": 0.0005050734265734265,
"loss": 3.4677,
"step": 27250
},
{
"epoch": 7.95002329916123,
"grad_norm": 0.3212771713733673,
"learning_rate": 0.0005048986013986013,
"loss": 3.4704,
"step": 27300
},
{
"epoch": 7.964585274930102,
"grad_norm": 0.31689706444740295,
"learning_rate": 0.0005047237762237762,
"loss": 3.4477,
"step": 27350
},
{
"epoch": 7.979147250698975,
"grad_norm": 0.3310457170009613,
"learning_rate": 0.000504548951048951,
"loss": 3.4605,
"step": 27400
},
{
"epoch": 7.993709226467847,
"grad_norm": 0.33040231466293335,
"learning_rate": 0.0005043741258741258,
"loss": 3.4611,
"step": 27450
},
{
"epoch": 8.008154706430568,
"grad_norm": 0.34095948934555054,
"learning_rate": 0.0005041993006993006,
"loss": 3.394,
"step": 27500
},
{
"epoch": 8.022716682199441,
"grad_norm": 0.3549644649028778,
"learning_rate": 0.0005040244755244755,
"loss": 3.3429,
"step": 27550
},
{
"epoch": 8.037278657968313,
"grad_norm": 0.33670076727867126,
"learning_rate": 0.0005038496503496503,
"loss": 3.3604,
"step": 27600
},
{
"epoch": 8.051840633737186,
"grad_norm": 0.31921789050102234,
"learning_rate": 0.0005036748251748251,
"loss": 3.361,
"step": 27650
},
{
"epoch": 8.066402609506058,
"grad_norm": 0.3673897385597229,
"learning_rate": 0.0005034999999999999,
"loss": 3.3655,
"step": 27700
},
{
"epoch": 8.08096458527493,
"grad_norm": 0.3496399521827698,
"learning_rate": 0.0005033251748251747,
"loss": 3.3547,
"step": 27750
},
{
"epoch": 8.095526561043803,
"grad_norm": 0.3243163228034973,
"learning_rate": 0.0005031503496503496,
"loss": 3.3708,
"step": 27800
},
{
"epoch": 8.110088536812675,
"grad_norm": 0.3277808427810669,
"learning_rate": 0.0005029755244755244,
"loss": 3.3714,
"step": 27850
},
{
"epoch": 8.124650512581548,
"grad_norm": 0.338352769613266,
"learning_rate": 0.0005028006993006992,
"loss": 3.3882,
"step": 27900
},
{
"epoch": 8.13921248835042,
"grad_norm": 0.33708006143569946,
"learning_rate": 0.000502625874125874,
"loss": 3.3723,
"step": 27950
},
{
"epoch": 8.15377446411929,
"grad_norm": 0.32670241594314575,
"learning_rate": 0.000502451048951049,
"loss": 3.3668,
"step": 28000
},
{
"epoch": 8.15377446411929,
"eval_accuracy": 0.3660947969473317,
"eval_loss": 3.591442108154297,
"eval_runtime": 53.554,
"eval_samples_per_second": 310.789,
"eval_steps_per_second": 19.438,
"step": 28000
},
{
"epoch": 8.168336439888163,
"grad_norm": 0.35390371084213257,
"learning_rate": 0.0005022762237762237,
"loss": 3.3826,
"step": 28050
},
{
"epoch": 8.182898415657036,
"grad_norm": 0.31787583231925964,
"learning_rate": 0.0005021013986013985,
"loss": 3.386,
"step": 28100
},
{
"epoch": 8.197460391425908,
"grad_norm": 0.36148154735565186,
"learning_rate": 0.0005019265734265733,
"loss": 3.3822,
"step": 28150
},
{
"epoch": 8.21202236719478,
"grad_norm": 0.33843347430229187,
"learning_rate": 0.0005017517482517483,
"loss": 3.3855,
"step": 28200
},
{
"epoch": 8.226584342963653,
"grad_norm": 0.3566340208053589,
"learning_rate": 0.0005015769230769231,
"loss": 3.3842,
"step": 28250
},
{
"epoch": 8.241146318732525,
"grad_norm": 0.3228391110897064,
"learning_rate": 0.0005014020979020979,
"loss": 3.3911,
"step": 28300
},
{
"epoch": 8.255708294501398,
"grad_norm": 0.38596999645233154,
"learning_rate": 0.0005012272727272727,
"loss": 3.3901,
"step": 28350
},
{
"epoch": 8.27027027027027,
"grad_norm": 0.33675438165664673,
"learning_rate": 0.0005010524475524476,
"loss": 3.3946,
"step": 28400
},
{
"epoch": 8.284832246039143,
"grad_norm": 0.33514130115509033,
"learning_rate": 0.0005008776223776223,
"loss": 3.3954,
"step": 28450
},
{
"epoch": 8.299394221808015,
"grad_norm": 0.3307504951953888,
"learning_rate": 0.0005007027972027972,
"loss": 3.4104,
"step": 28500
},
{
"epoch": 8.313956197576887,
"grad_norm": 0.3365596830844879,
"learning_rate": 0.000500527972027972,
"loss": 3.4066,
"step": 28550
},
{
"epoch": 8.32851817334576,
"grad_norm": 0.34331199526786804,
"learning_rate": 0.0005003531468531468,
"loss": 3.3985,
"step": 28600
},
{
"epoch": 8.343080149114632,
"grad_norm": 0.3220767080783844,
"learning_rate": 0.0005001783216783217,
"loss": 3.4101,
"step": 28650
},
{
"epoch": 8.357642124883505,
"grad_norm": 0.34586459398269653,
"learning_rate": 0.0005000034965034965,
"loss": 3.4083,
"step": 28700
},
{
"epoch": 8.372204100652377,
"grad_norm": 0.35441240668296814,
"learning_rate": 0.0004998286713286713,
"loss": 3.423,
"step": 28750
},
{
"epoch": 8.38676607642125,
"grad_norm": 0.32183533906936646,
"learning_rate": 0.0004996538461538461,
"loss": 3.4064,
"step": 28800
},
{
"epoch": 8.401328052190122,
"grad_norm": 0.3539142906665802,
"learning_rate": 0.000499479020979021,
"loss": 3.4067,
"step": 28850
},
{
"epoch": 8.415890027958994,
"grad_norm": 0.3181823790073395,
"learning_rate": 0.0004993041958041958,
"loss": 3.4163,
"step": 28900
},
{
"epoch": 8.430452003727865,
"grad_norm": 0.3841262459754944,
"learning_rate": 0.0004991293706293706,
"loss": 3.42,
"step": 28950
},
{
"epoch": 8.445013979496737,
"grad_norm": 0.32062074542045593,
"learning_rate": 0.0004989545454545454,
"loss": 3.4093,
"step": 29000
},
{
"epoch": 8.445013979496737,
"eval_accuracy": 0.36640719832984603,
"eval_loss": 3.584610939025879,
"eval_runtime": 53.8487,
"eval_samples_per_second": 309.088,
"eval_steps_per_second": 19.332,
"step": 29000
},
{
"epoch": 8.45957595526561,
"grad_norm": 0.33427077531814575,
"learning_rate": 0.0004987797202797203,
"loss": 3.4135,
"step": 29050
},
{
"epoch": 8.474137931034482,
"grad_norm": 0.3367469608783722,
"learning_rate": 0.0004986048951048951,
"loss": 3.428,
"step": 29100
},
{
"epoch": 8.488699906803355,
"grad_norm": 0.33394715189933777,
"learning_rate": 0.0004984300699300699,
"loss": 3.4095,
"step": 29150
},
{
"epoch": 8.503261882572227,
"grad_norm": 0.3207018971443176,
"learning_rate": 0.0004982552447552448,
"loss": 3.4179,
"step": 29200
},
{
"epoch": 8.5178238583411,
"grad_norm": 0.34231436252593994,
"learning_rate": 0.0004980804195804195,
"loss": 3.4238,
"step": 29250
},
{
"epoch": 8.532385834109972,
"grad_norm": 0.3377602994441986,
"learning_rate": 0.0004979055944055944,
"loss": 3.4226,
"step": 29300
},
{
"epoch": 8.546947809878844,
"grad_norm": 0.3428233563899994,
"learning_rate": 0.0004977307692307692,
"loss": 3.426,
"step": 29350
},
{
"epoch": 8.561509785647717,
"grad_norm": 0.3227432668209076,
"learning_rate": 0.000497555944055944,
"loss": 3.4268,
"step": 29400
},
{
"epoch": 8.57607176141659,
"grad_norm": 0.3025504946708679,
"learning_rate": 0.0004973811188811188,
"loss": 3.4094,
"step": 29450
},
{
"epoch": 8.590633737185462,
"grad_norm": 0.3185526132583618,
"learning_rate": 0.0004972062937062937,
"loss": 3.4223,
"step": 29500
},
{
"epoch": 8.605195712954334,
"grad_norm": 0.33054155111312866,
"learning_rate": 0.0004970314685314685,
"loss": 3.4258,
"step": 29550
},
{
"epoch": 8.619757688723206,
"grad_norm": 0.32079461216926575,
"learning_rate": 0.0004968566433566433,
"loss": 3.4117,
"step": 29600
},
{
"epoch": 8.634319664492079,
"grad_norm": 0.3708074390888214,
"learning_rate": 0.0004966818181818181,
"loss": 3.4194,
"step": 29650
},
{
"epoch": 8.648881640260951,
"grad_norm": 0.31600895524024963,
"learning_rate": 0.000496506993006993,
"loss": 3.4229,
"step": 29700
},
{
"epoch": 8.663443616029824,
"grad_norm": 0.357805460691452,
"learning_rate": 0.0004963321678321678,
"loss": 3.4078,
"step": 29750
},
{
"epoch": 8.678005591798696,
"grad_norm": 0.3140498399734497,
"learning_rate": 0.0004961573426573426,
"loss": 3.4176,
"step": 29800
},
{
"epoch": 8.692567567567568,
"grad_norm": 0.33390533924102783,
"learning_rate": 0.0004959825174825175,
"loss": 3.4325,
"step": 29850
},
{
"epoch": 8.707129543336439,
"grad_norm": 0.33709651231765747,
"learning_rate": 0.0004958076923076923,
"loss": 3.4354,
"step": 29900
},
{
"epoch": 8.721691519105311,
"grad_norm": 0.3508022129535675,
"learning_rate": 0.0004956328671328671,
"loss": 3.4322,
"step": 29950
},
{
"epoch": 8.736253494874184,
"grad_norm": 0.3267257511615753,
"learning_rate": 0.0004954580419580419,
"loss": 3.431,
"step": 30000
},
{
"epoch": 8.736253494874184,
"eval_accuracy": 0.3671072502047011,
"eval_loss": 3.5762174129486084,
"eval_runtime": 53.741,
"eval_samples_per_second": 309.707,
"eval_steps_per_second": 19.371,
"step": 30000
},
{
"epoch": 8.750815470643056,
"grad_norm": 0.3560209274291992,
"learning_rate": 0.0004952832167832167,
"loss": 3.4339,
"step": 30050
},
{
"epoch": 8.765377446411929,
"grad_norm": 0.3457399904727936,
"learning_rate": 0.0004951083916083915,
"loss": 3.4385,
"step": 30100
},
{
"epoch": 8.779939422180801,
"grad_norm": 0.35111910104751587,
"learning_rate": 0.0004949335664335664,
"loss": 3.436,
"step": 30150
},
{
"epoch": 8.794501397949674,
"grad_norm": 0.3317621946334839,
"learning_rate": 0.0004947587412587412,
"loss": 3.4257,
"step": 30200
},
{
"epoch": 8.809063373718546,
"grad_norm": 0.3555958569049835,
"learning_rate": 0.000494583916083916,
"loss": 3.427,
"step": 30250
},
{
"epoch": 8.823625349487418,
"grad_norm": 0.3580514192581177,
"learning_rate": 0.0004944090909090908,
"loss": 3.4433,
"step": 30300
},
{
"epoch": 8.83818732525629,
"grad_norm": 0.3721309304237366,
"learning_rate": 0.0004942342657342657,
"loss": 3.4303,
"step": 30350
},
{
"epoch": 8.852749301025163,
"grad_norm": 0.33837392926216125,
"learning_rate": 0.0004940594405594405,
"loss": 3.4396,
"step": 30400
},
{
"epoch": 8.867311276794036,
"grad_norm": 0.3344026505947113,
"learning_rate": 0.0004938846153846153,
"loss": 3.4392,
"step": 30450
},
{
"epoch": 8.881873252562908,
"grad_norm": 0.3359682559967041,
"learning_rate": 0.0004937097902097901,
"loss": 3.4327,
"step": 30500
},
{
"epoch": 8.89643522833178,
"grad_norm": 0.3363666236400604,
"learning_rate": 0.000493534965034965,
"loss": 3.4385,
"step": 30550
},
{
"epoch": 8.910997204100653,
"grad_norm": 0.31655505299568176,
"learning_rate": 0.0004933601398601398,
"loss": 3.4258,
"step": 30600
},
{
"epoch": 8.925559179869525,
"grad_norm": 0.3216507136821747,
"learning_rate": 0.0004931853146853146,
"loss": 3.4292,
"step": 30650
},
{
"epoch": 8.940121155638398,
"grad_norm": 0.3542217016220093,
"learning_rate": 0.0004930104895104895,
"loss": 3.4478,
"step": 30700
},
{
"epoch": 8.95468313140727,
"grad_norm": 0.3227326273918152,
"learning_rate": 0.0004928356643356642,
"loss": 3.4315,
"step": 30750
},
{
"epoch": 8.969245107176143,
"grad_norm": 0.33713215589523315,
"learning_rate": 0.0004926608391608391,
"loss": 3.4332,
"step": 30800
},
{
"epoch": 8.983807082945013,
"grad_norm": 0.31681111454963684,
"learning_rate": 0.0004924860139860139,
"loss": 3.4466,
"step": 30850
},
{
"epoch": 8.998369058713886,
"grad_norm": 0.3436877131462097,
"learning_rate": 0.0004923111888111887,
"loss": 3.4311,
"step": 30900
},
{
"epoch": 9.012814538676608,
"grad_norm": 0.3397812843322754,
"learning_rate": 0.0004921363636363635,
"loss": 3.3401,
"step": 30950
},
{
"epoch": 9.02737651444548,
"grad_norm": 0.32249370217323303,
"learning_rate": 0.0004919615384615384,
"loss": 3.3184,
"step": 31000
},
{
"epoch": 9.02737651444548,
"eval_accuracy": 0.3673554546903946,
"eval_loss": 3.5798304080963135,
"eval_runtime": 53.6682,
"eval_samples_per_second": 310.128,
"eval_steps_per_second": 19.397,
"step": 31000
},
{
"epoch": 9.041938490214353,
"grad_norm": 0.34547701478004456,
"learning_rate": 0.0004917867132867132,
"loss": 3.3374,
"step": 31050
},
{
"epoch": 9.056500465983225,
"grad_norm": 0.33244821429252625,
"learning_rate": 0.000491611888111888,
"loss": 3.3453,
"step": 31100
},
{
"epoch": 9.071062441752098,
"grad_norm": 0.32689371705055237,
"learning_rate": 0.0004914370629370628,
"loss": 3.3282,
"step": 31150
},
{
"epoch": 9.08562441752097,
"grad_norm": 0.36178383231163025,
"learning_rate": 0.0004912622377622378,
"loss": 3.3424,
"step": 31200
},
{
"epoch": 9.100186393289842,
"grad_norm": 0.3325725793838501,
"learning_rate": 0.0004910874125874126,
"loss": 3.342,
"step": 31250
},
{
"epoch": 9.114748369058713,
"grad_norm": 0.3469438850879669,
"learning_rate": 0.0004909125874125874,
"loss": 3.3563,
"step": 31300
},
{
"epoch": 9.129310344827585,
"grad_norm": 0.3236953318119049,
"learning_rate": 0.0004907377622377623,
"loss": 3.3446,
"step": 31350
},
{
"epoch": 9.143872320596458,
"grad_norm": 0.3355654180049896,
"learning_rate": 0.0004905629370629371,
"loss": 3.3484,
"step": 31400
},
{
"epoch": 9.15843429636533,
"grad_norm": 0.34349188208580017,
"learning_rate": 0.0004903881118881119,
"loss": 3.359,
"step": 31450
},
{
"epoch": 9.172996272134203,
"grad_norm": 0.35089704394340515,
"learning_rate": 0.0004902132867132867,
"loss": 3.3577,
"step": 31500
},
{
"epoch": 9.187558247903075,
"grad_norm": 0.3498465418815613,
"learning_rate": 0.0004900384615384615,
"loss": 3.342,
"step": 31550
},
{
"epoch": 9.202120223671947,
"grad_norm": 0.3578391671180725,
"learning_rate": 0.0004898636363636363,
"loss": 3.3676,
"step": 31600
},
{
"epoch": 9.21668219944082,
"grad_norm": 0.3580490052700043,
"learning_rate": 0.0004896888111888112,
"loss": 3.3645,
"step": 31650
},
{
"epoch": 9.231244175209692,
"grad_norm": 0.3216322362422943,
"learning_rate": 0.000489513986013986,
"loss": 3.3669,
"step": 31700
},
{
"epoch": 9.245806150978565,
"grad_norm": 0.35393744707107544,
"learning_rate": 0.0004893391608391608,
"loss": 3.3599,
"step": 31750
},
{
"epoch": 9.260368126747437,
"grad_norm": 0.3496459126472473,
"learning_rate": 0.0004891643356643356,
"loss": 3.3759,
"step": 31800
},
{
"epoch": 9.27493010251631,
"grad_norm": 0.34132808446884155,
"learning_rate": 0.0004889895104895105,
"loss": 3.3837,
"step": 31850
},
{
"epoch": 9.289492078285182,
"grad_norm": 0.32651665806770325,
"learning_rate": 0.0004888146853146853,
"loss": 3.3847,
"step": 31900
},
{
"epoch": 9.304054054054054,
"grad_norm": 0.3654690086841583,
"learning_rate": 0.0004886398601398601,
"loss": 3.3735,
"step": 31950
},
{
"epoch": 9.318616029822927,
"grad_norm": 0.34119266271591187,
"learning_rate": 0.000488465034965035,
"loss": 3.377,
"step": 32000
},
{
"epoch": 9.318616029822927,
"eval_accuracy": 0.3671819114308571,
"eval_loss": 3.580883502960205,
"eval_runtime": 53.5153,
"eval_samples_per_second": 311.014,
"eval_steps_per_second": 19.452,
"step": 32000
},
{
"epoch": 9.3331780055918,
"grad_norm": 0.3433147966861725,
"learning_rate": 0.0004882902097902098,
"loss": 3.3882,
"step": 32050
},
{
"epoch": 9.347739981360672,
"grad_norm": 0.34038546681404114,
"learning_rate": 0.0004881153846153846,
"loss": 3.3923,
"step": 32100
},
{
"epoch": 9.362301957129544,
"grad_norm": 0.34648334980010986,
"learning_rate": 0.0004879405594405594,
"loss": 3.378,
"step": 32150
},
{
"epoch": 9.376863932898416,
"grad_norm": 0.367409348487854,
"learning_rate": 0.00048776573426573424,
"loss": 3.3755,
"step": 32200
},
{
"epoch": 9.391425908667287,
"grad_norm": 0.3667387068271637,
"learning_rate": 0.00048759090909090904,
"loss": 3.3698,
"step": 32250
},
{
"epoch": 9.40598788443616,
"grad_norm": 0.3343542814254761,
"learning_rate": 0.0004874160839160839,
"loss": 3.3945,
"step": 32300
},
{
"epoch": 9.420549860205032,
"grad_norm": 0.3317640423774719,
"learning_rate": 0.0004872412587412587,
"loss": 3.379,
"step": 32350
},
{
"epoch": 9.435111835973904,
"grad_norm": 0.3385464549064636,
"learning_rate": 0.00048706643356643354,
"loss": 3.3775,
"step": 32400
},
{
"epoch": 9.449673811742777,
"grad_norm": 0.332550048828125,
"learning_rate": 0.00048689160839160834,
"loss": 3.3821,
"step": 32450
},
{
"epoch": 9.46423578751165,
"grad_norm": 0.37024253606796265,
"learning_rate": 0.0004867167832167832,
"loss": 3.3938,
"step": 32500
},
{
"epoch": 9.478797763280522,
"grad_norm": 0.32912054657936096,
"learning_rate": 0.00048654195804195794,
"loss": 3.4036,
"step": 32550
},
{
"epoch": 9.493359739049394,
"grad_norm": 0.3475010097026825,
"learning_rate": 0.00048636713286713285,
"loss": 3.3911,
"step": 32600
},
{
"epoch": 9.507921714818266,
"grad_norm": 0.33058643341064453,
"learning_rate": 0.0004861923076923077,
"loss": 3.3881,
"step": 32650
},
{
"epoch": 9.522483690587139,
"grad_norm": 0.35297462344169617,
"learning_rate": 0.00048601748251748245,
"loss": 3.3969,
"step": 32700
},
{
"epoch": 9.537045666356011,
"grad_norm": 0.32153356075286865,
"learning_rate": 0.0004858426573426573,
"loss": 3.3882,
"step": 32750
},
{
"epoch": 9.551607642124884,
"grad_norm": 0.3378690481185913,
"learning_rate": 0.0004856678321678321,
"loss": 3.3999,
"step": 32800
},
{
"epoch": 9.566169617893756,
"grad_norm": 0.36066606640815735,
"learning_rate": 0.00048549300699300696,
"loss": 3.4033,
"step": 32850
},
{
"epoch": 9.580731593662628,
"grad_norm": 0.33761295676231384,
"learning_rate": 0.00048531818181818176,
"loss": 3.389,
"step": 32900
},
{
"epoch": 9.595293569431501,
"grad_norm": 0.34147173166275024,
"learning_rate": 0.0004851433566433566,
"loss": 3.3877,
"step": 32950
},
{
"epoch": 9.609855545200373,
"grad_norm": 0.33251672983169556,
"learning_rate": 0.0004849685314685314,
"loss": 3.3939,
"step": 33000
},
{
"epoch": 9.609855545200373,
"eval_accuracy": 0.36782082340397815,
"eval_loss": 3.5724120140075684,
"eval_runtime": 53.6826,
"eval_samples_per_second": 310.045,
"eval_steps_per_second": 19.392,
"step": 33000
},
{
"epoch": 9.624417520969246,
"grad_norm": 0.37161070108413696,
"learning_rate": 0.00048479370629370627,
"loss": 3.3929,
"step": 33050
},
{
"epoch": 9.638979496738118,
"grad_norm": 0.37724238634109497,
"learning_rate": 0.00048461888111888106,
"loss": 3.3984,
"step": 33100
},
{
"epoch": 9.65354147250699,
"grad_norm": 0.32887575030326843,
"learning_rate": 0.0004844440559440559,
"loss": 3.3922,
"step": 33150
},
{
"epoch": 9.668103448275861,
"grad_norm": 0.34217751026153564,
"learning_rate": 0.0004842692307692307,
"loss": 3.3963,
"step": 33200
},
{
"epoch": 9.682665424044734,
"grad_norm": 0.34450656175613403,
"learning_rate": 0.00048409440559440557,
"loss": 3.4125,
"step": 33250
},
{
"epoch": 9.697227399813606,
"grad_norm": 0.30637916922569275,
"learning_rate": 0.0004839195804195803,
"loss": 3.41,
"step": 33300
},
{
"epoch": 9.711789375582478,
"grad_norm": 0.37624505162239075,
"learning_rate": 0.0004837447552447552,
"loss": 3.412,
"step": 33350
},
{
"epoch": 9.72635135135135,
"grad_norm": 0.32816216349601746,
"learning_rate": 0.0004835699300699301,
"loss": 3.4044,
"step": 33400
},
{
"epoch": 9.740913327120223,
"grad_norm": 0.3651433289051056,
"learning_rate": 0.0004833951048951048,
"loss": 3.3991,
"step": 33450
},
{
"epoch": 9.755475302889096,
"grad_norm": 0.33466917276382446,
"learning_rate": 0.0004832202797202797,
"loss": 3.3992,
"step": 33500
},
{
"epoch": 9.770037278657968,
"grad_norm": 0.32142600417137146,
"learning_rate": 0.0004830454545454545,
"loss": 3.4052,
"step": 33550
},
{
"epoch": 9.78459925442684,
"grad_norm": 0.3309042453765869,
"learning_rate": 0.00048287062937062933,
"loss": 3.4094,
"step": 33600
},
{
"epoch": 9.799161230195713,
"grad_norm": 0.3519335687160492,
"learning_rate": 0.00048269580419580413,
"loss": 3.4076,
"step": 33650
},
{
"epoch": 9.813723205964585,
"grad_norm": 0.3344794511795044,
"learning_rate": 0.000482520979020979,
"loss": 3.4003,
"step": 33700
},
{
"epoch": 9.828285181733458,
"grad_norm": 0.3368070721626282,
"learning_rate": 0.0004823461538461538,
"loss": 3.4049,
"step": 33750
},
{
"epoch": 9.84284715750233,
"grad_norm": 0.33322641253471375,
"learning_rate": 0.00048217132867132864,
"loss": 3.4159,
"step": 33800
},
{
"epoch": 9.857409133271203,
"grad_norm": 0.3441433012485504,
"learning_rate": 0.00048199650349650344,
"loss": 3.405,
"step": 33850
},
{
"epoch": 9.871971109040075,
"grad_norm": 0.33350870013237,
"learning_rate": 0.0004818216783216783,
"loss": 3.4204,
"step": 33900
},
{
"epoch": 9.886533084808947,
"grad_norm": 0.32797157764434814,
"learning_rate": 0.0004816468531468531,
"loss": 3.4159,
"step": 33950
},
{
"epoch": 9.90109506057782,
"grad_norm": 0.3108486831188202,
"learning_rate": 0.00048147202797202795,
"loss": 3.4109,
"step": 34000
},
{
"epoch": 9.90109506057782,
"eval_accuracy": 0.3685346317567234,
"eval_loss": 3.5610878467559814,
"eval_runtime": 53.686,
"eval_samples_per_second": 310.025,
"eval_steps_per_second": 19.391,
"step": 34000
},
{
"epoch": 9.915657036346692,
"grad_norm": 0.3304729163646698,
"learning_rate": 0.0004812972027972028,
"loss": 3.4239,
"step": 34050
},
{
"epoch": 9.930219012115565,
"grad_norm": 0.3313955068588257,
"learning_rate": 0.0004811223776223776,
"loss": 3.4181,
"step": 34100
},
{
"epoch": 9.944780987884435,
"grad_norm": 0.3772457242012024,
"learning_rate": 0.00048094755244755245,
"loss": 3.4151,
"step": 34150
},
{
"epoch": 9.959342963653308,
"grad_norm": 0.3140145540237427,
"learning_rate": 0.0004807727272727272,
"loss": 3.4049,
"step": 34200
},
{
"epoch": 9.97390493942218,
"grad_norm": 0.3574320375919342,
"learning_rate": 0.00048059790209790205,
"loss": 3.4028,
"step": 34250
},
{
"epoch": 9.988466915191053,
"grad_norm": 0.34361740946769714,
"learning_rate": 0.00048042307692307685,
"loss": 3.416,
"step": 34300
},
{
"epoch": 10.002912395153775,
"grad_norm": 0.35159480571746826,
"learning_rate": 0.0004802482517482517,
"loss": 3.388,
"step": 34350
},
{
"epoch": 10.017474370922647,
"grad_norm": 0.3495783805847168,
"learning_rate": 0.0004800734265734265,
"loss": 3.3064,
"step": 34400
},
{
"epoch": 10.03203634669152,
"grad_norm": 0.32195907831192017,
"learning_rate": 0.00047989860139860136,
"loss": 3.3047,
"step": 34450
},
{
"epoch": 10.046598322460392,
"grad_norm": 0.33187875151634216,
"learning_rate": 0.00047972377622377616,
"loss": 3.3059,
"step": 34500
},
{
"epoch": 10.061160298229264,
"grad_norm": 0.3763480484485626,
"learning_rate": 0.000479548951048951,
"loss": 3.3148,
"step": 34550
},
{
"epoch": 10.075722273998137,
"grad_norm": 0.338958740234375,
"learning_rate": 0.0004793741258741258,
"loss": 3.313,
"step": 34600
},
{
"epoch": 10.090284249767008,
"grad_norm": 0.3450896739959717,
"learning_rate": 0.00047919930069930067,
"loss": 3.304,
"step": 34650
},
{
"epoch": 10.10484622553588,
"grad_norm": 0.33590009808540344,
"learning_rate": 0.0004790244755244755,
"loss": 3.3238,
"step": 34700
},
{
"epoch": 10.119408201304752,
"grad_norm": 0.3487749993801117,
"learning_rate": 0.0004788496503496503,
"loss": 3.3386,
"step": 34750
},
{
"epoch": 10.133970177073625,
"grad_norm": 0.35672515630722046,
"learning_rate": 0.0004786748251748252,
"loss": 3.328,
"step": 34800
},
{
"epoch": 10.148532152842497,
"grad_norm": 0.39034461975097656,
"learning_rate": 0.0004785,
"loss": 3.3355,
"step": 34850
},
{
"epoch": 10.16309412861137,
"grad_norm": 0.3186233937740326,
"learning_rate": 0.00047832517482517483,
"loss": 3.3368,
"step": 34900
},
{
"epoch": 10.177656104380242,
"grad_norm": 0.3687865734100342,
"learning_rate": 0.0004781503496503496,
"loss": 3.328,
"step": 34950
},
{
"epoch": 10.192218080149114,
"grad_norm": 0.34652620553970337,
"learning_rate": 0.00047797552447552443,
"loss": 3.3373,
"step": 35000
},
{
"epoch": 10.192218080149114,
"eval_accuracy": 0.36815674013331323,
"eval_loss": 3.577220916748047,
"eval_runtime": 53.6882,
"eval_samples_per_second": 310.012,
"eval_steps_per_second": 19.39,
"step": 35000
},
{
"epoch": 10.206780055917987,
"grad_norm": 0.3340875208377838,
"learning_rate": 0.00047780069930069923,
"loss": 3.3462,
"step": 35050
},
{
"epoch": 10.22134203168686,
"grad_norm": 0.3590002655982971,
"learning_rate": 0.0004776258741258741,
"loss": 3.3337,
"step": 35100
},
{
"epoch": 10.235904007455732,
"grad_norm": 0.35122376680374146,
"learning_rate": 0.0004774510489510489,
"loss": 3.3435,
"step": 35150
},
{
"epoch": 10.250465983224604,
"grad_norm": 0.32970312237739563,
"learning_rate": 0.00047727622377622374,
"loss": 3.35,
"step": 35200
},
{
"epoch": 10.265027958993477,
"grad_norm": 0.3339228332042694,
"learning_rate": 0.00047710139860139854,
"loss": 3.3482,
"step": 35250
},
{
"epoch": 10.279589934762349,
"grad_norm": 0.3375256657600403,
"learning_rate": 0.0004769265734265734,
"loss": 3.3569,
"step": 35300
},
{
"epoch": 10.294151910531221,
"grad_norm": 0.35364654660224915,
"learning_rate": 0.0004767517482517482,
"loss": 3.355,
"step": 35350
},
{
"epoch": 10.308713886300094,
"grad_norm": 0.34699511528015137,
"learning_rate": 0.00047657692307692304,
"loss": 3.3481,
"step": 35400
},
{
"epoch": 10.323275862068966,
"grad_norm": 0.3622118830680847,
"learning_rate": 0.0004764020979020979,
"loss": 3.3501,
"step": 35450
},
{
"epoch": 10.337837837837839,
"grad_norm": 0.3498924970626831,
"learning_rate": 0.0004762272727272727,
"loss": 3.3644,
"step": 35500
},
{
"epoch": 10.35239981360671,
"grad_norm": 0.3727911114692688,
"learning_rate": 0.00047605244755244755,
"loss": 3.3639,
"step": 35550
},
{
"epoch": 10.366961789375582,
"grad_norm": 0.3646833300590515,
"learning_rate": 0.00047587762237762235,
"loss": 3.3655,
"step": 35600
},
{
"epoch": 10.381523765144454,
"grad_norm": 0.36712172627449036,
"learning_rate": 0.0004757027972027972,
"loss": 3.3527,
"step": 35650
},
{
"epoch": 10.396085740913326,
"grad_norm": 0.347441703081131,
"learning_rate": 0.00047552797202797195,
"loss": 3.3624,
"step": 35700
},
{
"epoch": 10.410647716682199,
"grad_norm": 0.32130295038223267,
"learning_rate": 0.0004753531468531468,
"loss": 3.3592,
"step": 35750
},
{
"epoch": 10.425209692451071,
"grad_norm": 0.33343687653541565,
"learning_rate": 0.0004751783216783216,
"loss": 3.3741,
"step": 35800
},
{
"epoch": 10.439771668219944,
"grad_norm": 0.3670390248298645,
"learning_rate": 0.00047500349650349646,
"loss": 3.3689,
"step": 35850
},
{
"epoch": 10.454333643988816,
"grad_norm": 0.3569272458553314,
"learning_rate": 0.00047482867132867126,
"loss": 3.3685,
"step": 35900
},
{
"epoch": 10.468895619757689,
"grad_norm": 0.34055817127227783,
"learning_rate": 0.0004746538461538461,
"loss": 3.3634,
"step": 35950
},
{
"epoch": 10.483457595526561,
"grad_norm": 0.33791208267211914,
"learning_rate": 0.0004744790209790209,
"loss": 3.3609,
"step": 36000
},
{
"epoch": 10.483457595526561,
"eval_accuracy": 0.3684646735999315,
"eval_loss": 3.568392753601074,
"eval_runtime": 53.7009,
"eval_samples_per_second": 309.939,
"eval_steps_per_second": 19.385,
"step": 36000
},
{
"epoch": 10.498019571295433,
"grad_norm": 0.32268309593200684,
"learning_rate": 0.00047430419580419576,
"loss": 3.3688,
"step": 36050
},
{
"epoch": 10.512581547064306,
"grad_norm": 0.33770951628685,
"learning_rate": 0.0004741293706293706,
"loss": 3.3606,
"step": 36100
},
{
"epoch": 10.527143522833178,
"grad_norm": 0.35543033480644226,
"learning_rate": 0.0004739545454545454,
"loss": 3.3651,
"step": 36150
},
{
"epoch": 10.54170549860205,
"grad_norm": 0.36934471130371094,
"learning_rate": 0.00047377972027972027,
"loss": 3.3687,
"step": 36200
},
{
"epoch": 10.556267474370923,
"grad_norm": 0.35115522146224976,
"learning_rate": 0.00047360489510489507,
"loss": 3.3592,
"step": 36250
},
{
"epoch": 10.570829450139795,
"grad_norm": 0.3395870327949524,
"learning_rate": 0.0004734300699300699,
"loss": 3.3721,
"step": 36300
},
{
"epoch": 10.585391425908668,
"grad_norm": 0.33614251017570496,
"learning_rate": 0.0004732552447552447,
"loss": 3.3549,
"step": 36350
},
{
"epoch": 10.59995340167754,
"grad_norm": 0.36550241708755493,
"learning_rate": 0.0004730804195804196,
"loss": 3.3782,
"step": 36400
},
{
"epoch": 10.614515377446413,
"grad_norm": 0.36166924238204956,
"learning_rate": 0.0004729055944055943,
"loss": 3.3783,
"step": 36450
},
{
"epoch": 10.629077353215283,
"grad_norm": 0.3547736406326294,
"learning_rate": 0.0004727307692307692,
"loss": 3.3818,
"step": 36500
},
{
"epoch": 10.643639328984156,
"grad_norm": 0.3256096839904785,
"learning_rate": 0.000472555944055944,
"loss": 3.3684,
"step": 36550
},
{
"epoch": 10.658201304753028,
"grad_norm": 0.3733418583869934,
"learning_rate": 0.00047238111888111883,
"loss": 3.374,
"step": 36600
},
{
"epoch": 10.6727632805219,
"grad_norm": 0.3601815700531006,
"learning_rate": 0.00047220629370629363,
"loss": 3.3792,
"step": 36650
},
{
"epoch": 10.687325256290773,
"grad_norm": 0.35017862915992737,
"learning_rate": 0.0004720314685314685,
"loss": 3.3813,
"step": 36700
},
{
"epoch": 10.701887232059645,
"grad_norm": 0.34967872500419617,
"learning_rate": 0.0004718566433566433,
"loss": 3.3885,
"step": 36750
},
{
"epoch": 10.716449207828518,
"grad_norm": 0.34832727909088135,
"learning_rate": 0.00047168181818181814,
"loss": 3.3882,
"step": 36800
},
{
"epoch": 10.73101118359739,
"grad_norm": 0.3380468189716339,
"learning_rate": 0.000471506993006993,
"loss": 3.3771,
"step": 36850
},
{
"epoch": 10.745573159366263,
"grad_norm": 0.31330186128616333,
"learning_rate": 0.0004713321678321678,
"loss": 3.378,
"step": 36900
},
{
"epoch": 10.760135135135135,
"grad_norm": 0.35074543952941895,
"learning_rate": 0.00047115734265734265,
"loss": 3.3779,
"step": 36950
},
{
"epoch": 10.774697110904008,
"grad_norm": 0.33050093054771423,
"learning_rate": 0.00047098251748251745,
"loss": 3.4038,
"step": 37000
},
{
"epoch": 10.774697110904008,
"eval_accuracy": 0.36911640143706986,
"eval_loss": 3.5599007606506348,
"eval_runtime": 53.6294,
"eval_samples_per_second": 310.352,
"eval_steps_per_second": 19.411,
"step": 37000
},
{
"epoch": 10.78925908667288,
"grad_norm": 0.3579956293106079,
"learning_rate": 0.0004708076923076923,
"loss": 3.3911,
"step": 37050
},
{
"epoch": 10.803821062441752,
"grad_norm": 0.35300689935684204,
"learning_rate": 0.0004706328671328671,
"loss": 3.3969,
"step": 37100
},
{
"epoch": 10.818383038210625,
"grad_norm": 0.3623140752315521,
"learning_rate": 0.00047045804195804195,
"loss": 3.3748,
"step": 37150
},
{
"epoch": 10.832945013979497,
"grad_norm": 0.3329179584980011,
"learning_rate": 0.0004702832167832167,
"loss": 3.3935,
"step": 37200
},
{
"epoch": 10.84750698974837,
"grad_norm": 0.33512651920318604,
"learning_rate": 0.00047010839160839155,
"loss": 3.3956,
"step": 37250
},
{
"epoch": 10.862068965517242,
"grad_norm": 0.3599680960178375,
"learning_rate": 0.00046993356643356635,
"loss": 3.3912,
"step": 37300
},
{
"epoch": 10.876630941286114,
"grad_norm": 0.3267347514629364,
"learning_rate": 0.0004697587412587412,
"loss": 3.3887,
"step": 37350
},
{
"epoch": 10.891192917054987,
"grad_norm": 0.34810012578964233,
"learning_rate": 0.000469583916083916,
"loss": 3.3945,
"step": 37400
},
{
"epoch": 10.905754892823857,
"grad_norm": 0.337226539850235,
"learning_rate": 0.00046940909090909086,
"loss": 3.3896,
"step": 37450
},
{
"epoch": 10.92031686859273,
"grad_norm": 0.3359721302986145,
"learning_rate": 0.0004692342657342657,
"loss": 3.3856,
"step": 37500
},
{
"epoch": 10.934878844361602,
"grad_norm": 0.3451782166957855,
"learning_rate": 0.0004690594405594405,
"loss": 3.399,
"step": 37550
},
{
"epoch": 10.949440820130475,
"grad_norm": 0.33843597769737244,
"learning_rate": 0.00046888461538461537,
"loss": 3.3997,
"step": 37600
},
{
"epoch": 10.964002795899347,
"grad_norm": 0.34089478850364685,
"learning_rate": 0.00046870979020979017,
"loss": 3.3971,
"step": 37650
},
{
"epoch": 10.97856477166822,
"grad_norm": 0.3247876763343811,
"learning_rate": 0.000468534965034965,
"loss": 3.3987,
"step": 37700
},
{
"epoch": 10.993126747437092,
"grad_norm": 0.37552350759506226,
"learning_rate": 0.0004683601398601398,
"loss": 3.3995,
"step": 37750
},
{
"epoch": 11.007572227399814,
"grad_norm": 0.347272127866745,
"learning_rate": 0.0004681853146853147,
"loss": 3.3177,
"step": 37800
},
{
"epoch": 11.022134203168687,
"grad_norm": 0.32849717140197754,
"learning_rate": 0.0004680104895104895,
"loss": 3.2835,
"step": 37850
},
{
"epoch": 11.036696178937559,
"grad_norm": 0.3299245834350586,
"learning_rate": 0.00046783566433566433,
"loss": 3.2808,
"step": 37900
},
{
"epoch": 11.05125815470643,
"grad_norm": 0.365369975566864,
"learning_rate": 0.0004676608391608391,
"loss": 3.287,
"step": 37950
},
{
"epoch": 11.065820130475302,
"grad_norm": 0.3561994433403015,
"learning_rate": 0.00046748601398601393,
"loss": 3.2944,
"step": 38000
},
{
"epoch": 11.065820130475302,
"eval_accuracy": 0.3690345680301335,
"eval_loss": 3.572451591491699,
"eval_runtime": 53.7621,
"eval_samples_per_second": 309.586,
"eval_steps_per_second": 19.363,
"step": 38000
},
{
"epoch": 11.080382106244175,
"grad_norm": 0.3286043107509613,
"learning_rate": 0.00046731118881118873,
"loss": 3.2934,
"step": 38050
},
{
"epoch": 11.094944082013047,
"grad_norm": 0.3433077037334442,
"learning_rate": 0.0004671363636363636,
"loss": 3.3118,
"step": 38100
},
{
"epoch": 11.10950605778192,
"grad_norm": 0.3572927713394165,
"learning_rate": 0.00046696153846153844,
"loss": 3.3082,
"step": 38150
},
{
"epoch": 11.124068033550792,
"grad_norm": 0.3482791483402252,
"learning_rate": 0.00046678671328671324,
"loss": 3.2973,
"step": 38200
},
{
"epoch": 11.138630009319664,
"grad_norm": 0.34981897473335266,
"learning_rate": 0.0004666118881118881,
"loss": 3.3193,
"step": 38250
},
{
"epoch": 11.153191985088537,
"grad_norm": 0.3662327527999878,
"learning_rate": 0.0004664370629370629,
"loss": 3.3112,
"step": 38300
},
{
"epoch": 11.167753960857409,
"grad_norm": 0.34621360898017883,
"learning_rate": 0.00046626223776223774,
"loss": 3.3105,
"step": 38350
},
{
"epoch": 11.182315936626281,
"grad_norm": 0.3586236834526062,
"learning_rate": 0.00046608741258741254,
"loss": 3.3078,
"step": 38400
},
{
"epoch": 11.196877912395154,
"grad_norm": 0.3501543402671814,
"learning_rate": 0.0004659125874125874,
"loss": 3.323,
"step": 38450
},
{
"epoch": 11.211439888164026,
"grad_norm": 0.3348871171474457,
"learning_rate": 0.0004657377622377622,
"loss": 3.3324,
"step": 38500
},
{
"epoch": 11.226001863932899,
"grad_norm": 0.405658483505249,
"learning_rate": 0.00046556293706293705,
"loss": 3.322,
"step": 38550
},
{
"epoch": 11.240563839701771,
"grad_norm": 0.3457586169242859,
"learning_rate": 0.00046538811188811185,
"loss": 3.3267,
"step": 38600
},
{
"epoch": 11.255125815470644,
"grad_norm": 0.33291178941726685,
"learning_rate": 0.0004652132867132867,
"loss": 3.3245,
"step": 38650
},
{
"epoch": 11.269687791239516,
"grad_norm": 0.34564632177352905,
"learning_rate": 0.00046503846153846145,
"loss": 3.3361,
"step": 38700
},
{
"epoch": 11.284249767008388,
"grad_norm": 0.3823041021823883,
"learning_rate": 0.0004648636363636363,
"loss": 3.3214,
"step": 38750
},
{
"epoch": 11.29881174277726,
"grad_norm": 0.32795125246047974,
"learning_rate": 0.0004646888111888111,
"loss": 3.3323,
"step": 38800
},
{
"epoch": 11.313373718546133,
"grad_norm": 0.35187655687332153,
"learning_rate": 0.00046451398601398596,
"loss": 3.3299,
"step": 38850
},
{
"epoch": 11.327935694315004,
"grad_norm": 0.3160579204559326,
"learning_rate": 0.0004643391608391608,
"loss": 3.3431,
"step": 38900
},
{
"epoch": 11.342497670083876,
"grad_norm": 0.38711971044540405,
"learning_rate": 0.0004641643356643356,
"loss": 3.332,
"step": 38950
},
{
"epoch": 11.357059645852749,
"grad_norm": 0.37961405515670776,
"learning_rate": 0.00046398951048951046,
"loss": 3.3442,
"step": 39000
},
{
"epoch": 11.357059645852749,
"eval_accuracy": 0.36912874699415077,
"eval_loss": 3.5660009384155273,
"eval_runtime": 53.5851,
"eval_samples_per_second": 310.609,
"eval_steps_per_second": 19.427,
"step": 39000
},
{
"epoch": 11.371621621621621,
"grad_norm": 0.33141857385635376,
"learning_rate": 0.00046381468531468526,
"loss": 3.3419,
"step": 39050
},
{
"epoch": 11.386183597390493,
"grad_norm": 0.3561297059059143,
"learning_rate": 0.0004636398601398601,
"loss": 3.3425,
"step": 39100
},
{
"epoch": 11.400745573159366,
"grad_norm": 0.35019031167030334,
"learning_rate": 0.0004634650349650349,
"loss": 3.3343,
"step": 39150
},
{
"epoch": 11.415307548928238,
"grad_norm": 0.3231639862060547,
"learning_rate": 0.00046329020979020977,
"loss": 3.3429,
"step": 39200
},
{
"epoch": 11.42986952469711,
"grad_norm": 0.3577631115913391,
"learning_rate": 0.00046311538461538457,
"loss": 3.3387,
"step": 39250
},
{
"epoch": 11.444431500465983,
"grad_norm": 0.34335488080978394,
"learning_rate": 0.0004629405594405594,
"loss": 3.3505,
"step": 39300
},
{
"epoch": 11.458993476234856,
"grad_norm": 0.3327701985836029,
"learning_rate": 0.0004627657342657342,
"loss": 3.3466,
"step": 39350
},
{
"epoch": 11.473555452003728,
"grad_norm": 0.3649628758430481,
"learning_rate": 0.0004625909090909091,
"loss": 3.3514,
"step": 39400
},
{
"epoch": 11.4881174277726,
"grad_norm": 0.35762521624565125,
"learning_rate": 0.0004624160839160838,
"loss": 3.3553,
"step": 39450
},
{
"epoch": 11.502679403541473,
"grad_norm": 0.34466105699539185,
"learning_rate": 0.0004622412587412587,
"loss": 3.343,
"step": 39500
},
{
"epoch": 11.517241379310345,
"grad_norm": 0.3552060127258301,
"learning_rate": 0.00046206643356643353,
"loss": 3.3465,
"step": 39550
},
{
"epoch": 11.531803355079218,
"grad_norm": 0.3663930892944336,
"learning_rate": 0.00046189160839160833,
"loss": 3.3511,
"step": 39600
},
{
"epoch": 11.54636533084809,
"grad_norm": 0.3702998161315918,
"learning_rate": 0.0004617167832167832,
"loss": 3.355,
"step": 39650
},
{
"epoch": 11.560927306616962,
"grad_norm": 0.35928207635879517,
"learning_rate": 0.000461541958041958,
"loss": 3.3474,
"step": 39700
},
{
"epoch": 11.575489282385835,
"grad_norm": 0.3266322910785675,
"learning_rate": 0.00046136713286713284,
"loss": 3.3541,
"step": 39750
},
{
"epoch": 11.590051258154705,
"grad_norm": 0.3636510968208313,
"learning_rate": 0.00046119230769230764,
"loss": 3.3663,
"step": 39800
},
{
"epoch": 11.60461323392358,
"grad_norm": 0.3514472544193268,
"learning_rate": 0.0004610174825174825,
"loss": 3.3469,
"step": 39850
},
{
"epoch": 11.61917520969245,
"grad_norm": 0.32494455575942993,
"learning_rate": 0.0004608426573426573,
"loss": 3.3513,
"step": 39900
},
{
"epoch": 11.633737185461323,
"grad_norm": 0.382254958152771,
"learning_rate": 0.00046066783216783215,
"loss": 3.3608,
"step": 39950
},
{
"epoch": 11.648299161230195,
"grad_norm": 0.34891214966773987,
"learning_rate": 0.00046049300699300695,
"loss": 3.3584,
"step": 40000
},
{
"epoch": 11.648299161230195,
"eval_accuracy": 0.3698353831661157,
"eval_loss": 3.5602543354034424,
"eval_runtime": 53.7409,
"eval_samples_per_second": 309.708,
"eval_steps_per_second": 19.371,
"step": 40000
},
{
"epoch": 11.662861136999068,
"grad_norm": 0.37278759479522705,
"learning_rate": 0.0004603181818181818,
"loss": 3.3542,
"step": 40050
},
{
"epoch": 11.67742311276794,
"grad_norm": 0.34679147601127625,
"learning_rate": 0.0004601433566433566,
"loss": 3.3569,
"step": 40100
},
{
"epoch": 11.691985088536812,
"grad_norm": 0.3317880630493164,
"learning_rate": 0.00045996853146853145,
"loss": 3.3661,
"step": 40150
},
{
"epoch": 11.706547064305685,
"grad_norm": 0.3492712676525116,
"learning_rate": 0.0004597937062937062,
"loss": 3.3572,
"step": 40200
},
{
"epoch": 11.721109040074557,
"grad_norm": 0.34208858013153076,
"learning_rate": 0.00045961888111888105,
"loss": 3.3631,
"step": 40250
},
{
"epoch": 11.73567101584343,
"grad_norm": 0.33355712890625,
"learning_rate": 0.0004594440559440559,
"loss": 3.3765,
"step": 40300
},
{
"epoch": 11.750232991612302,
"grad_norm": 0.3657315671443939,
"learning_rate": 0.0004592692307692307,
"loss": 3.3711,
"step": 40350
},
{
"epoch": 11.764794967381174,
"grad_norm": 0.3430049419403076,
"learning_rate": 0.00045909440559440556,
"loss": 3.3704,
"step": 40400
},
{
"epoch": 11.779356943150047,
"grad_norm": 0.3502923250198364,
"learning_rate": 0.00045891958041958036,
"loss": 3.3644,
"step": 40450
},
{
"epoch": 11.79391891891892,
"grad_norm": 0.34256404638290405,
"learning_rate": 0.0004587447552447552,
"loss": 3.3691,
"step": 40500
},
{
"epoch": 11.808480894687792,
"grad_norm": 0.3291017711162567,
"learning_rate": 0.00045856993006993,
"loss": 3.3617,
"step": 40550
},
{
"epoch": 11.823042870456664,
"grad_norm": 0.3593076467514038,
"learning_rate": 0.00045839510489510487,
"loss": 3.3738,
"step": 40600
},
{
"epoch": 11.837604846225537,
"grad_norm": 0.32617098093032837,
"learning_rate": 0.00045822027972027967,
"loss": 3.3535,
"step": 40650
},
{
"epoch": 11.852166821994409,
"grad_norm": 0.3508724868297577,
"learning_rate": 0.0004580454545454545,
"loss": 3.3876,
"step": 40700
},
{
"epoch": 11.86672879776328,
"grad_norm": 0.3479333519935608,
"learning_rate": 0.0004578706293706293,
"loss": 3.3695,
"step": 40750
},
{
"epoch": 11.881290773532152,
"grad_norm": 0.32733118534088135,
"learning_rate": 0.0004576958041958042,
"loss": 3.3708,
"step": 40800
},
{
"epoch": 11.895852749301024,
"grad_norm": 0.3198227882385254,
"learning_rate": 0.000457520979020979,
"loss": 3.3678,
"step": 40850
},
{
"epoch": 11.910414725069897,
"grad_norm": 0.36628979444503784,
"learning_rate": 0.00045734615384615383,
"loss": 3.38,
"step": 40900
},
{
"epoch": 11.92497670083877,
"grad_norm": 0.3251120448112488,
"learning_rate": 0.0004571713286713287,
"loss": 3.377,
"step": 40950
},
{
"epoch": 11.939538676607642,
"grad_norm": 0.3339826166629791,
"learning_rate": 0.00045699650349650343,
"loss": 3.3727,
"step": 41000
},
{
"epoch": 11.939538676607642,
"eval_accuracy": 0.3699530774769538,
"eval_loss": 3.551804542541504,
"eval_runtime": 53.6416,
"eval_samples_per_second": 310.282,
"eval_steps_per_second": 19.407,
"step": 41000
},
{
"epoch": 11.954100652376514,
"grad_norm": NaN,
"learning_rate": 0.0004568216783216783,
"loss": 3.375,
"step": 41050
},
{
"epoch": 11.968662628145387,
"grad_norm": 0.34985795617103577,
"learning_rate": 0.0004566468531468531,
"loss": 3.3771,
"step": 41100
},
{
"epoch": 11.983224603914259,
"grad_norm": 0.349457323551178,
"learning_rate": 0.00045647202797202794,
"loss": 3.3705,
"step": 41150
},
{
"epoch": 11.997786579683131,
"grad_norm": 0.37310871481895447,
"learning_rate": 0.00045629720279720274,
"loss": 3.3731,
"step": 41200
},
{
"epoch": 12.012232059645854,
"grad_norm": 0.355582594871521,
"learning_rate": 0.0004561223776223776,
"loss": 3.2841,
"step": 41250
},
{
"epoch": 12.026794035414724,
"grad_norm": 0.3613288700580597,
"learning_rate": 0.0004559475524475524,
"loss": 3.2708,
"step": 41300
},
{
"epoch": 12.041356011183597,
"grad_norm": 0.3537559509277344,
"learning_rate": 0.00045577272727272724,
"loss": 3.2653,
"step": 41350
},
{
"epoch": 12.055917986952469,
"grad_norm": 0.3575269281864166,
"learning_rate": 0.00045559790209790204,
"loss": 3.2662,
"step": 41400
},
{
"epoch": 12.070479962721341,
"grad_norm": 0.35903748869895935,
"learning_rate": 0.0004554230769230769,
"loss": 3.2705,
"step": 41450
},
{
"epoch": 12.085041938490214,
"grad_norm": 0.34469369053840637,
"learning_rate": 0.0004552482517482517,
"loss": 3.2761,
"step": 41500
},
{
"epoch": 12.099603914259086,
"grad_norm": 0.36321061849594116,
"learning_rate": 0.00045507342657342655,
"loss": 3.2873,
"step": 41550
},
{
"epoch": 12.114165890027959,
"grad_norm": 0.3258301317691803,
"learning_rate": 0.00045489860139860135,
"loss": 3.29,
"step": 41600
},
{
"epoch": 12.128727865796831,
"grad_norm": 0.3304847776889801,
"learning_rate": 0.0004547237762237762,
"loss": 3.293,
"step": 41650
},
{
"epoch": 12.143289841565704,
"grad_norm": 0.34567683935165405,
"learning_rate": 0.00045454895104895106,
"loss": 3.2915,
"step": 41700
},
{
"epoch": 12.157851817334576,
"grad_norm": 0.356577068567276,
"learning_rate": 0.0004543741258741258,
"loss": 3.2912,
"step": 41750
},
{
"epoch": 12.172413793103448,
"grad_norm": 0.37577152252197266,
"learning_rate": 0.00045419930069930066,
"loss": 3.2981,
"step": 41800
},
{
"epoch": 12.18697576887232,
"grad_norm": 0.36569955945014954,
"learning_rate": 0.00045402447552447546,
"loss": 3.2971,
"step": 41850
},
{
"epoch": 12.201537744641193,
"grad_norm": 0.33984923362731934,
"learning_rate": 0.0004538496503496503,
"loss": 3.3024,
"step": 41900
},
{
"epoch": 12.216099720410066,
"grad_norm": 0.3310795724391937,
"learning_rate": 0.0004536748251748251,
"loss": 3.3037,
"step": 41950
},
{
"epoch": 12.230661696178938,
"grad_norm": 0.3288843631744385,
"learning_rate": 0.00045349999999999996,
"loss": 3.2891,
"step": 42000
},
{
"epoch": 12.230661696178938,
"eval_accuracy": 0.36970757725614467,
"eval_loss": 3.565706491470337,
"eval_runtime": 53.7928,
"eval_samples_per_second": 309.409,
"eval_steps_per_second": 19.352,
"step": 42000
},
{
"epoch": 12.24522367194781,
"grad_norm": 0.3533994257450104,
"learning_rate": 0.00045332517482517476,
"loss": 3.306,
"step": 42050
},
{
"epoch": 12.259785647716683,
"grad_norm": 0.3744911849498749,
"learning_rate": 0.0004531503496503496,
"loss": 3.3153,
"step": 42100
},
{
"epoch": 12.274347623485555,
"grad_norm": 0.33848461508750916,
"learning_rate": 0.0004529755244755244,
"loss": 3.3241,
"step": 42150
},
{
"epoch": 12.288909599254428,
"grad_norm": 0.3552786111831665,
"learning_rate": 0.00045280069930069927,
"loss": 3.304,
"step": 42200
},
{
"epoch": 12.303471575023298,
"grad_norm": 0.3374313414096832,
"learning_rate": 0.00045262587412587407,
"loss": 3.3176,
"step": 42250
},
{
"epoch": 12.31803355079217,
"grad_norm": 0.3446204364299774,
"learning_rate": 0.0004524510489510489,
"loss": 3.316,
"step": 42300
},
{
"epoch": 12.332595526561043,
"grad_norm": 0.33028316497802734,
"learning_rate": 0.0004522762237762238,
"loss": 3.3309,
"step": 42350
},
{
"epoch": 12.347157502329916,
"grad_norm": 0.3592996299266815,
"learning_rate": 0.0004521013986013986,
"loss": 3.3166,
"step": 42400
},
{
"epoch": 12.361719478098788,
"grad_norm": 0.32952797412872314,
"learning_rate": 0.00045192657342657343,
"loss": 3.3313,
"step": 42450
},
{
"epoch": 12.37628145386766,
"grad_norm": 0.3635094165802002,
"learning_rate": 0.0004517517482517482,
"loss": 3.3239,
"step": 42500
},
{
"epoch": 12.390843429636533,
"grad_norm": 0.34926414489746094,
"learning_rate": 0.00045157692307692303,
"loss": 3.3094,
"step": 42550
},
{
"epoch": 12.405405405405405,
"grad_norm": 0.3685735762119293,
"learning_rate": 0.00045140209790209783,
"loss": 3.3258,
"step": 42600
},
{
"epoch": 12.419967381174278,
"grad_norm": 0.39783161878585815,
"learning_rate": 0.0004512272727272727,
"loss": 3.3241,
"step": 42650
},
{
"epoch": 12.43452935694315,
"grad_norm": 0.3684312701225281,
"learning_rate": 0.0004510524475524475,
"loss": 3.33,
"step": 42700
},
{
"epoch": 12.449091332712023,
"grad_norm": 0.3377741873264313,
"learning_rate": 0.00045087762237762234,
"loss": 3.3362,
"step": 42750
},
{
"epoch": 12.463653308480895,
"grad_norm": 0.3300979733467102,
"learning_rate": 0.00045070279720279714,
"loss": 3.3443,
"step": 42800
},
{
"epoch": 12.478215284249767,
"grad_norm": 0.34091269969940186,
"learning_rate": 0.000450527972027972,
"loss": 3.3248,
"step": 42850
},
{
"epoch": 12.49277726001864,
"grad_norm": 0.36633142828941345,
"learning_rate": 0.0004503531468531468,
"loss": 3.3339,
"step": 42900
},
{
"epoch": 12.507339235787512,
"grad_norm": 0.3405598998069763,
"learning_rate": 0.00045017832167832165,
"loss": 3.3359,
"step": 42950
},
{
"epoch": 12.521901211556385,
"grad_norm": 0.34523844718933105,
"learning_rate": 0.0004500034965034965,
"loss": 3.3365,
"step": 43000
},
{
"epoch": 12.521901211556385,
"eval_accuracy": 0.3699521368630809,
"eval_loss": 3.558289051055908,
"eval_runtime": 53.8081,
"eval_samples_per_second": 309.321,
"eval_steps_per_second": 19.347,
"step": 43000
},
{
"epoch": 12.536463187325257,
"grad_norm": 0.33098194003105164,
"learning_rate": 0.0004498286713286713,
"loss": 3.3353,
"step": 43050
},
{
"epoch": 12.55102516309413,
"grad_norm": 0.3387551009654999,
"learning_rate": 0.00044965384615384615,
"loss": 3.3402,
"step": 43100
},
{
"epoch": 12.565587138863002,
"grad_norm": 0.4029315114021301,
"learning_rate": 0.00044947902097902095,
"loss": 3.337,
"step": 43150
},
{
"epoch": 12.580149114631872,
"grad_norm": 0.3447522521018982,
"learning_rate": 0.0004493041958041958,
"loss": 3.3323,
"step": 43200
},
{
"epoch": 12.594711090400745,
"grad_norm": 0.36428382992744446,
"learning_rate": 0.00044912937062937055,
"loss": 3.3395,
"step": 43250
},
{
"epoch": 12.609273066169617,
"grad_norm": 0.33712589740753174,
"learning_rate": 0.0004489545454545454,
"loss": 3.3474,
"step": 43300
},
{
"epoch": 12.62383504193849,
"grad_norm": 0.3448302149772644,
"learning_rate": 0.0004487797202797202,
"loss": 3.3336,
"step": 43350
},
{
"epoch": 12.638397017707362,
"grad_norm": 0.3623657822608948,
"learning_rate": 0.00044860489510489506,
"loss": 3.3303,
"step": 43400
},
{
"epoch": 12.652958993476235,
"grad_norm": 0.36065393686294556,
"learning_rate": 0.00044843006993006986,
"loss": 3.338,
"step": 43450
},
{
"epoch": 12.667520969245107,
"grad_norm": 0.3675295412540436,
"learning_rate": 0.0004482552447552447,
"loss": 3.3405,
"step": 43500
},
{
"epoch": 12.68208294501398,
"grad_norm": 0.33962827920913696,
"learning_rate": 0.0004480804195804195,
"loss": 3.3462,
"step": 43550
},
{
"epoch": 12.696644920782852,
"grad_norm": 0.33174002170562744,
"learning_rate": 0.00044790559440559437,
"loss": 3.3458,
"step": 43600
},
{
"epoch": 12.711206896551724,
"grad_norm": 0.35363972187042236,
"learning_rate": 0.00044773076923076917,
"loss": 3.3467,
"step": 43650
},
{
"epoch": 12.725768872320597,
"grad_norm": 0.3485361933708191,
"learning_rate": 0.000447555944055944,
"loss": 3.3544,
"step": 43700
},
{
"epoch": 12.740330848089469,
"grad_norm": 0.3747612535953522,
"learning_rate": 0.0004473811188811189,
"loss": 3.3621,
"step": 43750
},
{
"epoch": 12.754892823858341,
"grad_norm": 0.35302239656448364,
"learning_rate": 0.0004472062937062937,
"loss": 3.3544,
"step": 43800
},
{
"epoch": 12.769454799627214,
"grad_norm": 0.3448193371295929,
"learning_rate": 0.00044703146853146853,
"loss": 3.3471,
"step": 43850
},
{
"epoch": 12.784016775396086,
"grad_norm": 0.34541070461273193,
"learning_rate": 0.00044685664335664333,
"loss": 3.3338,
"step": 43900
},
{
"epoch": 12.798578751164959,
"grad_norm": 0.3578779101371765,
"learning_rate": 0.0004466818181818182,
"loss": 3.3472,
"step": 43950
},
{
"epoch": 12.813140726933831,
"grad_norm": 0.3638927936553955,
"learning_rate": 0.00044650699300699293,
"loss": 3.3517,
"step": 44000
},
{
"epoch": 12.813140726933831,
"eval_accuracy": 0.3705229719071558,
"eval_loss": 3.551844358444214,
"eval_runtime": 53.6548,
"eval_samples_per_second": 310.205,
"eval_steps_per_second": 19.402,
"step": 44000
},
{
"epoch": 12.827702702702704,
"grad_norm": 0.36279812455177307,
"learning_rate": 0.0004463321678321678,
"loss": 3.3503,
"step": 44050
},
{
"epoch": 12.842264678471576,
"grad_norm": 0.3644449710845947,
"learning_rate": 0.0004461573426573426,
"loss": 3.3355,
"step": 44100
},
{
"epoch": 12.856826654240447,
"grad_norm": 0.3525535762310028,
"learning_rate": 0.00044598251748251744,
"loss": 3.3563,
"step": 44150
},
{
"epoch": 12.871388630009319,
"grad_norm": 0.35456639528274536,
"learning_rate": 0.00044580769230769224,
"loss": 3.3572,
"step": 44200
},
{
"epoch": 12.885950605778191,
"grad_norm": 0.34489408135414124,
"learning_rate": 0.0004456328671328671,
"loss": 3.3531,
"step": 44250
},
{
"epoch": 12.900512581547064,
"grad_norm": 0.35041430592536926,
"learning_rate": 0.0004454580419580419,
"loss": 3.3423,
"step": 44300
},
{
"epoch": 12.915074557315936,
"grad_norm": 0.32889389991760254,
"learning_rate": 0.00044528321678321674,
"loss": 3.3624,
"step": 44350
},
{
"epoch": 12.929636533084809,
"grad_norm": 0.3557213544845581,
"learning_rate": 0.0004451083916083916,
"loss": 3.3462,
"step": 44400
},
{
"epoch": 12.944198508853681,
"grad_norm": 0.3788115680217743,
"learning_rate": 0.0004449335664335664,
"loss": 3.3618,
"step": 44450
},
{
"epoch": 12.958760484622553,
"grad_norm": 0.36202603578567505,
"learning_rate": 0.00044475874125874125,
"loss": 3.3527,
"step": 44500
},
{
"epoch": 12.973322460391426,
"grad_norm": 0.3443240821361542,
"learning_rate": 0.00044458391608391605,
"loss": 3.361,
"step": 44550
},
{
"epoch": 12.987884436160298,
"grad_norm": 0.3382508158683777,
"learning_rate": 0.0004444090909090909,
"loss": 3.3559,
"step": 44600
},
{
"epoch": 13.002329916123019,
"grad_norm": 0.3703657388687134,
"learning_rate": 0.0004442342657342657,
"loss": 3.3453,
"step": 44650
},
{
"epoch": 13.016891891891891,
"grad_norm": 0.3575532138347626,
"learning_rate": 0.00044405944055944056,
"loss": 3.235,
"step": 44700
},
{
"epoch": 13.031453867660764,
"grad_norm": 0.33988216519355774,
"learning_rate": 0.0004438846153846153,
"loss": 3.2558,
"step": 44750
},
{
"epoch": 13.046015843429636,
"grad_norm": 0.3483348488807678,
"learning_rate": 0.00044370979020979016,
"loss": 3.2569,
"step": 44800
},
{
"epoch": 13.060577819198508,
"grad_norm": 0.3623815178871155,
"learning_rate": 0.00044353496503496496,
"loss": 3.2566,
"step": 44850
},
{
"epoch": 13.07513979496738,
"grad_norm": 0.3875901699066162,
"learning_rate": 0.0004433601398601398,
"loss": 3.2659,
"step": 44900
},
{
"epoch": 13.089701770736253,
"grad_norm": 0.3628861606121063,
"learning_rate": 0.0004431853146853146,
"loss": 3.2644,
"step": 44950
},
{
"epoch": 13.104263746505126,
"grad_norm": 0.35806170105934143,
"learning_rate": 0.00044301048951048946,
"loss": 3.2635,
"step": 45000
},
{
"epoch": 13.104263746505126,
"eval_accuracy": 0.37037071003649114,
"eval_loss": 3.5615053176879883,
"eval_runtime": 53.6575,
"eval_samples_per_second": 310.189,
"eval_steps_per_second": 19.401,
"step": 45000
},
{
"epoch": 13.118825722273998,
"grad_norm": 0.3943418562412262,
"learning_rate": 0.00044283566433566426,
"loss": 3.2745,
"step": 45050
},
{
"epoch": 13.13338769804287,
"grad_norm": 0.3521541953086853,
"learning_rate": 0.0004426608391608391,
"loss": 3.2742,
"step": 45100
},
{
"epoch": 13.147949673811743,
"grad_norm": 0.4016038477420807,
"learning_rate": 0.00044248601398601397,
"loss": 3.2754,
"step": 45150
},
{
"epoch": 13.162511649580615,
"grad_norm": 0.35581544041633606,
"learning_rate": 0.00044231118881118877,
"loss": 3.2855,
"step": 45200
},
{
"epoch": 13.177073625349488,
"grad_norm": 0.3793915808200836,
"learning_rate": 0.0004421363636363636,
"loss": 3.2848,
"step": 45250
},
{
"epoch": 13.19163560111836,
"grad_norm": 0.3508451581001282,
"learning_rate": 0.0004419615384615384,
"loss": 3.2707,
"step": 45300
},
{
"epoch": 13.206197576887233,
"grad_norm": 0.34674158692359924,
"learning_rate": 0.0004417867132867133,
"loss": 3.2974,
"step": 45350
},
{
"epoch": 13.220759552656105,
"grad_norm": 0.350559264421463,
"learning_rate": 0.0004416118881118881,
"loss": 3.2866,
"step": 45400
},
{
"epoch": 13.235321528424977,
"grad_norm": 0.34364110231399536,
"learning_rate": 0.00044143706293706293,
"loss": 3.3051,
"step": 45450
},
{
"epoch": 13.24988350419385,
"grad_norm": 0.35101890563964844,
"learning_rate": 0.0004412622377622377,
"loss": 3.2928,
"step": 45500
},
{
"epoch": 13.26444547996272,
"grad_norm": 0.3574727177619934,
"learning_rate": 0.00044108741258741253,
"loss": 3.302,
"step": 45550
},
{
"epoch": 13.279007455731593,
"grad_norm": 0.34663891792297363,
"learning_rate": 0.00044091258741258733,
"loss": 3.2942,
"step": 45600
},
{
"epoch": 13.293569431500465,
"grad_norm": 0.3441726863384247,
"learning_rate": 0.0004407377622377622,
"loss": 3.2919,
"step": 45650
},
{
"epoch": 13.308131407269338,
"grad_norm": 0.3620011508464813,
"learning_rate": 0.000440562937062937,
"loss": 3.2929,
"step": 45700
},
{
"epoch": 13.32269338303821,
"grad_norm": 0.3525831401348114,
"learning_rate": 0.00044038811188811184,
"loss": 3.2914,
"step": 45750
},
{
"epoch": 13.337255358807083,
"grad_norm": 0.34323644638061523,
"learning_rate": 0.0004402132867132867,
"loss": 3.3014,
"step": 45800
},
{
"epoch": 13.351817334575955,
"grad_norm": 0.36165276169776917,
"learning_rate": 0.0004400384615384615,
"loss": 3.306,
"step": 45850
},
{
"epoch": 13.366379310344827,
"grad_norm": 0.37652528285980225,
"learning_rate": 0.00043986363636363635,
"loss": 3.2961,
"step": 45900
},
{
"epoch": 13.3809412861137,
"grad_norm": 0.36313024163246155,
"learning_rate": 0.00043968881118881115,
"loss": 3.301,
"step": 45950
},
{
"epoch": 13.395503261882572,
"grad_norm": 0.3545302748680115,
"learning_rate": 0.000439513986013986,
"loss": 3.3246,
"step": 46000
},
{
"epoch": 13.395503261882572,
"eval_accuracy": 0.37051732822391875,
"eval_loss": 3.5548934936523438,
"eval_runtime": 53.6644,
"eval_samples_per_second": 310.15,
"eval_steps_per_second": 19.398,
"step": 46000
},
{
"epoch": 13.410065237651445,
"grad_norm": 0.3664780855178833,
"learning_rate": 0.0004393391608391608,
"loss": 3.3072,
"step": 46050
},
{
"epoch": 13.424627213420317,
"grad_norm": 0.37858885526657104,
"learning_rate": 0.00043916433566433565,
"loss": 3.303,
"step": 46100
},
{
"epoch": 13.43918918918919,
"grad_norm": 0.35305213928222656,
"learning_rate": 0.00043898951048951045,
"loss": 3.3141,
"step": 46150
},
{
"epoch": 13.453751164958062,
"grad_norm": 0.36375489830970764,
"learning_rate": 0.0004388146853146853,
"loss": 3.3084,
"step": 46200
},
{
"epoch": 13.468313140726934,
"grad_norm": 0.35223737359046936,
"learning_rate": 0.00043863986013986005,
"loss": 3.3056,
"step": 46250
},
{
"epoch": 13.482875116495807,
"grad_norm": 0.34370267391204834,
"learning_rate": 0.0004384650349650349,
"loss": 3.3067,
"step": 46300
},
{
"epoch": 13.49743709226468,
"grad_norm": 0.3426927626132965,
"learning_rate": 0.0004382902097902097,
"loss": 3.3176,
"step": 46350
},
{
"epoch": 13.511999068033552,
"grad_norm": 0.3586205542087555,
"learning_rate": 0.00043811538461538456,
"loss": 3.3078,
"step": 46400
},
{
"epoch": 13.526561043802424,
"grad_norm": 0.3668364882469177,
"learning_rate": 0.0004379405594405594,
"loss": 3.3185,
"step": 46450
},
{
"epoch": 13.541123019571295,
"grad_norm": 0.3667503297328949,
"learning_rate": 0.0004377657342657342,
"loss": 3.3165,
"step": 46500
},
{
"epoch": 13.555684995340167,
"grad_norm": 0.3611725866794586,
"learning_rate": 0.00043759090909090907,
"loss": 3.3153,
"step": 46550
},
{
"epoch": 13.57024697110904,
"grad_norm": 0.3535501956939697,
"learning_rate": 0.00043741608391608387,
"loss": 3.3249,
"step": 46600
},
{
"epoch": 13.584808946877912,
"grad_norm": 0.35195192694664,
"learning_rate": 0.0004372412587412587,
"loss": 3.3254,
"step": 46650
},
{
"epoch": 13.599370922646784,
"grad_norm": 0.379182368516922,
"learning_rate": 0.0004370664335664335,
"loss": 3.3217,
"step": 46700
},
{
"epoch": 13.613932898415657,
"grad_norm": 0.3737790882587433,
"learning_rate": 0.0004368916083916084,
"loss": 3.324,
"step": 46750
},
{
"epoch": 13.628494874184529,
"grad_norm": 0.35494062304496765,
"learning_rate": 0.0004367167832167832,
"loss": 3.3244,
"step": 46800
},
{
"epoch": 13.643056849953402,
"grad_norm": 0.33564329147338867,
"learning_rate": 0.00043654195804195803,
"loss": 3.3224,
"step": 46850
},
{
"epoch": 13.657618825722274,
"grad_norm": 0.3607423007488251,
"learning_rate": 0.00043636713286713283,
"loss": 3.3325,
"step": 46900
},
{
"epoch": 13.672180801491146,
"grad_norm": 0.36061540246009827,
"learning_rate": 0.0004361923076923077,
"loss": 3.3232,
"step": 46950
},
{
"epoch": 13.686742777260019,
"grad_norm": 0.36729174852371216,
"learning_rate": 0.00043601748251748243,
"loss": 3.326,
"step": 47000
},
{
"epoch": 13.686742777260019,
"eval_accuracy": 0.3709420153875023,
"eval_loss": 3.5490164756774902,
"eval_runtime": 53.6923,
"eval_samples_per_second": 309.989,
"eval_steps_per_second": 19.388,
"step": 47000
},
{
"epoch": 13.701304753028891,
"grad_norm": 0.34589728713035583,
"learning_rate": 0.00043584265734265734,
"loss": 3.3293,
"step": 47050
},
{
"epoch": 13.715866728797764,
"grad_norm": 0.3353387415409088,
"learning_rate": 0.0004356678321678321,
"loss": 3.338,
"step": 47100
},
{
"epoch": 13.730428704566636,
"grad_norm": 0.32900935411453247,
"learning_rate": 0.00043549300699300694,
"loss": 3.3287,
"step": 47150
},
{
"epoch": 13.744990680335508,
"grad_norm": 0.33722198009490967,
"learning_rate": 0.0004353181818181818,
"loss": 3.335,
"step": 47200
},
{
"epoch": 13.75955265610438,
"grad_norm": 0.34776055812835693,
"learning_rate": 0.0004351433566433566,
"loss": 3.3368,
"step": 47250
},
{
"epoch": 13.774114631873253,
"grad_norm": 0.34321945905685425,
"learning_rate": 0.00043496853146853144,
"loss": 3.3447,
"step": 47300
},
{
"epoch": 13.788676607642126,
"grad_norm": 0.3597453832626343,
"learning_rate": 0.00043479370629370624,
"loss": 3.3474,
"step": 47350
},
{
"epoch": 13.803238583410998,
"grad_norm": 0.3584977388381958,
"learning_rate": 0.0004346188811188811,
"loss": 3.3419,
"step": 47400
},
{
"epoch": 13.817800559179869,
"grad_norm": 0.3313165009021759,
"learning_rate": 0.0004344440559440559,
"loss": 3.3364,
"step": 47450
},
{
"epoch": 13.832362534948741,
"grad_norm": 0.3603111207485199,
"learning_rate": 0.00043426923076923075,
"loss": 3.3367,
"step": 47500
},
{
"epoch": 13.846924510717614,
"grad_norm": 0.35369378328323364,
"learning_rate": 0.00043409440559440555,
"loss": 3.3415,
"step": 47550
},
{
"epoch": 13.861486486486486,
"grad_norm": 0.37278762459754944,
"learning_rate": 0.0004339195804195804,
"loss": 3.3248,
"step": 47600
},
{
"epoch": 13.876048462255358,
"grad_norm": 0.33600765466690063,
"learning_rate": 0.0004337447552447552,
"loss": 3.3372,
"step": 47650
},
{
"epoch": 13.89061043802423,
"grad_norm": 0.3594343364238739,
"learning_rate": 0.00043356993006993006,
"loss": 3.3325,
"step": 47700
},
{
"epoch": 13.905172413793103,
"grad_norm": 0.34375789761543274,
"learning_rate": 0.0004333951048951048,
"loss": 3.3424,
"step": 47750
},
{
"epoch": 13.919734389561976,
"grad_norm": 0.349744975566864,
"learning_rate": 0.0004332202797202797,
"loss": 3.3309,
"step": 47800
},
{
"epoch": 13.934296365330848,
"grad_norm": 0.343473345041275,
"learning_rate": 0.00043304545454545456,
"loss": 3.3392,
"step": 47850
},
{
"epoch": 13.94885834109972,
"grad_norm": 0.35148507356643677,
"learning_rate": 0.0004328706293706293,
"loss": 3.3318,
"step": 47900
},
{
"epoch": 13.963420316868593,
"grad_norm": 0.37654560804367065,
"learning_rate": 0.00043269580419580416,
"loss": 3.3446,
"step": 47950
},
{
"epoch": 13.977982292637465,
"grad_norm": 0.3372819423675537,
"learning_rate": 0.00043252097902097896,
"loss": 3.3341,
"step": 48000
},
{
"epoch": 13.977982292637465,
"eval_accuracy": 0.37139445066033444,
"eval_loss": 3.543229103088379,
"eval_runtime": 53.7768,
"eval_samples_per_second": 309.502,
"eval_steps_per_second": 19.358,
"step": 48000
},
{
"epoch": 13.992544268406338,
"grad_norm": 0.35655635595321655,
"learning_rate": 0.0004323461538461538,
"loss": 3.3439,
"step": 48050
},
{
"epoch": 14.006989748369058,
"grad_norm": 0.3547080457210541,
"learning_rate": 0.0004321713286713286,
"loss": 3.2918,
"step": 48100
},
{
"epoch": 14.02155172413793,
"grad_norm": 0.33806321024894714,
"learning_rate": 0.00043199650349650347,
"loss": 3.2325,
"step": 48150
},
{
"epoch": 14.036113699906803,
"grad_norm": 0.36164820194244385,
"learning_rate": 0.00043182167832167827,
"loss": 3.2547,
"step": 48200
},
{
"epoch": 14.050675675675675,
"grad_norm": 0.33799758553504944,
"learning_rate": 0.0004316468531468531,
"loss": 3.2453,
"step": 48250
},
{
"epoch": 14.065237651444548,
"grad_norm": 0.3774013817310333,
"learning_rate": 0.0004314720279720279,
"loss": 3.2307,
"step": 48300
},
{
"epoch": 14.07979962721342,
"grad_norm": 0.3361668884754181,
"learning_rate": 0.0004312972027972028,
"loss": 3.2379,
"step": 48350
},
{
"epoch": 14.094361602982293,
"grad_norm": 0.3815077841281891,
"learning_rate": 0.0004311223776223776,
"loss": 3.2621,
"step": 48400
},
{
"epoch": 14.108923578751165,
"grad_norm": 0.37621423602104187,
"learning_rate": 0.00043094755244755243,
"loss": 3.2474,
"step": 48450
},
{
"epoch": 14.123485554520038,
"grad_norm": 0.36258870363235474,
"learning_rate": 0.0004307727272727272,
"loss": 3.2517,
"step": 48500
},
{
"epoch": 14.13804753028891,
"grad_norm": 0.3932361602783203,
"learning_rate": 0.0004305979020979021,
"loss": 3.2658,
"step": 48550
},
{
"epoch": 14.152609506057782,
"grad_norm": 0.3555769920349121,
"learning_rate": 0.00043042307692307694,
"loss": 3.2742,
"step": 48600
},
{
"epoch": 14.167171481826655,
"grad_norm": 0.35345563292503357,
"learning_rate": 0.0004302482517482517,
"loss": 3.2624,
"step": 48650
},
{
"epoch": 14.181733457595527,
"grad_norm": 0.37088289856910706,
"learning_rate": 0.00043007342657342654,
"loss": 3.2776,
"step": 48700
},
{
"epoch": 14.1962954333644,
"grad_norm": 0.36062613129615784,
"learning_rate": 0.00042989860139860134,
"loss": 3.2636,
"step": 48750
},
{
"epoch": 14.210857409133272,
"grad_norm": 0.3612150251865387,
"learning_rate": 0.0004297237762237762,
"loss": 3.2603,
"step": 48800
},
{
"epoch": 14.225419384902143,
"grad_norm": 0.34834256768226624,
"learning_rate": 0.000429548951048951,
"loss": 3.259,
"step": 48850
},
{
"epoch": 14.239981360671015,
"grad_norm": 0.36643290519714355,
"learning_rate": 0.00042937412587412585,
"loss": 3.2726,
"step": 48900
},
{
"epoch": 14.254543336439887,
"grad_norm": 0.4372047185897827,
"learning_rate": 0.00042919930069930065,
"loss": 3.2699,
"step": 48950
},
{
"epoch": 14.26910531220876,
"grad_norm": 0.3443453311920166,
"learning_rate": 0.0004290244755244755,
"loss": 3.2913,
"step": 49000
},
{
"epoch": 14.26910531220876,
"eval_accuracy": 0.37089275073591277,
"eval_loss": 3.5589487552642822,
"eval_runtime": 53.6734,
"eval_samples_per_second": 310.098,
"eval_steps_per_second": 19.395,
"step": 49000
},
{
"epoch": 14.283667287977632,
"grad_norm": 0.38753968477249146,
"learning_rate": 0.0004288496503496503,
"loss": 3.2774,
"step": 49050
},
{
"epoch": 14.298229263746505,
"grad_norm": 0.35234200954437256,
"learning_rate": 0.00042867482517482515,
"loss": 3.2762,
"step": 49100
},
{
"epoch": 14.312791239515377,
"grad_norm": 0.38150152564048767,
"learning_rate": 0.00042849999999999995,
"loss": 3.2947,
"step": 49150
},
{
"epoch": 14.32735321528425,
"grad_norm": 0.3585006296634674,
"learning_rate": 0.0004283251748251748,
"loss": 3.2861,
"step": 49200
},
{
"epoch": 14.341915191053122,
"grad_norm": 0.340561181306839,
"learning_rate": 0.00042815034965034966,
"loss": 3.2793,
"step": 49250
},
{
"epoch": 14.356477166821994,
"grad_norm": 0.3703455626964569,
"learning_rate": 0.00042797552447552446,
"loss": 3.289,
"step": 49300
},
{
"epoch": 14.371039142590867,
"grad_norm": 0.38863709568977356,
"learning_rate": 0.0004278006993006993,
"loss": 3.2819,
"step": 49350
},
{
"epoch": 14.38560111835974,
"grad_norm": 0.37331631779670715,
"learning_rate": 0.00042762587412587406,
"loss": 3.2992,
"step": 49400
},
{
"epoch": 14.400163094128612,
"grad_norm": 0.35026001930236816,
"learning_rate": 0.0004274510489510489,
"loss": 3.3023,
"step": 49450
},
{
"epoch": 14.414725069897484,
"grad_norm": 0.3765786588191986,
"learning_rate": 0.0004272762237762237,
"loss": 3.2869,
"step": 49500
},
{
"epoch": 14.429287045666356,
"grad_norm": 0.4198647141456604,
"learning_rate": 0.00042710139860139857,
"loss": 3.299,
"step": 49550
},
{
"epoch": 14.443849021435229,
"grad_norm": 0.37420520186424255,
"learning_rate": 0.00042692657342657337,
"loss": 3.3011,
"step": 49600
},
{
"epoch": 14.458410997204101,
"grad_norm": 0.36429670453071594,
"learning_rate": 0.0004267517482517482,
"loss": 3.288,
"step": 49650
},
{
"epoch": 14.472972972972974,
"grad_norm": 0.3489975035190582,
"learning_rate": 0.000426576923076923,
"loss": 3.3109,
"step": 49700
},
{
"epoch": 14.487534948741846,
"grad_norm": 0.3541027903556824,
"learning_rate": 0.0004264020979020979,
"loss": 3.2971,
"step": 49750
},
{
"epoch": 14.502096924510717,
"grad_norm": 0.3759947717189789,
"learning_rate": 0.0004262272727272727,
"loss": 3.313,
"step": 49800
},
{
"epoch": 14.51665890027959,
"grad_norm": 0.35864460468292236,
"learning_rate": 0.00042605244755244753,
"loss": 3.3117,
"step": 49850
},
{
"epoch": 14.531220876048462,
"grad_norm": 0.35375118255615234,
"learning_rate": 0.00042587762237762233,
"loss": 3.3131,
"step": 49900
},
{
"epoch": 14.545782851817334,
"grad_norm": 0.3452862501144409,
"learning_rate": 0.0004257027972027972,
"loss": 3.2948,
"step": 49950
},
{
"epoch": 14.560344827586206,
"grad_norm": 0.36372438073158264,
"learning_rate": 0.00042552797202797204,
"loss": 3.2971,
"step": 50000
},
{
"epoch": 14.560344827586206,
"eval_accuracy": 0.37094483722912086,
"eval_loss": 3.5522592067718506,
"eval_runtime": 53.8803,
"eval_samples_per_second": 308.907,
"eval_steps_per_second": 19.321,
"step": 50000
},
{
"epoch": 14.574906803355079,
"grad_norm": 0.34010276198387146,
"learning_rate": 0.00042535314685314684,
"loss": 3.3026,
"step": 50050
},
{
"epoch": 14.589468779123951,
"grad_norm": 0.33550965785980225,
"learning_rate": 0.0004251783216783217,
"loss": 3.3004,
"step": 50100
},
{
"epoch": 14.604030754892824,
"grad_norm": 0.3614843785762787,
"learning_rate": 0.00042500349650349643,
"loss": 3.3071,
"step": 50150
},
{
"epoch": 14.618592730661696,
"grad_norm": 0.34630846977233887,
"learning_rate": 0.0004248286713286713,
"loss": 3.2983,
"step": 50200
},
{
"epoch": 14.633154706430568,
"grad_norm": 0.34411928057670593,
"learning_rate": 0.0004246538461538461,
"loss": 3.3064,
"step": 50250
},
{
"epoch": 14.647716682199441,
"grad_norm": 0.3575693368911743,
"learning_rate": 0.00042447902097902094,
"loss": 3.2985,
"step": 50300
},
{
"epoch": 14.662278657968313,
"grad_norm": 0.3505471646785736,
"learning_rate": 0.00042430419580419574,
"loss": 3.3178,
"step": 50350
},
{
"epoch": 14.676840633737186,
"grad_norm": 0.3225877583026886,
"learning_rate": 0.0004241293706293706,
"loss": 3.3121,
"step": 50400
},
{
"epoch": 14.691402609506058,
"grad_norm": 0.33646827936172485,
"learning_rate": 0.0004239545454545454,
"loss": 3.3233,
"step": 50450
},
{
"epoch": 14.70596458527493,
"grad_norm": 0.34578248858451843,
"learning_rate": 0.00042377972027972025,
"loss": 3.3137,
"step": 50500
},
{
"epoch": 14.720526561043803,
"grad_norm": 0.35833966732025146,
"learning_rate": 0.00042360489510489505,
"loss": 3.3105,
"step": 50550
},
{
"epoch": 14.735088536812675,
"grad_norm": 0.3806699812412262,
"learning_rate": 0.0004234300699300699,
"loss": 3.3273,
"step": 50600
},
{
"epoch": 14.749650512581548,
"grad_norm": 0.37561845779418945,
"learning_rate": 0.00042325524475524476,
"loss": 3.3159,
"step": 50650
},
{
"epoch": 14.76421248835042,
"grad_norm": 0.37811514735221863,
"learning_rate": 0.00042308041958041956,
"loss": 3.3263,
"step": 50700
},
{
"epoch": 14.77877446411929,
"grad_norm": 0.36160987615585327,
"learning_rate": 0.0004229055944055944,
"loss": 3.3082,
"step": 50750
},
{
"epoch": 14.793336439888163,
"grad_norm": 0.3447883129119873,
"learning_rate": 0.0004227307692307692,
"loss": 3.31,
"step": 50800
},
{
"epoch": 14.807898415657036,
"grad_norm": 0.39840012788772583,
"learning_rate": 0.00042255594405594406,
"loss": 3.3142,
"step": 50850
},
{
"epoch": 14.822460391425908,
"grad_norm": 0.37650975584983826,
"learning_rate": 0.0004223811188811188,
"loss": 3.3239,
"step": 50900
},
{
"epoch": 14.83702236719478,
"grad_norm": 0.3962187170982361,
"learning_rate": 0.00042220629370629366,
"loss": 3.3275,
"step": 50950
},
{
"epoch": 14.851584342963653,
"grad_norm": 0.3631623387336731,
"learning_rate": 0.00042203146853146846,
"loss": 3.3271,
"step": 51000
},
{
"epoch": 14.851584342963653,
"eval_accuracy": 0.3713922167023865,
"eval_loss": 3.5445010662078857,
"eval_runtime": 53.7519,
"eval_samples_per_second": 309.645,
"eval_steps_per_second": 19.367,
"step": 51000
},
{
"epoch": 14.866146318732525,
"grad_norm": 0.3710850775241852,
"learning_rate": 0.0004218566433566433,
"loss": 3.3204,
"step": 51050
},
{
"epoch": 14.880708294501398,
"grad_norm": 0.36191123723983765,
"learning_rate": 0.0004216818181818181,
"loss": 3.3346,
"step": 51100
},
{
"epoch": 14.89527027027027,
"grad_norm": 0.3745746910572052,
"learning_rate": 0.00042150699300699297,
"loss": 3.3167,
"step": 51150
},
{
"epoch": 14.909832246039143,
"grad_norm": 0.3422452211380005,
"learning_rate": 0.00042133216783216777,
"loss": 3.3211,
"step": 51200
},
{
"epoch": 14.924394221808015,
"grad_norm": 0.348006010055542,
"learning_rate": 0.0004211573426573426,
"loss": 3.3234,
"step": 51250
},
{
"epoch": 14.938956197576887,
"grad_norm": 0.33569449186325073,
"learning_rate": 0.0004209825174825175,
"loss": 3.3362,
"step": 51300
},
{
"epoch": 14.95351817334576,
"grad_norm": 0.34915322065353394,
"learning_rate": 0.0004208076923076923,
"loss": 3.326,
"step": 51350
},
{
"epoch": 14.968080149114632,
"grad_norm": 0.3519541621208191,
"learning_rate": 0.00042063286713286713,
"loss": 3.3145,
"step": 51400
},
{
"epoch": 14.982642124883505,
"grad_norm": 0.3761986494064331,
"learning_rate": 0.00042045804195804193,
"loss": 3.3318,
"step": 51450
},
{
"epoch": 14.997204100652377,
"grad_norm": 0.3359870910644531,
"learning_rate": 0.0004202832167832168,
"loss": 3.3319,
"step": 51500
},
{
"epoch": 15.011649580615098,
"grad_norm": 0.37047114968299866,
"learning_rate": 0.0004201083916083916,
"loss": 3.2483,
"step": 51550
},
{
"epoch": 15.02621155638397,
"grad_norm": 0.36424553394317627,
"learning_rate": 0.00041993356643356644,
"loss": 3.2183,
"step": 51600
},
{
"epoch": 15.040773532152842,
"grad_norm": 0.35980352759361267,
"learning_rate": 0.0004197587412587412,
"loss": 3.2262,
"step": 51650
},
{
"epoch": 15.055335507921715,
"grad_norm": 0.37826991081237793,
"learning_rate": 0.00041958391608391604,
"loss": 3.2198,
"step": 51700
},
{
"epoch": 15.069897483690587,
"grad_norm": 0.33697888255119324,
"learning_rate": 0.00041940909090909084,
"loss": 3.2334,
"step": 51750
},
{
"epoch": 15.08445945945946,
"grad_norm": 0.38942408561706543,
"learning_rate": 0.0004192342657342657,
"loss": 3.2307,
"step": 51800
},
{
"epoch": 15.099021435228332,
"grad_norm": 0.37160614132881165,
"learning_rate": 0.0004190594405594405,
"loss": 3.2336,
"step": 51850
},
{
"epoch": 15.113583410997204,
"grad_norm": 0.37960243225097656,
"learning_rate": 0.00041888461538461535,
"loss": 3.2344,
"step": 51900
},
{
"epoch": 15.128145386766077,
"grad_norm": 0.3533761501312256,
"learning_rate": 0.00041870979020979015,
"loss": 3.2452,
"step": 51950
},
{
"epoch": 15.14270736253495,
"grad_norm": 0.36656302213668823,
"learning_rate": 0.000418534965034965,
"loss": 3.2435,
"step": 52000
},
{
"epoch": 15.14270736253495,
"eval_accuracy": 0.3713126172533981,
"eval_loss": 3.5588560104370117,
"eval_runtime": 53.7438,
"eval_samples_per_second": 309.691,
"eval_steps_per_second": 19.37,
"step": 52000
},
{
"epoch": 15.157269338303822,
"grad_norm": 0.37083372473716736,
"learning_rate": 0.00041836013986013985,
"loss": 3.2411,
"step": 52050
},
{
"epoch": 15.171831314072694,
"grad_norm": 0.35016798973083496,
"learning_rate": 0.00041818531468531465,
"loss": 3.2433,
"step": 52100
},
{
"epoch": 15.186393289841567,
"grad_norm": 0.34692856669425964,
"learning_rate": 0.0004180104895104895,
"loss": 3.2586,
"step": 52150
},
{
"epoch": 15.200955265610437,
"grad_norm": 0.3626892864704132,
"learning_rate": 0.0004178356643356643,
"loss": 3.271,
"step": 52200
},
{
"epoch": 15.21551724137931,
"grad_norm": 0.3602758049964905,
"learning_rate": 0.00041766083916083916,
"loss": 3.2612,
"step": 52250
},
{
"epoch": 15.230079217148182,
"grad_norm": 0.40110644698143005,
"learning_rate": 0.00041748601398601396,
"loss": 3.2565,
"step": 52300
},
{
"epoch": 15.244641192917054,
"grad_norm": 0.3626898527145386,
"learning_rate": 0.0004173111888111888,
"loss": 3.2693,
"step": 52350
},
{
"epoch": 15.259203168685927,
"grad_norm": 0.37451228499412537,
"learning_rate": 0.00041713636363636356,
"loss": 3.259,
"step": 52400
},
{
"epoch": 15.2737651444548,
"grad_norm": 0.36757326126098633,
"learning_rate": 0.0004169615384615384,
"loss": 3.2615,
"step": 52450
},
{
"epoch": 15.288327120223672,
"grad_norm": 0.401369571685791,
"learning_rate": 0.0004167867132867132,
"loss": 3.257,
"step": 52500
},
{
"epoch": 15.302889095992544,
"grad_norm": 0.35822781920433044,
"learning_rate": 0.00041661188811188807,
"loss": 3.2813,
"step": 52550
},
{
"epoch": 15.317451071761417,
"grad_norm": 0.3864552676677704,
"learning_rate": 0.00041643706293706287,
"loss": 3.2849,
"step": 52600
},
{
"epoch": 15.332013047530289,
"grad_norm": 0.41683146357536316,
"learning_rate": 0.0004162622377622377,
"loss": 3.268,
"step": 52650
},
{
"epoch": 15.346575023299161,
"grad_norm": 0.3629013001918793,
"learning_rate": 0.0004160874125874126,
"loss": 3.2752,
"step": 52700
},
{
"epoch": 15.361136999068034,
"grad_norm": 0.35468605160713196,
"learning_rate": 0.0004159125874125874,
"loss": 3.2849,
"step": 52750
},
{
"epoch": 15.375698974836906,
"grad_norm": 0.3459469974040985,
"learning_rate": 0.00041573776223776223,
"loss": 3.2712,
"step": 52800
},
{
"epoch": 15.390260950605779,
"grad_norm": 0.39700445532798767,
"learning_rate": 0.00041556293706293703,
"loss": 3.2843,
"step": 52850
},
{
"epoch": 15.404822926374651,
"grad_norm": 0.35506534576416016,
"learning_rate": 0.0004153881118881119,
"loss": 3.2844,
"step": 52900
},
{
"epoch": 15.419384902143523,
"grad_norm": 0.38613244891166687,
"learning_rate": 0.0004152132867132867,
"loss": 3.2746,
"step": 52950
},
{
"epoch": 15.433946877912396,
"grad_norm": 0.3591909110546112,
"learning_rate": 0.00041503846153846154,
"loss": 3.281,
"step": 53000
},
{
"epoch": 15.433946877912396,
"eval_accuracy": 0.371552238637502,
"eval_loss": 3.555067539215088,
"eval_runtime": 54.0003,
"eval_samples_per_second": 308.221,
"eval_steps_per_second": 19.278,
"step": 53000
},
{
"epoch": 15.448508853681268,
"grad_norm": 0.35899174213409424,
"learning_rate": 0.00041486363636363634,
"loss": 3.2965,
"step": 53050
},
{
"epoch": 15.463070829450139,
"grad_norm": 0.37865790724754333,
"learning_rate": 0.0004146888111888112,
"loss": 3.2903,
"step": 53100
},
{
"epoch": 15.477632805219011,
"grad_norm": 0.34430116415023804,
"learning_rate": 0.00041451398601398593,
"loss": 3.2766,
"step": 53150
},
{
"epoch": 15.492194780987884,
"grad_norm": 0.3497706651687622,
"learning_rate": 0.0004143391608391608,
"loss": 3.277,
"step": 53200
},
{
"epoch": 15.506756756756756,
"grad_norm": 0.3752337396144867,
"learning_rate": 0.0004141643356643356,
"loss": 3.2805,
"step": 53250
},
{
"epoch": 15.521318732525629,
"grad_norm": 0.39002713561058044,
"learning_rate": 0.00041398951048951044,
"loss": 3.2833,
"step": 53300
},
{
"epoch": 15.535880708294501,
"grad_norm": 0.3368464410305023,
"learning_rate": 0.00041381468531468524,
"loss": 3.2913,
"step": 53350
},
{
"epoch": 15.550442684063373,
"grad_norm": 0.3738703727722168,
"learning_rate": 0.0004136398601398601,
"loss": 3.2898,
"step": 53400
},
{
"epoch": 15.565004659832246,
"grad_norm": 0.3517080545425415,
"learning_rate": 0.00041346503496503495,
"loss": 3.2909,
"step": 53450
},
{
"epoch": 15.579566635601118,
"grad_norm": 0.3623846769332886,
"learning_rate": 0.00041329020979020975,
"loss": 3.2804,
"step": 53500
},
{
"epoch": 15.59412861136999,
"grad_norm": 0.3643958270549774,
"learning_rate": 0.0004131153846153846,
"loss": 3.2919,
"step": 53550
},
{
"epoch": 15.608690587138863,
"grad_norm": 0.3840982913970947,
"learning_rate": 0.0004129405594405594,
"loss": 3.2904,
"step": 53600
},
{
"epoch": 15.623252562907735,
"grad_norm": 0.3870510756969452,
"learning_rate": 0.00041276573426573426,
"loss": 3.304,
"step": 53650
},
{
"epoch": 15.637814538676608,
"grad_norm": 0.3654141426086426,
"learning_rate": 0.00041259090909090906,
"loss": 3.2927,
"step": 53700
},
{
"epoch": 15.65237651444548,
"grad_norm": 0.36212411522865295,
"learning_rate": 0.0004124160839160839,
"loss": 3.2895,
"step": 53750
},
{
"epoch": 15.666938490214353,
"grad_norm": 0.3940054476261139,
"learning_rate": 0.0004122412587412587,
"loss": 3.3188,
"step": 53800
},
{
"epoch": 15.681500465983225,
"grad_norm": 0.35793691873550415,
"learning_rate": 0.00041206643356643356,
"loss": 3.3,
"step": 53850
},
{
"epoch": 15.696062441752098,
"grad_norm": 0.38477155566215515,
"learning_rate": 0.0004118916083916083,
"loss": 3.2999,
"step": 53900
},
{
"epoch": 15.71062441752097,
"grad_norm": 0.35515347123146057,
"learning_rate": 0.00041171678321678316,
"loss": 3.2988,
"step": 53950
},
{
"epoch": 15.725186393289842,
"grad_norm": 0.3534596860408783,
"learning_rate": 0.00041154195804195796,
"loss": 3.3023,
"step": 54000
},
{
"epoch": 15.725186393289842,
"eval_accuracy": 0.37190861371857115,
"eval_loss": 3.544243812561035,
"eval_runtime": 53.5815,
"eval_samples_per_second": 310.629,
"eval_steps_per_second": 19.428,
"step": 54000
},
{
"epoch": 15.739748369058713,
"grad_norm": 0.3302527964115143,
"learning_rate": 0.0004113671328671328,
"loss": 3.3066,
"step": 54050
},
{
"epoch": 15.754310344827585,
"grad_norm": 0.3625675141811371,
"learning_rate": 0.00041119230769230767,
"loss": 3.3,
"step": 54100
},
{
"epoch": 15.768872320596458,
"grad_norm": 0.34582090377807617,
"learning_rate": 0.00041101748251748247,
"loss": 3.3056,
"step": 54150
},
{
"epoch": 15.78343429636533,
"grad_norm": 0.35482490062713623,
"learning_rate": 0.0004108426573426573,
"loss": 3.3043,
"step": 54200
},
{
"epoch": 15.797996272134203,
"grad_norm": 0.3603096604347229,
"learning_rate": 0.0004106678321678321,
"loss": 3.3053,
"step": 54250
},
{
"epoch": 15.812558247903075,
"grad_norm": 0.3536098897457123,
"learning_rate": 0.000410493006993007,
"loss": 3.3112,
"step": 54300
},
{
"epoch": 15.827120223671947,
"grad_norm": 0.41206008195877075,
"learning_rate": 0.0004103181818181818,
"loss": 3.3086,
"step": 54350
},
{
"epoch": 15.84168219944082,
"grad_norm": 0.35738715529441833,
"learning_rate": 0.00041014335664335663,
"loss": 3.312,
"step": 54400
},
{
"epoch": 15.856244175209692,
"grad_norm": 0.375002384185791,
"learning_rate": 0.00040996853146853143,
"loss": 3.3148,
"step": 54450
},
{
"epoch": 15.870806150978565,
"grad_norm": 0.3635875880718231,
"learning_rate": 0.0004097937062937063,
"loss": 3.3177,
"step": 54500
},
{
"epoch": 15.885368126747437,
"grad_norm": 0.3630351722240448,
"learning_rate": 0.0004096188811188811,
"loss": 3.3109,
"step": 54550
},
{
"epoch": 15.89993010251631,
"grad_norm": 0.39034175872802734,
"learning_rate": 0.00040944405594405594,
"loss": 3.3014,
"step": 54600
},
{
"epoch": 15.914492078285182,
"grad_norm": 0.34750694036483765,
"learning_rate": 0.0004092692307692307,
"loss": 3.3082,
"step": 54650
},
{
"epoch": 15.929054054054054,
"grad_norm": 0.4042045474052429,
"learning_rate": 0.00040909440559440554,
"loss": 3.3119,
"step": 54700
},
{
"epoch": 15.943616029822927,
"grad_norm": 0.3563961386680603,
"learning_rate": 0.00040891958041958034,
"loss": 3.3132,
"step": 54750
},
{
"epoch": 15.9581780055918,
"grad_norm": 0.35997897386550903,
"learning_rate": 0.0004087447552447552,
"loss": 3.3179,
"step": 54800
},
{
"epoch": 15.972739981360672,
"grad_norm": 0.33496615290641785,
"learning_rate": 0.00040856993006993005,
"loss": 3.314,
"step": 54850
},
{
"epoch": 15.987301957129544,
"grad_norm": 0.3893013000488281,
"learning_rate": 0.00040839510489510485,
"loss": 3.3048,
"step": 54900
},
{
"epoch": 16.001747437092266,
"grad_norm": 0.3888099491596222,
"learning_rate": 0.0004082202797202797,
"loss": 3.295,
"step": 54950
},
{
"epoch": 16.016309412861137,
"grad_norm": 0.3761426508426666,
"learning_rate": 0.0004080454545454545,
"loss": 3.2045,
"step": 55000
},
{
"epoch": 16.016309412861137,
"eval_accuracy": 0.3716526491684268,
"eval_loss": 3.552569627761841,
"eval_runtime": 53.7366,
"eval_samples_per_second": 309.733,
"eval_steps_per_second": 19.372,
"step": 55000
},
{
"epoch": 16.030871388630008,
"grad_norm": 0.3669045865535736,
"learning_rate": 0.00040787062937062935,
"loss": 3.2014,
"step": 55050
},
{
"epoch": 16.045433364398882,
"grad_norm": 0.37650567293167114,
"learning_rate": 0.00040769580419580415,
"loss": 3.2067,
"step": 55100
},
{
"epoch": 16.059995340167752,
"grad_norm": 0.37726104259490967,
"learning_rate": 0.000407520979020979,
"loss": 3.2181,
"step": 55150
},
{
"epoch": 16.074557315936627,
"grad_norm": 0.3812066614627838,
"learning_rate": 0.0004073461538461538,
"loss": 3.213,
"step": 55200
},
{
"epoch": 16.089119291705497,
"grad_norm": 0.3624626100063324,
"learning_rate": 0.00040717132867132866,
"loss": 3.224,
"step": 55250
},
{
"epoch": 16.10368126747437,
"grad_norm": 0.36346620321273804,
"learning_rate": 0.00040699650349650346,
"loss": 3.235,
"step": 55300
},
{
"epoch": 16.118243243243242,
"grad_norm": 0.37540963292121887,
"learning_rate": 0.0004068216783216783,
"loss": 3.2326,
"step": 55350
},
{
"epoch": 16.132805219012116,
"grad_norm": 0.3593809902667999,
"learning_rate": 0.00040664685314685306,
"loss": 3.2304,
"step": 55400
},
{
"epoch": 16.147367194780987,
"grad_norm": 0.381249338388443,
"learning_rate": 0.0004064720279720279,
"loss": 3.228,
"step": 55450
},
{
"epoch": 16.16192917054986,
"grad_norm": 0.3877742886543274,
"learning_rate": 0.00040629720279720277,
"loss": 3.231,
"step": 55500
},
{
"epoch": 16.17649114631873,
"grad_norm": 0.38962066173553467,
"learning_rate": 0.00040612237762237757,
"loss": 3.2386,
"step": 55550
},
{
"epoch": 16.191053122087606,
"grad_norm": 0.3545854091644287,
"learning_rate": 0.0004059475524475524,
"loss": 3.2417,
"step": 55600
},
{
"epoch": 16.205615097856477,
"grad_norm": 0.37691715359687805,
"learning_rate": 0.0004057727272727272,
"loss": 3.2418,
"step": 55650
},
{
"epoch": 16.22017707362535,
"grad_norm": 0.37831491231918335,
"learning_rate": 0.0004055979020979021,
"loss": 3.2338,
"step": 55700
},
{
"epoch": 16.23473904939422,
"grad_norm": 0.37360233068466187,
"learning_rate": 0.0004054230769230769,
"loss": 3.2526,
"step": 55750
},
{
"epoch": 16.249301025163096,
"grad_norm": 0.4056057035923004,
"learning_rate": 0.00040524825174825173,
"loss": 3.2549,
"step": 55800
},
{
"epoch": 16.263863000931966,
"grad_norm": 0.37376824021339417,
"learning_rate": 0.00040507342657342653,
"loss": 3.2509,
"step": 55850
},
{
"epoch": 16.27842497670084,
"grad_norm": 0.3650595545768738,
"learning_rate": 0.0004048986013986014,
"loss": 3.2494,
"step": 55900
},
{
"epoch": 16.29298695246971,
"grad_norm": 0.3548644483089447,
"learning_rate": 0.0004047237762237762,
"loss": 3.2429,
"step": 55950
},
{
"epoch": 16.30754892823858,
"grad_norm": 0.38068774342536926,
"learning_rate": 0.00040454895104895104,
"loss": 3.2608,
"step": 56000
},
{
"epoch": 16.30754892823858,
"eval_accuracy": 0.3716091457768083,
"eval_loss": 3.552886724472046,
"eval_runtime": 53.7187,
"eval_samples_per_second": 309.836,
"eval_steps_per_second": 19.379,
"step": 56000
},
{
"epoch": 16.322110904007456,
"grad_norm": 0.3673131465911865,
"learning_rate": 0.00040437412587412583,
"loss": 3.2607,
"step": 56050
},
{
"epoch": 16.336672879776327,
"grad_norm": 0.3998847007751465,
"learning_rate": 0.0004041993006993007,
"loss": 3.2589,
"step": 56100
},
{
"epoch": 16.3512348555452,
"grad_norm": 0.4015873670578003,
"learning_rate": 0.00040402447552447554,
"loss": 3.2677,
"step": 56150
},
{
"epoch": 16.36579683131407,
"grad_norm": 0.3576182723045349,
"learning_rate": 0.0004038496503496503,
"loss": 3.2537,
"step": 56200
},
{
"epoch": 16.380358807082946,
"grad_norm": 0.3824199140071869,
"learning_rate": 0.00040367482517482514,
"loss": 3.2587,
"step": 56250
},
{
"epoch": 16.394920782851816,
"grad_norm": 0.3755083680152893,
"learning_rate": 0.00040349999999999994,
"loss": 3.2559,
"step": 56300
},
{
"epoch": 16.40948275862069,
"grad_norm": 0.37745600938796997,
"learning_rate": 0.0004033251748251748,
"loss": 3.2651,
"step": 56350
},
{
"epoch": 16.42404473438956,
"grad_norm": 0.3472992777824402,
"learning_rate": 0.0004031503496503496,
"loss": 3.2705,
"step": 56400
},
{
"epoch": 16.438606710158435,
"grad_norm": 0.3721696138381958,
"learning_rate": 0.00040297552447552445,
"loss": 3.2709,
"step": 56450
},
{
"epoch": 16.453168685927306,
"grad_norm": 0.44224095344543457,
"learning_rate": 0.00040280069930069925,
"loss": 3.2753,
"step": 56500
},
{
"epoch": 16.46773066169618,
"grad_norm": 0.3655279874801636,
"learning_rate": 0.0004026258741258741,
"loss": 3.2718,
"step": 56550
},
{
"epoch": 16.48229263746505,
"grad_norm": 0.4208175837993622,
"learning_rate": 0.0004024510489510489,
"loss": 3.2723,
"step": 56600
},
{
"epoch": 16.496854613233925,
"grad_norm": 0.36009299755096436,
"learning_rate": 0.00040227622377622376,
"loss": 3.2746,
"step": 56650
},
{
"epoch": 16.511416589002796,
"grad_norm": 0.3603965938091278,
"learning_rate": 0.00040210139860139856,
"loss": 3.2668,
"step": 56700
},
{
"epoch": 16.52597856477167,
"grad_norm": 0.35173988342285156,
"learning_rate": 0.0004019265734265734,
"loss": 3.2793,
"step": 56750
},
{
"epoch": 16.54054054054054,
"grad_norm": 0.36199483275413513,
"learning_rate": 0.0004017517482517482,
"loss": 3.2773,
"step": 56800
},
{
"epoch": 16.555102516309415,
"grad_norm": 0.41115614771842957,
"learning_rate": 0.00040157692307692306,
"loss": 3.2808,
"step": 56850
},
{
"epoch": 16.569664492078285,
"grad_norm": 0.3796447515487671,
"learning_rate": 0.0004014020979020979,
"loss": 3.2859,
"step": 56900
},
{
"epoch": 16.584226467847156,
"grad_norm": 0.35656216740608215,
"learning_rate": 0.00040122727272727266,
"loss": 3.2721,
"step": 56950
},
{
"epoch": 16.59878844361603,
"grad_norm": 0.38244813680648804,
"learning_rate": 0.0004010524475524475,
"loss": 3.2856,
"step": 57000
},
{
"epoch": 16.59878844361603,
"eval_accuracy": 0.3720420633117792,
"eval_loss": 3.5462512969970703,
"eval_runtime": 53.7179,
"eval_samples_per_second": 309.841,
"eval_steps_per_second": 19.379,
"step": 57000
},
{
"epoch": 16.6133504193849,
"grad_norm": 0.3651905059814453,
"learning_rate": 0.0004008776223776223,
"loss": 3.2915,
"step": 57050
},
{
"epoch": 16.627912395153775,
"grad_norm": 0.39289090037345886,
"learning_rate": 0.00040070279720279717,
"loss": 3.2943,
"step": 57100
},
{
"epoch": 16.642474370922645,
"grad_norm": 0.36119070649147034,
"learning_rate": 0.00040052797202797197,
"loss": 3.2677,
"step": 57150
},
{
"epoch": 16.65703634669152,
"grad_norm": 0.3775008022785187,
"learning_rate": 0.0004003531468531468,
"loss": 3.2877,
"step": 57200
},
{
"epoch": 16.67159832246039,
"grad_norm": 0.3913573920726776,
"learning_rate": 0.0004001783216783216,
"loss": 3.2814,
"step": 57250
},
{
"epoch": 16.686160298229264,
"grad_norm": 0.3607911467552185,
"learning_rate": 0.0004000034965034965,
"loss": 3.3019,
"step": 57300
},
{
"epoch": 16.700722273998135,
"grad_norm": 0.3782852292060852,
"learning_rate": 0.0003998286713286713,
"loss": 3.2801,
"step": 57350
},
{
"epoch": 16.71528424976701,
"grad_norm": 0.365681916475296,
"learning_rate": 0.00039965384615384613,
"loss": 3.2835,
"step": 57400
},
{
"epoch": 16.72984622553588,
"grad_norm": 0.34979718923568726,
"learning_rate": 0.00039947902097902093,
"loss": 3.2942,
"step": 57450
},
{
"epoch": 16.744408201304754,
"grad_norm": 0.3606526553630829,
"learning_rate": 0.0003993041958041958,
"loss": 3.2869,
"step": 57500
},
{
"epoch": 16.758970177073625,
"grad_norm": 0.4020101726055145,
"learning_rate": 0.00039912937062937064,
"loss": 3.2722,
"step": 57550
},
{
"epoch": 16.7735321528425,
"grad_norm": 0.3851315975189209,
"learning_rate": 0.00039895454545454544,
"loss": 3.3015,
"step": 57600
},
{
"epoch": 16.78809412861137,
"grad_norm": 0.3507918119430542,
"learning_rate": 0.0003987797202797203,
"loss": 3.2921,
"step": 57650
},
{
"epoch": 16.802656104380244,
"grad_norm": 0.39337655901908875,
"learning_rate": 0.00039860489510489504,
"loss": 3.2917,
"step": 57700
},
{
"epoch": 16.817218080149114,
"grad_norm": 0.3827846050262451,
"learning_rate": 0.0003984300699300699,
"loss": 3.3008,
"step": 57750
},
{
"epoch": 16.83178005591799,
"grad_norm": 0.3798079490661621,
"learning_rate": 0.0003982552447552447,
"loss": 3.3022,
"step": 57800
},
{
"epoch": 16.84634203168686,
"grad_norm": 0.3755364716053009,
"learning_rate": 0.00039808041958041955,
"loss": 3.3025,
"step": 57850
},
{
"epoch": 16.86090400745573,
"grad_norm": 0.37088385224342346,
"learning_rate": 0.00039790559440559435,
"loss": 3.291,
"step": 57900
},
{
"epoch": 16.875465983224604,
"grad_norm": 0.3381931781768799,
"learning_rate": 0.0003977307692307692,
"loss": 3.3046,
"step": 57950
},
{
"epoch": 16.890027958993475,
"grad_norm": 0.3929003179073334,
"learning_rate": 0.000397555944055944,
"loss": 3.2891,
"step": 58000
},
{
"epoch": 16.890027958993475,
"eval_accuracy": 0.37263147547984243,
"eval_loss": 3.53983998298645,
"eval_runtime": 53.8555,
"eval_samples_per_second": 309.049,
"eval_steps_per_second": 19.329,
"step": 58000
},
{
"epoch": 16.90458993476235,
"grad_norm": 0.3901415467262268,
"learning_rate": 0.00039738111888111885,
"loss": 3.2989,
"step": 58050
},
{
"epoch": 16.91915191053122,
"grad_norm": 0.39257511496543884,
"learning_rate": 0.00039720629370629365,
"loss": 3.3008,
"step": 58100
},
{
"epoch": 16.933713886300094,
"grad_norm": 0.4112852215766907,
"learning_rate": 0.0003970314685314685,
"loss": 3.2988,
"step": 58150
},
{
"epoch": 16.948275862068964,
"grad_norm": 0.333914190530777,
"learning_rate": 0.0003968566433566433,
"loss": 3.309,
"step": 58200
},
{
"epoch": 16.96283783783784,
"grad_norm": 0.3521735966205597,
"learning_rate": 0.00039668181818181816,
"loss": 3.2993,
"step": 58250
},
{
"epoch": 16.97739981360671,
"grad_norm": 0.3785235285758972,
"learning_rate": 0.000396506993006993,
"loss": 3.3071,
"step": 58300
},
{
"epoch": 16.991961789375583,
"grad_norm": 0.37092772126197815,
"learning_rate": 0.0003963321678321678,
"loss": 3.2962,
"step": 58350
},
{
"epoch": 17.006407269338304,
"grad_norm": 0.36320120096206665,
"learning_rate": 0.00039615734265734267,
"loss": 3.2517,
"step": 58400
},
{
"epoch": 17.020969245107175,
"grad_norm": 0.3834632933139801,
"learning_rate": 0.0003959825174825174,
"loss": 3.1924,
"step": 58450
},
{
"epoch": 17.03553122087605,
"grad_norm": 0.37915751338005066,
"learning_rate": 0.00039580769230769227,
"loss": 3.1932,
"step": 58500
},
{
"epoch": 17.05009319664492,
"grad_norm": 0.39746683835983276,
"learning_rate": 0.00039563286713286707,
"loss": 3.2142,
"step": 58550
},
{
"epoch": 17.064655172413794,
"grad_norm": 0.3451356291770935,
"learning_rate": 0.0003954580419580419,
"loss": 3.1985,
"step": 58600
},
{
"epoch": 17.079217148182664,
"grad_norm": 0.3739311993122101,
"learning_rate": 0.0003952832167832167,
"loss": 3.1925,
"step": 58650
},
{
"epoch": 17.09377912395154,
"grad_norm": 0.3468732535839081,
"learning_rate": 0.0003951083916083916,
"loss": 3.222,
"step": 58700
},
{
"epoch": 17.10834109972041,
"grad_norm": 0.45381662249565125,
"learning_rate": 0.0003949335664335664,
"loss": 3.2193,
"step": 58750
},
{
"epoch": 17.122903075489283,
"grad_norm": 0.3815675377845764,
"learning_rate": 0.00039475874125874123,
"loss": 3.208,
"step": 58800
},
{
"epoch": 17.137465051258154,
"grad_norm": 0.36168697476387024,
"learning_rate": 0.00039458391608391603,
"loss": 3.2273,
"step": 58850
},
{
"epoch": 17.152027027027028,
"grad_norm": 0.374543696641922,
"learning_rate": 0.0003944090909090909,
"loss": 3.2288,
"step": 58900
},
{
"epoch": 17.1665890027959,
"grad_norm": 0.40095534920692444,
"learning_rate": 0.00039423426573426573,
"loss": 3.2166,
"step": 58950
},
{
"epoch": 17.181150978564773,
"grad_norm": 0.3500742018222809,
"learning_rate": 0.00039405944055944053,
"loss": 3.2247,
"step": 59000
},
{
"epoch": 17.181150978564773,
"eval_accuracy": 0.37181478748475616,
"eval_loss": 3.5555620193481445,
"eval_runtime": 53.7354,
"eval_samples_per_second": 309.74,
"eval_steps_per_second": 19.373,
"step": 59000
},
{
"epoch": 17.195712954333644,
"grad_norm": 0.36379897594451904,
"learning_rate": 0.0003938846153846154,
"loss": 3.2339,
"step": 59050
},
{
"epoch": 17.210274930102518,
"grad_norm": 0.4176549017429352,
"learning_rate": 0.0003937097902097902,
"loss": 3.235,
"step": 59100
},
{
"epoch": 17.22483690587139,
"grad_norm": 0.3799380958080292,
"learning_rate": 0.00039353496503496504,
"loss": 3.2303,
"step": 59150
},
{
"epoch": 17.239398881640263,
"grad_norm": 0.3564260005950928,
"learning_rate": 0.0003933601398601398,
"loss": 3.2433,
"step": 59200
},
{
"epoch": 17.253960857409133,
"grad_norm": 0.3839263319969177,
"learning_rate": 0.00039318531468531464,
"loss": 3.2311,
"step": 59250
},
{
"epoch": 17.268522833178004,
"grad_norm": 0.3948262929916382,
"learning_rate": 0.00039301048951048944,
"loss": 3.246,
"step": 59300
},
{
"epoch": 17.283084808946878,
"grad_norm": 0.3694395124912262,
"learning_rate": 0.0003928356643356643,
"loss": 3.2422,
"step": 59350
},
{
"epoch": 17.29764678471575,
"grad_norm": 0.3637319803237915,
"learning_rate": 0.0003926608391608391,
"loss": 3.2391,
"step": 59400
},
{
"epoch": 17.312208760484623,
"grad_norm": 0.3916648030281067,
"learning_rate": 0.00039248601398601395,
"loss": 3.2335,
"step": 59450
},
{
"epoch": 17.326770736253494,
"grad_norm": 0.3760225474834442,
"learning_rate": 0.00039231118881118875,
"loss": 3.2372,
"step": 59500
},
{
"epoch": 17.341332712022368,
"grad_norm": 0.3617111146450043,
"learning_rate": 0.0003921363636363636,
"loss": 3.2481,
"step": 59550
},
{
"epoch": 17.35589468779124,
"grad_norm": 0.37129026651382446,
"learning_rate": 0.00039196153846153846,
"loss": 3.2402,
"step": 59600
},
{
"epoch": 17.370456663560113,
"grad_norm": 0.3965758681297302,
"learning_rate": 0.00039178671328671326,
"loss": 3.2444,
"step": 59650
},
{
"epoch": 17.385018639328983,
"grad_norm": 0.37022677063941956,
"learning_rate": 0.0003916118881118881,
"loss": 3.2543,
"step": 59700
},
{
"epoch": 17.399580615097857,
"grad_norm": 0.378451406955719,
"learning_rate": 0.0003914370629370629,
"loss": 3.2477,
"step": 59750
},
{
"epoch": 17.414142590866728,
"grad_norm": 0.37330615520477295,
"learning_rate": 0.00039126223776223776,
"loss": 3.2527,
"step": 59800
},
{
"epoch": 17.428704566635602,
"grad_norm": 0.4043675363063812,
"learning_rate": 0.00039108741258741256,
"loss": 3.2679,
"step": 59850
},
{
"epoch": 17.443266542404473,
"grad_norm": 0.4179072082042694,
"learning_rate": 0.0003909125874125874,
"loss": 3.2596,
"step": 59900
},
{
"epoch": 17.457828518173347,
"grad_norm": 0.3811803460121155,
"learning_rate": 0.00039073776223776216,
"loss": 3.2562,
"step": 59950
},
{
"epoch": 17.472390493942218,
"grad_norm": 0.35742607712745667,
"learning_rate": 0.000390562937062937,
"loss": 3.2549,
"step": 60000
},
{
"epoch": 17.472390493942218,
"eval_accuracy": 0.3721059074783976,
"eval_loss": 3.546649932861328,
"eval_runtime": 53.6803,
"eval_samples_per_second": 310.058,
"eval_steps_per_second": 19.393,
"step": 60000
},
{
"epoch": 17.486952469711092,
"grad_norm": 0.3788129687309265,
"learning_rate": 0.0003903881118881118,
"loss": 3.255,
"step": 60050
},
{
"epoch": 17.501514445479962,
"grad_norm": 0.3589966297149658,
"learning_rate": 0.00039021328671328667,
"loss": 3.2656,
"step": 60100
},
{
"epoch": 17.516076421248837,
"grad_norm": 0.3915586471557617,
"learning_rate": 0.00039003846153846147,
"loss": 3.2637,
"step": 60150
},
{
"epoch": 17.530638397017707,
"grad_norm": 0.4009389281272888,
"learning_rate": 0.0003898636363636363,
"loss": 3.252,
"step": 60200
},
{
"epoch": 17.54520037278658,
"grad_norm": 0.364738404750824,
"learning_rate": 0.0003896888111888111,
"loss": 3.2702,
"step": 60250
},
{
"epoch": 17.559762348555452,
"grad_norm": 0.37006139755249023,
"learning_rate": 0.000389513986013986,
"loss": 3.2781,
"step": 60300
},
{
"epoch": 17.574324324324323,
"grad_norm": 0.35005587339401245,
"learning_rate": 0.00038933916083916083,
"loss": 3.2608,
"step": 60350
},
{
"epoch": 17.588886300093197,
"grad_norm": 0.3852754831314087,
"learning_rate": 0.00038916433566433563,
"loss": 3.2687,
"step": 60400
},
{
"epoch": 17.603448275862068,
"grad_norm": 0.37562867999076843,
"learning_rate": 0.0003889895104895105,
"loss": 3.2753,
"step": 60450
},
{
"epoch": 17.618010251630942,
"grad_norm": 0.37529560923576355,
"learning_rate": 0.0003888146853146853,
"loss": 3.2747,
"step": 60500
},
{
"epoch": 17.632572227399812,
"grad_norm": 0.3818197548389435,
"learning_rate": 0.00038863986013986014,
"loss": 3.268,
"step": 60550
},
{
"epoch": 17.647134203168687,
"grad_norm": 0.395130455493927,
"learning_rate": 0.00038846503496503494,
"loss": 3.2721,
"step": 60600
},
{
"epoch": 17.661696178937557,
"grad_norm": 0.3846796751022339,
"learning_rate": 0.0003882902097902098,
"loss": 3.2643,
"step": 60650
},
{
"epoch": 17.67625815470643,
"grad_norm": 0.3665049970149994,
"learning_rate": 0.00038811538461538454,
"loss": 3.28,
"step": 60700
},
{
"epoch": 17.690820130475302,
"grad_norm": 0.3482495844364166,
"learning_rate": 0.0003879405594405594,
"loss": 3.2701,
"step": 60750
},
{
"epoch": 17.705382106244176,
"grad_norm": 0.36692333221435547,
"learning_rate": 0.0003877657342657342,
"loss": 3.2761,
"step": 60800
},
{
"epoch": 17.719944082013047,
"grad_norm": 0.3555540144443512,
"learning_rate": 0.00038759090909090905,
"loss": 3.2739,
"step": 60850
},
{
"epoch": 17.73450605778192,
"grad_norm": 0.3915589451789856,
"learning_rate": 0.00038741608391608384,
"loss": 3.2764,
"step": 60900
},
{
"epoch": 17.74906803355079,
"grad_norm": 0.38231101632118225,
"learning_rate": 0.0003872412587412587,
"loss": 3.2891,
"step": 60950
},
{
"epoch": 17.763630009319666,
"grad_norm": 0.3909893333911896,
"learning_rate": 0.00038706643356643355,
"loss": 3.2807,
"step": 61000
},
{
"epoch": 17.763630009319666,
"eval_accuracy": 0.3726736855273857,
"eval_loss": 3.5406863689422607,
"eval_runtime": 53.8444,
"eval_samples_per_second": 309.113,
"eval_steps_per_second": 19.333,
"step": 61000
},
{
"epoch": 17.778191985088537,
"grad_norm": 0.37381458282470703,
"learning_rate": 0.00038689160839160835,
"loss": 3.2855,
"step": 61050
},
{
"epoch": 17.79275396085741,
"grad_norm": 0.36098214983940125,
"learning_rate": 0.0003867167832167832,
"loss": 3.2747,
"step": 61100
},
{
"epoch": 17.80731593662628,
"grad_norm": 0.3577144742012024,
"learning_rate": 0.000386541958041958,
"loss": 3.275,
"step": 61150
},
{
"epoch": 17.821877912395152,
"grad_norm": 0.3942639231681824,
"learning_rate": 0.00038636713286713286,
"loss": 3.2853,
"step": 61200
},
{
"epoch": 17.836439888164026,
"grad_norm": 0.39560213685035706,
"learning_rate": 0.00038619230769230766,
"loss": 3.2937,
"step": 61250
},
{
"epoch": 17.851001863932897,
"grad_norm": 0.37834152579307556,
"learning_rate": 0.0003860174825174825,
"loss": 3.2795,
"step": 61300
},
{
"epoch": 17.86556383970177,
"grad_norm": 0.37762266397476196,
"learning_rate": 0.0003858426573426573,
"loss": 3.2938,
"step": 61350
},
{
"epoch": 17.88012581547064,
"grad_norm": 0.38189753890037537,
"learning_rate": 0.00038566783216783217,
"loss": 3.2825,
"step": 61400
},
{
"epoch": 17.894687791239516,
"grad_norm": 0.3447355031967163,
"learning_rate": 0.0003854930069930069,
"loss": 3.281,
"step": 61450
},
{
"epoch": 17.909249767008387,
"grad_norm": 0.37258729338645935,
"learning_rate": 0.00038531818181818177,
"loss": 3.2896,
"step": 61500
},
{
"epoch": 17.92381174277726,
"grad_norm": 0.3561316728591919,
"learning_rate": 0.00038514335664335657,
"loss": 3.2814,
"step": 61550
},
{
"epoch": 17.93837371854613,
"grad_norm": 0.38559871912002563,
"learning_rate": 0.0003849685314685314,
"loss": 3.2816,
"step": 61600
},
{
"epoch": 17.952935694315006,
"grad_norm": 0.37410980463027954,
"learning_rate": 0.0003847937062937062,
"loss": 3.2903,
"step": 61650
},
{
"epoch": 17.967497670083876,
"grad_norm": 0.3895050287246704,
"learning_rate": 0.0003846188811188811,
"loss": 3.2849,
"step": 61700
},
{
"epoch": 17.98205964585275,
"grad_norm": 0.36890658736228943,
"learning_rate": 0.00038444405594405593,
"loss": 3.2858,
"step": 61750
},
{
"epoch": 17.99662162162162,
"grad_norm": 0.37519824504852295,
"learning_rate": 0.00038426923076923073,
"loss": 3.2881,
"step": 61800
},
{
"epoch": 18.01106710158434,
"grad_norm": 0.3875732719898224,
"learning_rate": 0.0003840944055944056,
"loss": 3.2087,
"step": 61850
},
{
"epoch": 18.025629077353216,
"grad_norm": 0.38909807801246643,
"learning_rate": 0.0003839195804195804,
"loss": 3.1766,
"step": 61900
},
{
"epoch": 18.040191053122086,
"grad_norm": 0.37911364436149597,
"learning_rate": 0.00038374475524475523,
"loss": 3.1851,
"step": 61950
},
{
"epoch": 18.05475302889096,
"grad_norm": 0.38040152192115784,
"learning_rate": 0.00038356993006993003,
"loss": 3.1888,
"step": 62000
},
{
"epoch": 18.05475302889096,
"eval_accuracy": 0.3723371809143802,
"eval_loss": 3.5530009269714355,
"eval_runtime": 53.6091,
"eval_samples_per_second": 310.47,
"eval_steps_per_second": 19.418,
"step": 62000
},
{
"epoch": 18.06931500465983,
"grad_norm": 0.38925114274024963,
"learning_rate": 0.0003833951048951049,
"loss": 3.2002,
"step": 62050
},
{
"epoch": 18.083876980428705,
"grad_norm": 0.3732377886772156,
"learning_rate": 0.0003832202797202797,
"loss": 3.1865,
"step": 62100
},
{
"epoch": 18.098438956197576,
"grad_norm": 0.36380213499069214,
"learning_rate": 0.00038304545454545454,
"loss": 3.2032,
"step": 62150
},
{
"epoch": 18.11300093196645,
"grad_norm": 0.40389272570610046,
"learning_rate": 0.0003828706293706293,
"loss": 3.1982,
"step": 62200
},
{
"epoch": 18.12756290773532,
"grad_norm": 0.3760923445224762,
"learning_rate": 0.00038269580419580414,
"loss": 3.1936,
"step": 62250
},
{
"epoch": 18.142124883504195,
"grad_norm": 0.4106809198856354,
"learning_rate": 0.00038252097902097894,
"loss": 3.2135,
"step": 62300
},
{
"epoch": 18.156686859273066,
"grad_norm": 0.38382473587989807,
"learning_rate": 0.0003823461538461538,
"loss": 3.2054,
"step": 62350
},
{
"epoch": 18.17124883504194,
"grad_norm": 0.3982684314250946,
"learning_rate": 0.00038217132867132865,
"loss": 3.208,
"step": 62400
},
{
"epoch": 18.18581081081081,
"grad_norm": 0.3854241669178009,
"learning_rate": 0.00038199650349650345,
"loss": 3.2146,
"step": 62450
},
{
"epoch": 18.200372786579685,
"grad_norm": 0.3876630663871765,
"learning_rate": 0.0003818216783216783,
"loss": 3.2161,
"step": 62500
},
{
"epoch": 18.214934762348555,
"grad_norm": 0.3796238303184509,
"learning_rate": 0.0003816468531468531,
"loss": 3.2252,
"step": 62550
},
{
"epoch": 18.229496738117426,
"grad_norm": 0.38117191195487976,
"learning_rate": 0.00038147202797202796,
"loss": 3.2236,
"step": 62600
},
{
"epoch": 18.2440587138863,
"grad_norm": 0.4064529240131378,
"learning_rate": 0.00038129720279720276,
"loss": 3.2123,
"step": 62650
},
{
"epoch": 18.25862068965517,
"grad_norm": 0.3822717070579529,
"learning_rate": 0.0003811223776223776,
"loss": 3.2287,
"step": 62700
},
{
"epoch": 18.273182665424045,
"grad_norm": 0.4065398573875427,
"learning_rate": 0.0003809475524475524,
"loss": 3.2333,
"step": 62750
},
{
"epoch": 18.287744641192916,
"grad_norm": 0.35962507128715515,
"learning_rate": 0.00038077272727272726,
"loss": 3.2236,
"step": 62800
},
{
"epoch": 18.30230661696179,
"grad_norm": 0.3704201281070709,
"learning_rate": 0.00038059790209790206,
"loss": 3.2315,
"step": 62850
},
{
"epoch": 18.31686859273066,
"grad_norm": 0.36879655718803406,
"learning_rate": 0.0003804230769230769,
"loss": 3.2353,
"step": 62900
},
{
"epoch": 18.331430568499535,
"grad_norm": 0.37170055508613586,
"learning_rate": 0.00038024825174825166,
"loss": 3.2381,
"step": 62950
},
{
"epoch": 18.345992544268405,
"grad_norm": 0.3680242896080017,
"learning_rate": 0.0003800734265734265,
"loss": 3.2371,
"step": 63000
},
{
"epoch": 18.345992544268405,
"eval_accuracy": 0.37224582379198135,
"eval_loss": 3.550691843032837,
"eval_runtime": 53.6468,
"eval_samples_per_second": 310.251,
"eval_steps_per_second": 19.405,
"step": 63000
},
{
"epoch": 18.36055452003728,
"grad_norm": 0.355370432138443,
"learning_rate": 0.0003798986013986013,
"loss": 3.2253,
"step": 63050
},
{
"epoch": 18.37511649580615,
"grad_norm": 0.4078480303287506,
"learning_rate": 0.00037972377622377617,
"loss": 3.2362,
"step": 63100
},
{
"epoch": 18.389678471575024,
"grad_norm": 0.3773263394832611,
"learning_rate": 0.000379548951048951,
"loss": 3.2418,
"step": 63150
},
{
"epoch": 18.404240447343895,
"grad_norm": 0.3944302499294281,
"learning_rate": 0.0003793741258741258,
"loss": 3.2532,
"step": 63200
},
{
"epoch": 18.41880242311277,
"grad_norm": 0.39704012870788574,
"learning_rate": 0.0003791993006993007,
"loss": 3.2447,
"step": 63250
},
{
"epoch": 18.43336439888164,
"grad_norm": 0.36326536536216736,
"learning_rate": 0.0003790244755244755,
"loss": 3.2419,
"step": 63300
},
{
"epoch": 18.447926374650514,
"grad_norm": 0.3721631169319153,
"learning_rate": 0.00037884965034965033,
"loss": 3.2484,
"step": 63350
},
{
"epoch": 18.462488350419385,
"grad_norm": 0.39378708600997925,
"learning_rate": 0.00037867482517482513,
"loss": 3.2457,
"step": 63400
},
{
"epoch": 18.47705032618826,
"grad_norm": 0.3788743019104004,
"learning_rate": 0.0003785,
"loss": 3.2521,
"step": 63450
},
{
"epoch": 18.49161230195713,
"grad_norm": 0.3708218038082123,
"learning_rate": 0.0003783251748251748,
"loss": 3.263,
"step": 63500
},
{
"epoch": 18.506174277726004,
"grad_norm": 0.37706801295280457,
"learning_rate": 0.00037815034965034964,
"loss": 3.247,
"step": 63550
},
{
"epoch": 18.520736253494874,
"grad_norm": 0.4102998971939087,
"learning_rate": 0.00037797552447552444,
"loss": 3.2465,
"step": 63600
},
{
"epoch": 18.535298229263745,
"grad_norm": 0.41490569710731506,
"learning_rate": 0.0003778006993006993,
"loss": 3.2588,
"step": 63650
},
{
"epoch": 18.54986020503262,
"grad_norm": 0.3888053894042969,
"learning_rate": 0.00037762587412587404,
"loss": 3.254,
"step": 63700
},
{
"epoch": 18.56442218080149,
"grad_norm": 0.4218922257423401,
"learning_rate": 0.0003774510489510489,
"loss": 3.2644,
"step": 63750
},
{
"epoch": 18.578984156570364,
"grad_norm": 0.3588375747203827,
"learning_rate": 0.0003772762237762238,
"loss": 3.258,
"step": 63800
},
{
"epoch": 18.593546132339235,
"grad_norm": 0.3538081645965576,
"learning_rate": 0.00037710139860139854,
"loss": 3.263,
"step": 63850
},
{
"epoch": 18.60810810810811,
"grad_norm": 0.38032105565071106,
"learning_rate": 0.0003769265734265734,
"loss": 3.2679,
"step": 63900
},
{
"epoch": 18.62267008387698,
"grad_norm": 0.37946024537086487,
"learning_rate": 0.0003767517482517482,
"loss": 3.2503,
"step": 63950
},
{
"epoch": 18.637232059645854,
"grad_norm": 0.37310442328453064,
"learning_rate": 0.00037657692307692305,
"loss": 3.2615,
"step": 64000
},
{
"epoch": 18.637232059645854,
"eval_accuracy": 0.37308696774776123,
"eval_loss": 3.542891025543213,
"eval_runtime": 53.6472,
"eval_samples_per_second": 310.249,
"eval_steps_per_second": 19.405,
"step": 64000
},
{
"epoch": 18.651794035414724,
"grad_norm": 0.3829942047595978,
"learning_rate": 0.00037640209790209785,
"loss": 3.2537,
"step": 64050
},
{
"epoch": 18.6663560111836,
"grad_norm": 0.38445472717285156,
"learning_rate": 0.0003762272727272727,
"loss": 3.2592,
"step": 64100
},
{
"epoch": 18.68091798695247,
"grad_norm": 0.3823604881763458,
"learning_rate": 0.0003760524475524475,
"loss": 3.2438,
"step": 64150
},
{
"epoch": 18.695479962721343,
"grad_norm": 0.3722836673259735,
"learning_rate": 0.00037587762237762236,
"loss": 3.2596,
"step": 64200
},
{
"epoch": 18.710041938490214,
"grad_norm": 0.35490530729293823,
"learning_rate": 0.00037570279720279716,
"loss": 3.2636,
"step": 64250
},
{
"epoch": 18.724603914259088,
"grad_norm": 0.36468833684921265,
"learning_rate": 0.000375527972027972,
"loss": 3.269,
"step": 64300
},
{
"epoch": 18.73916589002796,
"grad_norm": 0.3716800808906555,
"learning_rate": 0.0003753531468531468,
"loss": 3.268,
"step": 64350
},
{
"epoch": 18.753727865796833,
"grad_norm": 0.3842315077781677,
"learning_rate": 0.00037517832167832167,
"loss": 3.2724,
"step": 64400
},
{
"epoch": 18.768289841565704,
"grad_norm": 0.4173533320426941,
"learning_rate": 0.0003750034965034965,
"loss": 3.2614,
"step": 64450
},
{
"epoch": 18.782851817334574,
"grad_norm": 0.36763879656791687,
"learning_rate": 0.00037482867132867127,
"loss": 3.2677,
"step": 64500
},
{
"epoch": 18.79741379310345,
"grad_norm": 0.3957614600658417,
"learning_rate": 0.0003746538461538462,
"loss": 3.2671,
"step": 64550
},
{
"epoch": 18.81197576887232,
"grad_norm": 0.37960317730903625,
"learning_rate": 0.0003744790209790209,
"loss": 3.2856,
"step": 64600
},
{
"epoch": 18.826537744641193,
"grad_norm": 0.3816615045070648,
"learning_rate": 0.0003743041958041958,
"loss": 3.2665,
"step": 64650
},
{
"epoch": 18.841099720410064,
"grad_norm": 0.34941965341567993,
"learning_rate": 0.0003741293706293706,
"loss": 3.2813,
"step": 64700
},
{
"epoch": 18.855661696178938,
"grad_norm": 0.411423921585083,
"learning_rate": 0.0003739545454545454,
"loss": 3.2731,
"step": 64750
},
{
"epoch": 18.87022367194781,
"grad_norm": 0.368845671415329,
"learning_rate": 0.0003737797202797202,
"loss": 3.2698,
"step": 64800
},
{
"epoch": 18.884785647716683,
"grad_norm": 0.41026976704597473,
"learning_rate": 0.0003736048951048951,
"loss": 3.2827,
"step": 64850
},
{
"epoch": 18.899347623485554,
"grad_norm": 0.34741586446762085,
"learning_rate": 0.0003734300699300699,
"loss": 3.2883,
"step": 64900
},
{
"epoch": 18.913909599254428,
"grad_norm": 0.36796513199806213,
"learning_rate": 0.00037325524475524473,
"loss": 3.2757,
"step": 64950
},
{
"epoch": 18.9284715750233,
"grad_norm": 0.4108026921749115,
"learning_rate": 0.00037308041958041953,
"loss": 3.2662,
"step": 65000
},
{
"epoch": 18.9284715750233,
"eval_accuracy": 0.37334457837218304,
"eval_loss": 3.5351333618164062,
"eval_runtime": 53.5478,
"eval_samples_per_second": 310.825,
"eval_steps_per_second": 19.441,
"step": 65000
},
{
"epoch": 18.943033550792173,
"grad_norm": 0.35917073488235474,
"learning_rate": 0.0003729055944055944,
"loss": 3.2701,
"step": 65050
},
{
"epoch": 18.957595526561043,
"grad_norm": 0.35945555567741394,
"learning_rate": 0.0003727307692307692,
"loss": 3.2814,
"step": 65100
},
{
"epoch": 18.972157502329917,
"grad_norm": 0.37766772508621216,
"learning_rate": 0.00037255594405594404,
"loss": 3.2783,
"step": 65150
},
{
"epoch": 18.986719478098788,
"grad_norm": 0.3846476078033447,
"learning_rate": 0.0003723811188811189,
"loss": 3.2838,
"step": 65200
},
{
"epoch": 19.00116495806151,
"grad_norm": 0.41956663131713867,
"learning_rate": 0.00037220629370629364,
"loss": 3.2632,
"step": 65250
},
{
"epoch": 19.015726933830383,
"grad_norm": 0.3777720630168915,
"learning_rate": 0.00037203146853146855,
"loss": 3.1728,
"step": 65300
},
{
"epoch": 19.030288909599253,
"grad_norm": 0.36135587096214294,
"learning_rate": 0.0003718566433566433,
"loss": 3.178,
"step": 65350
},
{
"epoch": 19.044850885368128,
"grad_norm": 0.3920912742614746,
"learning_rate": 0.00037168181818181815,
"loss": 3.1806,
"step": 65400
},
{
"epoch": 19.059412861136998,
"grad_norm": 0.39687469601631165,
"learning_rate": 0.00037150699300699295,
"loss": 3.1745,
"step": 65450
},
{
"epoch": 19.073974836905872,
"grad_norm": 0.39901939034461975,
"learning_rate": 0.0003713321678321678,
"loss": 3.1879,
"step": 65500
},
{
"epoch": 19.088536812674743,
"grad_norm": 0.37469103932380676,
"learning_rate": 0.0003711573426573426,
"loss": 3.1825,
"step": 65550
},
{
"epoch": 19.103098788443617,
"grad_norm": 0.40531569719314575,
"learning_rate": 0.00037098251748251746,
"loss": 3.1891,
"step": 65600
},
{
"epoch": 19.117660764212488,
"grad_norm": 0.39202824234962463,
"learning_rate": 0.00037080769230769226,
"loss": 3.2018,
"step": 65650
},
{
"epoch": 19.132222739981362,
"grad_norm": 0.37663426995277405,
"learning_rate": 0.0003706328671328671,
"loss": 3.1906,
"step": 65700
},
{
"epoch": 19.146784715750233,
"grad_norm": 0.37909311056137085,
"learning_rate": 0.0003704580419580419,
"loss": 3.2038,
"step": 65750
},
{
"epoch": 19.161346691519107,
"grad_norm": 0.3874996602535248,
"learning_rate": 0.00037028321678321676,
"loss": 3.2037,
"step": 65800
},
{
"epoch": 19.175908667287977,
"grad_norm": 0.4042704701423645,
"learning_rate": 0.0003701083916083916,
"loss": 3.1917,
"step": 65850
},
{
"epoch": 19.19047064305685,
"grad_norm": 0.3726341128349304,
"learning_rate": 0.0003699335664335664,
"loss": 3.2,
"step": 65900
},
{
"epoch": 19.205032618825722,
"grad_norm": 0.4252948462963104,
"learning_rate": 0.00036975874125874127,
"loss": 3.2133,
"step": 65950
},
{
"epoch": 19.219594594594593,
"grad_norm": 0.3802109360694885,
"learning_rate": 0.00036958391608391607,
"loss": 3.2062,
"step": 66000
},
{
"epoch": 19.219594594594593,
"eval_accuracy": 0.3728054890463163,
"eval_loss": 3.5497379302978516,
"eval_runtime": 53.6654,
"eval_samples_per_second": 310.144,
"eval_steps_per_second": 19.398,
"step": 66000
},
{
"epoch": 19.234156570363467,
"grad_norm": 0.4133206307888031,
"learning_rate": 0.0003694090909090909,
"loss": 3.2076,
"step": 66050
},
{
"epoch": 19.248718546132338,
"grad_norm": 0.38479116559028625,
"learning_rate": 0.00036923426573426567,
"loss": 3.2175,
"step": 66100
},
{
"epoch": 19.263280521901212,
"grad_norm": 0.41108012199401855,
"learning_rate": 0.0003690594405594405,
"loss": 3.2145,
"step": 66150
},
{
"epoch": 19.277842497670083,
"grad_norm": 0.4138273596763611,
"learning_rate": 0.0003688846153846153,
"loss": 3.2205,
"step": 66200
},
{
"epoch": 19.292404473438957,
"grad_norm": 0.37386050820350647,
"learning_rate": 0.0003687097902097902,
"loss": 3.2188,
"step": 66250
},
{
"epoch": 19.306966449207827,
"grad_norm": 0.3839118182659149,
"learning_rate": 0.000368534965034965,
"loss": 3.2226,
"step": 66300
},
{
"epoch": 19.3215284249767,
"grad_norm": 0.40011078119277954,
"learning_rate": 0.00036836013986013983,
"loss": 3.2172,
"step": 66350
},
{
"epoch": 19.336090400745572,
"grad_norm": 0.3948891758918762,
"learning_rate": 0.00036818531468531463,
"loss": 3.2176,
"step": 66400
},
{
"epoch": 19.350652376514446,
"grad_norm": 0.36948251724243164,
"learning_rate": 0.0003680104895104895,
"loss": 3.2251,
"step": 66450
},
{
"epoch": 19.365214352283317,
"grad_norm": 0.3812604546546936,
"learning_rate": 0.0003678356643356643,
"loss": 3.2263,
"step": 66500
},
{
"epoch": 19.37977632805219,
"grad_norm": 0.4402337074279785,
"learning_rate": 0.00036766083916083914,
"loss": 3.233,
"step": 66550
},
{
"epoch": 19.394338303821062,
"grad_norm": 0.3661404550075531,
"learning_rate": 0.000367486013986014,
"loss": 3.2324,
"step": 66600
},
{
"epoch": 19.408900279589936,
"grad_norm": 0.362842857837677,
"learning_rate": 0.0003673111888111888,
"loss": 3.2284,
"step": 66650
},
{
"epoch": 19.423462255358807,
"grad_norm": 0.4055345952510834,
"learning_rate": 0.00036713636363636365,
"loss": 3.2299,
"step": 66700
},
{
"epoch": 19.43802423112768,
"grad_norm": 0.3992021083831787,
"learning_rate": 0.00036696153846153844,
"loss": 3.244,
"step": 66750
},
{
"epoch": 19.45258620689655,
"grad_norm": 0.3800589442253113,
"learning_rate": 0.0003667867132867133,
"loss": 3.2337,
"step": 66800
},
{
"epoch": 19.467148182665426,
"grad_norm": 0.3811376392841339,
"learning_rate": 0.00036661188811188804,
"loss": 3.2337,
"step": 66850
},
{
"epoch": 19.481710158434296,
"grad_norm": 0.3804425597190857,
"learning_rate": 0.0003664370629370629,
"loss": 3.2338,
"step": 66900
},
{
"epoch": 19.496272134203167,
"grad_norm": 0.36933010816574097,
"learning_rate": 0.0003662622377622377,
"loss": 3.2523,
"step": 66950
},
{
"epoch": 19.51083410997204,
"grad_norm": 0.38615190982818604,
"learning_rate": 0.00036608741258741255,
"loss": 3.242,
"step": 67000
},
{
"epoch": 19.51083410997204,
"eval_accuracy": 0.3726040801007962,
"eval_loss": 3.5483808517456055,
"eval_runtime": 53.4497,
"eval_samples_per_second": 311.395,
"eval_steps_per_second": 19.476,
"step": 67000
},
{
"epoch": 19.525396085740912,
"grad_norm": 0.4136638641357422,
"learning_rate": 0.00036591258741258735,
"loss": 3.2398,
"step": 67050
},
{
"epoch": 19.539958061509786,
"grad_norm": 0.41616570949554443,
"learning_rate": 0.0003657377622377622,
"loss": 3.2418,
"step": 67100
},
{
"epoch": 19.554520037278657,
"grad_norm": 0.38881203532218933,
"learning_rate": 0.000365562937062937,
"loss": 3.2485,
"step": 67150
},
{
"epoch": 19.56908201304753,
"grad_norm": 0.3823322057723999,
"learning_rate": 0.00036538811188811186,
"loss": 3.2523,
"step": 67200
},
{
"epoch": 19.5836439888164,
"grad_norm": 0.40406888723373413,
"learning_rate": 0.0003652132867132867,
"loss": 3.2513,
"step": 67250
},
{
"epoch": 19.598205964585276,
"grad_norm": 0.39410239458084106,
"learning_rate": 0.0003650384615384615,
"loss": 3.2452,
"step": 67300
},
{
"epoch": 19.612767940354146,
"grad_norm": 0.3842507600784302,
"learning_rate": 0.00036486363636363637,
"loss": 3.2548,
"step": 67350
},
{
"epoch": 19.62732991612302,
"grad_norm": 0.3869752883911133,
"learning_rate": 0.00036468881118881117,
"loss": 3.2466,
"step": 67400
},
{
"epoch": 19.64189189189189,
"grad_norm": 0.41032785177230835,
"learning_rate": 0.000364513986013986,
"loss": 3.2446,
"step": 67450
},
{
"epoch": 19.656453867660765,
"grad_norm": 0.38462066650390625,
"learning_rate": 0.0003643391608391608,
"loss": 3.2495,
"step": 67500
},
{
"epoch": 19.671015843429636,
"grad_norm": 0.39043310284614563,
"learning_rate": 0.0003641643356643357,
"loss": 3.2456,
"step": 67550
},
{
"epoch": 19.68557781919851,
"grad_norm": 0.40164053440093994,
"learning_rate": 0.0003639895104895104,
"loss": 3.2385,
"step": 67600
},
{
"epoch": 19.70013979496738,
"grad_norm": 0.41391077637672424,
"learning_rate": 0.0003638146853146853,
"loss": 3.2571,
"step": 67650
},
{
"epoch": 19.714701770736255,
"grad_norm": 0.4099098742008209,
"learning_rate": 0.00036363986013986007,
"loss": 3.2434,
"step": 67700
},
{
"epoch": 19.729263746505126,
"grad_norm": 0.38578134775161743,
"learning_rate": 0.0003634650349650349,
"loss": 3.2451,
"step": 67750
},
{
"epoch": 19.743825722273996,
"grad_norm": 0.37067756056785583,
"learning_rate": 0.0003632902097902097,
"loss": 3.2473,
"step": 67800
},
{
"epoch": 19.75838769804287,
"grad_norm": 0.39515194296836853,
"learning_rate": 0.0003631153846153846,
"loss": 3.2602,
"step": 67850
},
{
"epoch": 19.77294967381174,
"grad_norm": 0.3894272446632385,
"learning_rate": 0.00036294055944055943,
"loss": 3.2439,
"step": 67900
},
{
"epoch": 19.787511649580615,
"grad_norm": 0.36319202184677124,
"learning_rate": 0.00036276573426573423,
"loss": 3.2531,
"step": 67950
},
{
"epoch": 19.802073625349486,
"grad_norm": 0.3721925914287567,
"learning_rate": 0.0003625909090909091,
"loss": 3.2734,
"step": 68000
},
{
"epoch": 19.802073625349486,
"eval_accuracy": 0.37333822922854143,
"eval_loss": 3.538437604904175,
"eval_runtime": 53.8392,
"eval_samples_per_second": 309.143,
"eval_steps_per_second": 19.335,
"step": 68000
},
{
"epoch": 19.81663560111836,
"grad_norm": 0.39392781257629395,
"learning_rate": 0.0003624160839160839,
"loss": 3.2668,
"step": 68050
},
{
"epoch": 19.83119757688723,
"grad_norm": 0.41753748059272766,
"learning_rate": 0.00036224125874125874,
"loss": 3.2599,
"step": 68100
},
{
"epoch": 19.845759552656105,
"grad_norm": 0.39892056584358215,
"learning_rate": 0.00036206643356643354,
"loss": 3.2604,
"step": 68150
},
{
"epoch": 19.860321528424976,
"grad_norm": 0.38567325472831726,
"learning_rate": 0.0003618916083916084,
"loss": 3.2723,
"step": 68200
},
{
"epoch": 19.87488350419385,
"grad_norm": 0.3812924027442932,
"learning_rate": 0.0003617167832167832,
"loss": 3.2558,
"step": 68250
},
{
"epoch": 19.88944547996272,
"grad_norm": 0.382830411195755,
"learning_rate": 0.00036154195804195805,
"loss": 3.2611,
"step": 68300
},
{
"epoch": 19.904007455731595,
"grad_norm": 0.4037676155567169,
"learning_rate": 0.0003613671328671328,
"loss": 3.2651,
"step": 68350
},
{
"epoch": 19.918569431500465,
"grad_norm": 0.37967729568481445,
"learning_rate": 0.00036119230769230765,
"loss": 3.2734,
"step": 68400
},
{
"epoch": 19.93313140726934,
"grad_norm": 0.3941499590873718,
"learning_rate": 0.00036101748251748245,
"loss": 3.2649,
"step": 68450
},
{
"epoch": 19.94769338303821,
"grad_norm": 0.40444591641426086,
"learning_rate": 0.0003608426573426573,
"loss": 3.2642,
"step": 68500
},
{
"epoch": 19.962255358807084,
"grad_norm": 0.375336617231369,
"learning_rate": 0.0003606678321678321,
"loss": 3.2692,
"step": 68550
},
{
"epoch": 19.976817334575955,
"grad_norm": 0.4084981083869934,
"learning_rate": 0.00036049300699300696,
"loss": 3.2645,
"step": 68600
},
{
"epoch": 19.99137931034483,
"grad_norm": 0.37530145049095154,
"learning_rate": 0.0003603181818181818,
"loss": 3.2563,
"step": 68650
},
{
"epoch": 20.00582479030755,
"grad_norm": 0.40279820561408997,
"learning_rate": 0.0003601433566433566,
"loss": 3.2205,
"step": 68700
},
{
"epoch": 20.02038676607642,
"grad_norm": 0.3879990577697754,
"learning_rate": 0.00035996853146853146,
"loss": 3.1578,
"step": 68750
},
{
"epoch": 20.034948741845295,
"grad_norm": 0.439457505941391,
"learning_rate": 0.00035979370629370626,
"loss": 3.1662,
"step": 68800
},
{
"epoch": 20.049510717614165,
"grad_norm": 0.4187467694282532,
"learning_rate": 0.0003596188811188811,
"loss": 3.1751,
"step": 68850
},
{
"epoch": 20.06407269338304,
"grad_norm": 0.38843098282814026,
"learning_rate": 0.0003594440559440559,
"loss": 3.1713,
"step": 68900
},
{
"epoch": 20.07863466915191,
"grad_norm": 0.38127413392066956,
"learning_rate": 0.00035926923076923077,
"loss": 3.1732,
"step": 68950
},
{
"epoch": 20.093196644920784,
"grad_norm": 0.42359402775764465,
"learning_rate": 0.00035909440559440557,
"loss": 3.1755,
"step": 69000
},
{
"epoch": 20.093196644920784,
"eval_accuracy": 0.3727432909539753,
"eval_loss": 3.554373025894165,
"eval_runtime": 53.6419,
"eval_samples_per_second": 310.28,
"eval_steps_per_second": 19.406,
"step": 69000
},
{
"epoch": 20.107758620689655,
"grad_norm": 0.379149854183197,
"learning_rate": 0.0003589195804195804,
"loss": 3.177,
"step": 69050
},
{
"epoch": 20.12232059645853,
"grad_norm": 0.37855619192123413,
"learning_rate": 0.00035874475524475517,
"loss": 3.1842,
"step": 69100
},
{
"epoch": 20.1368825722274,
"grad_norm": 0.4195460081100464,
"learning_rate": 0.00035856993006993,
"loss": 3.1919,
"step": 69150
},
{
"epoch": 20.151444547996274,
"grad_norm": 0.38967326283454895,
"learning_rate": 0.0003583951048951048,
"loss": 3.1964,
"step": 69200
},
{
"epoch": 20.166006523765144,
"grad_norm": 0.4318048655986786,
"learning_rate": 0.0003582202797202797,
"loss": 3.1925,
"step": 69250
},
{
"epoch": 20.180568499534015,
"grad_norm": 0.39629480242729187,
"learning_rate": 0.00035804545454545453,
"loss": 3.205,
"step": 69300
},
{
"epoch": 20.19513047530289,
"grad_norm": 0.3838287889957428,
"learning_rate": 0.00035787062937062933,
"loss": 3.1975,
"step": 69350
},
{
"epoch": 20.20969245107176,
"grad_norm": 0.4163590967655182,
"learning_rate": 0.0003576958041958042,
"loss": 3.1989,
"step": 69400
},
{
"epoch": 20.224254426840634,
"grad_norm": 0.40171295404434204,
"learning_rate": 0.000357520979020979,
"loss": 3.2048,
"step": 69450
},
{
"epoch": 20.238816402609505,
"grad_norm": 0.3948460519313812,
"learning_rate": 0.00035734615384615384,
"loss": 3.2049,
"step": 69500
},
{
"epoch": 20.25337837837838,
"grad_norm": 0.39959606528282166,
"learning_rate": 0.00035717132867132864,
"loss": 3.2079,
"step": 69550
},
{
"epoch": 20.26794035414725,
"grad_norm": 0.3934629261493683,
"learning_rate": 0.0003569965034965035,
"loss": 3.2002,
"step": 69600
},
{
"epoch": 20.282502329916124,
"grad_norm": 0.41200846433639526,
"learning_rate": 0.0003568216783216783,
"loss": 3.2036,
"step": 69650
},
{
"epoch": 20.297064305684994,
"grad_norm": 0.41301435232162476,
"learning_rate": 0.00035664685314685314,
"loss": 3.2147,
"step": 69700
},
{
"epoch": 20.31162628145387,
"grad_norm": 0.4042994976043701,
"learning_rate": 0.00035647202797202794,
"loss": 3.219,
"step": 69750
},
{
"epoch": 20.32618825722274,
"grad_norm": 0.4100610613822937,
"learning_rate": 0.0003562972027972028,
"loss": 3.2105,
"step": 69800
},
{
"epoch": 20.340750232991613,
"grad_norm": 0.3665405511856079,
"learning_rate": 0.00035612237762237754,
"loss": 3.2183,
"step": 69850
},
{
"epoch": 20.355312208760484,
"grad_norm": 0.42888033390045166,
"learning_rate": 0.0003559475524475524,
"loss": 3.2236,
"step": 69900
},
{
"epoch": 20.36987418452936,
"grad_norm": 0.3644813001155853,
"learning_rate": 0.0003557727272727272,
"loss": 3.2221,
"step": 69950
},
{
"epoch": 20.38443616029823,
"grad_norm": 0.3995056450366974,
"learning_rate": 0.00035559790209790205,
"loss": 3.2174,
"step": 70000
},
{
"epoch": 20.38443616029823,
"eval_accuracy": 0.3727511685951603,
"eval_loss": 3.5511600971221924,
"eval_runtime": 53.5518,
"eval_samples_per_second": 310.802,
"eval_steps_per_second": 19.439,
"step": 70000
},
{
"epoch": 20.398998136067103,
"grad_norm": 0.37608620524406433,
"learning_rate": 0.0003554230769230769,
"loss": 3.2234,
"step": 70050
},
{
"epoch": 20.413560111835974,
"grad_norm": 0.37095916271209717,
"learning_rate": 0.0003552482517482517,
"loss": 3.2267,
"step": 70100
},
{
"epoch": 20.428122087604848,
"grad_norm": 0.40074586868286133,
"learning_rate": 0.00035507342657342656,
"loss": 3.2143,
"step": 70150
},
{
"epoch": 20.44268406337372,
"grad_norm": 0.3774716854095459,
"learning_rate": 0.00035489860139860136,
"loss": 3.2161,
"step": 70200
},
{
"epoch": 20.45724603914259,
"grad_norm": 0.38591891527175903,
"learning_rate": 0.0003547237762237762,
"loss": 3.2136,
"step": 70250
},
{
"epoch": 20.471808014911463,
"grad_norm": 0.3808690011501312,
"learning_rate": 0.000354548951048951,
"loss": 3.2343,
"step": 70300
},
{
"epoch": 20.486369990680334,
"grad_norm": 0.41351810097694397,
"learning_rate": 0.00035437412587412587,
"loss": 3.2146,
"step": 70350
},
{
"epoch": 20.50093196644921,
"grad_norm": 0.3997446596622467,
"learning_rate": 0.00035419930069930067,
"loss": 3.2265,
"step": 70400
},
{
"epoch": 20.51549394221808,
"grad_norm": 0.36297866702079773,
"learning_rate": 0.0003540244755244755,
"loss": 3.2354,
"step": 70450
},
{
"epoch": 20.530055917986953,
"grad_norm": 0.38284286856651306,
"learning_rate": 0.0003538496503496503,
"loss": 3.2386,
"step": 70500
},
{
"epoch": 20.544617893755824,
"grad_norm": 0.38133928179740906,
"learning_rate": 0.0003536748251748252,
"loss": 3.2329,
"step": 70550
},
{
"epoch": 20.559179869524698,
"grad_norm": 0.385995477437973,
"learning_rate": 0.0003534999999999999,
"loss": 3.2371,
"step": 70600
},
{
"epoch": 20.57374184529357,
"grad_norm": 0.4186258912086487,
"learning_rate": 0.00035332517482517477,
"loss": 3.2257,
"step": 70650
},
{
"epoch": 20.588303821062443,
"grad_norm": 0.390411376953125,
"learning_rate": 0.0003531503496503496,
"loss": 3.2255,
"step": 70700
},
{
"epoch": 20.602865796831313,
"grad_norm": 0.38977673649787903,
"learning_rate": 0.0003529755244755244,
"loss": 3.2293,
"step": 70750
},
{
"epoch": 20.617427772600188,
"grad_norm": 0.40031591057777405,
"learning_rate": 0.0003528006993006993,
"loss": 3.2441,
"step": 70800
},
{
"epoch": 20.631989748369058,
"grad_norm": 0.37329474091529846,
"learning_rate": 0.0003526258741258741,
"loss": 3.2423,
"step": 70850
},
{
"epoch": 20.646551724137932,
"grad_norm": 0.4275941252708435,
"learning_rate": 0.00035245104895104893,
"loss": 3.2377,
"step": 70900
},
{
"epoch": 20.661113699906803,
"grad_norm": 0.38450363278388977,
"learning_rate": 0.00035227622377622373,
"loss": 3.2387,
"step": 70950
},
{
"epoch": 20.675675675675677,
"grad_norm": 0.3979182243347168,
"learning_rate": 0.0003521013986013986,
"loss": 3.2381,
"step": 71000
},
{
"epoch": 20.675675675675677,
"eval_accuracy": 0.3735670335531078,
"eval_loss": 3.5422792434692383,
"eval_runtime": 53.7212,
"eval_samples_per_second": 309.822,
"eval_steps_per_second": 19.378,
"step": 71000
},
{
"epoch": 20.690237651444548,
"grad_norm": 0.40853452682495117,
"learning_rate": 0.0003519265734265734,
"loss": 3.2331,
"step": 71050
},
{
"epoch": 20.70479962721342,
"grad_norm": 0.39112091064453125,
"learning_rate": 0.00035175174825174824,
"loss": 3.2512,
"step": 71100
},
{
"epoch": 20.719361602982293,
"grad_norm": 0.3723371624946594,
"learning_rate": 0.00035157692307692304,
"loss": 3.2462,
"step": 71150
},
{
"epoch": 20.733923578751163,
"grad_norm": 0.42912352085113525,
"learning_rate": 0.0003514020979020979,
"loss": 3.2412,
"step": 71200
},
{
"epoch": 20.748485554520038,
"grad_norm": 0.40721452236175537,
"learning_rate": 0.0003512272727272727,
"loss": 3.2465,
"step": 71250
},
{
"epoch": 20.763047530288908,
"grad_norm": 0.42458590865135193,
"learning_rate": 0.00035105244755244755,
"loss": 3.2518,
"step": 71300
},
{
"epoch": 20.777609506057782,
"grad_norm": 0.37446263432502747,
"learning_rate": 0.0003508776223776223,
"loss": 3.2493,
"step": 71350
},
{
"epoch": 20.792171481826653,
"grad_norm": 0.39642587304115295,
"learning_rate": 0.00035070279720279715,
"loss": 3.247,
"step": 71400
},
{
"epoch": 20.806733457595527,
"grad_norm": 0.4110299348831177,
"learning_rate": 0.000350527972027972,
"loss": 3.2551,
"step": 71450
},
{
"epoch": 20.821295433364398,
"grad_norm": 0.36979174613952637,
"learning_rate": 0.0003503531468531468,
"loss": 3.2441,
"step": 71500
},
{
"epoch": 20.835857409133272,
"grad_norm": 0.3842044770717621,
"learning_rate": 0.00035017832167832166,
"loss": 3.2428,
"step": 71550
},
{
"epoch": 20.850419384902143,
"grad_norm": 0.3942607045173645,
"learning_rate": 0.00035000349650349645,
"loss": 3.2425,
"step": 71600
},
{
"epoch": 20.864981360671017,
"grad_norm": 0.400119423866272,
"learning_rate": 0.0003498286713286713,
"loss": 3.2444,
"step": 71650
},
{
"epoch": 20.879543336439887,
"grad_norm": 0.3815798759460449,
"learning_rate": 0.0003496538461538461,
"loss": 3.2592,
"step": 71700
},
{
"epoch": 20.89410531220876,
"grad_norm": 0.3906811773777008,
"learning_rate": 0.00034947902097902096,
"loss": 3.2483,
"step": 71750
},
{
"epoch": 20.908667287977632,
"grad_norm": 0.367766797542572,
"learning_rate": 0.00034930419580419576,
"loss": 3.2491,
"step": 71800
},
{
"epoch": 20.923229263746506,
"grad_norm": 0.4095619022846222,
"learning_rate": 0.0003491293706293706,
"loss": 3.2504,
"step": 71850
},
{
"epoch": 20.937791239515377,
"grad_norm": 0.3742462694644928,
"learning_rate": 0.0003489545454545454,
"loss": 3.2566,
"step": 71900
},
{
"epoch": 20.95235321528425,
"grad_norm": 0.39355388283729553,
"learning_rate": 0.00034877972027972027,
"loss": 3.25,
"step": 71950
},
{
"epoch": 20.966915191053122,
"grad_norm": 0.4069824516773224,
"learning_rate": 0.00034860489510489507,
"loss": 3.2437,
"step": 72000
},
{
"epoch": 20.966915191053122,
"eval_accuracy": 0.3738097119322984,
"eval_loss": 3.5342633724212646,
"eval_runtime": 53.8776,
"eval_samples_per_second": 308.923,
"eval_steps_per_second": 19.322,
"step": 72000
},
{
"epoch": 20.981477166821996,
"grad_norm": 0.3976595401763916,
"learning_rate": 0.0003484300699300699,
"loss": 3.2602,
"step": 72050
},
{
"epoch": 20.996039142590867,
"grad_norm": 0.396127849817276,
"learning_rate": 0.0003482552447552448,
"loss": 3.2456,
"step": 72100
},
{
"epoch": 21.010484622553587,
"grad_norm": 0.39420220255851746,
"learning_rate": 0.0003480804195804195,
"loss": 3.1874,
"step": 72150
},
{
"epoch": 21.02504659832246,
"grad_norm": 0.393216609954834,
"learning_rate": 0.0003479055944055944,
"loss": 3.1555,
"step": 72200
},
{
"epoch": 21.039608574091332,
"grad_norm": 0.3881584405899048,
"learning_rate": 0.0003477307692307692,
"loss": 3.1586,
"step": 72250
},
{
"epoch": 21.054170549860206,
"grad_norm": 0.3874966502189636,
"learning_rate": 0.00034755594405594403,
"loss": 3.1575,
"step": 72300
},
{
"epoch": 21.068732525629077,
"grad_norm": 0.4072263836860657,
"learning_rate": 0.00034738111888111883,
"loss": 3.1657,
"step": 72350
},
{
"epoch": 21.08329450139795,
"grad_norm": 0.39619114995002747,
"learning_rate": 0.0003472062937062937,
"loss": 3.1553,
"step": 72400
},
{
"epoch": 21.09785647716682,
"grad_norm": 0.41326701641082764,
"learning_rate": 0.0003470314685314685,
"loss": 3.1683,
"step": 72450
},
{
"epoch": 21.112418452935696,
"grad_norm": 0.4170162081718445,
"learning_rate": 0.00034685664335664334,
"loss": 3.1696,
"step": 72500
},
{
"epoch": 21.126980428704567,
"grad_norm": 0.393693745136261,
"learning_rate": 0.00034668181818181814,
"loss": 3.1703,
"step": 72550
},
{
"epoch": 21.141542404473437,
"grad_norm": 0.3787849545478821,
"learning_rate": 0.000346506993006993,
"loss": 3.1926,
"step": 72600
},
{
"epoch": 21.15610438024231,
"grad_norm": 0.38826873898506165,
"learning_rate": 0.0003463321678321678,
"loss": 3.1892,
"step": 72650
},
{
"epoch": 21.170666356011182,
"grad_norm": 0.3828301727771759,
"learning_rate": 0.00034615734265734264,
"loss": 3.1898,
"step": 72700
},
{
"epoch": 21.185228331780056,
"grad_norm": 0.40055036544799805,
"learning_rate": 0.0003459825174825175,
"loss": 3.179,
"step": 72750
},
{
"epoch": 21.199790307548927,
"grad_norm": 0.3872724175453186,
"learning_rate": 0.0003458076923076923,
"loss": 3.1851,
"step": 72800
},
{
"epoch": 21.2143522833178,
"grad_norm": 0.4158056378364563,
"learning_rate": 0.00034563286713286715,
"loss": 3.1998,
"step": 72850
},
{
"epoch": 21.22891425908667,
"grad_norm": 0.41352295875549316,
"learning_rate": 0.0003454580419580419,
"loss": 3.2048,
"step": 72900
},
{
"epoch": 21.243476234855546,
"grad_norm": 0.3767777383327484,
"learning_rate": 0.00034528321678321675,
"loss": 3.1816,
"step": 72950
},
{
"epoch": 21.258038210624417,
"grad_norm": 0.38860374689102173,
"learning_rate": 0.00034510839160839155,
"loss": 3.204,
"step": 73000
},
{
"epoch": 21.258038210624417,
"eval_accuracy": 0.37261219289544933,
"eval_loss": 3.5535717010498047,
"eval_runtime": 53.6505,
"eval_samples_per_second": 310.23,
"eval_steps_per_second": 19.403,
"step": 73000
},
{
"epoch": 21.27260018639329,
"grad_norm": 0.42040345072746277,
"learning_rate": 0.0003449335664335664,
"loss": 3.2045,
"step": 73050
},
{
"epoch": 21.28716216216216,
"grad_norm": 0.4155707061290741,
"learning_rate": 0.0003447587412587412,
"loss": 3.1881,
"step": 73100
},
{
"epoch": 21.301724137931036,
"grad_norm": 0.4353258013725281,
"learning_rate": 0.00034458391608391606,
"loss": 3.2045,
"step": 73150
},
{
"epoch": 21.316286113699906,
"grad_norm": 0.4045839011669159,
"learning_rate": 0.00034440909090909086,
"loss": 3.1805,
"step": 73200
},
{
"epoch": 21.33084808946878,
"grad_norm": 0.4096652865409851,
"learning_rate": 0.0003442342657342657,
"loss": 3.1998,
"step": 73250
},
{
"epoch": 21.34541006523765,
"grad_norm": 0.4016556143760681,
"learning_rate": 0.0003440594405594405,
"loss": 3.2029,
"step": 73300
},
{
"epoch": 21.359972041006525,
"grad_norm": 0.4026934802532196,
"learning_rate": 0.00034388461538461537,
"loss": 3.2031,
"step": 73350
},
{
"epoch": 21.374534016775396,
"grad_norm": 0.3856440484523773,
"learning_rate": 0.00034370979020979017,
"loss": 3.2018,
"step": 73400
},
{
"epoch": 21.38909599254427,
"grad_norm": 0.37840890884399414,
"learning_rate": 0.000343534965034965,
"loss": 3.2169,
"step": 73450
},
{
"epoch": 21.40365796831314,
"grad_norm": 0.4057793915271759,
"learning_rate": 0.0003433601398601399,
"loss": 3.2167,
"step": 73500
},
{
"epoch": 21.41821994408201,
"grad_norm": 0.4393506646156311,
"learning_rate": 0.0003431853146853147,
"loss": 3.2073,
"step": 73550
},
{
"epoch": 21.432781919850886,
"grad_norm": 0.4116910398006439,
"learning_rate": 0.0003430104895104895,
"loss": 3.217,
"step": 73600
},
{
"epoch": 21.447343895619756,
"grad_norm": 0.4329574406147003,
"learning_rate": 0.00034283566433566427,
"loss": 3.219,
"step": 73650
},
{
"epoch": 21.46190587138863,
"grad_norm": 0.3817179203033447,
"learning_rate": 0.0003426608391608391,
"loss": 3.2151,
"step": 73700
},
{
"epoch": 21.4764678471575,
"grad_norm": 0.4023750126361847,
"learning_rate": 0.0003424860139860139,
"loss": 3.2181,
"step": 73750
},
{
"epoch": 21.491029822926375,
"grad_norm": 0.39865052700042725,
"learning_rate": 0.0003423111888111888,
"loss": 3.1973,
"step": 73800
},
{
"epoch": 21.505591798695246,
"grad_norm": 0.39138761162757874,
"learning_rate": 0.0003421363636363636,
"loss": 3.2162,
"step": 73850
},
{
"epoch": 21.52015377446412,
"grad_norm": 0.3885617256164551,
"learning_rate": 0.00034196153846153843,
"loss": 3.2203,
"step": 73900
},
{
"epoch": 21.53471575023299,
"grad_norm": 0.3830677568912506,
"learning_rate": 0.00034178671328671323,
"loss": 3.2268,
"step": 73950
},
{
"epoch": 21.549277726001865,
"grad_norm": 0.3745560944080353,
"learning_rate": 0.0003416118881118881,
"loss": 3.2134,
"step": 74000
},
{
"epoch": 21.549277726001865,
"eval_accuracy": 0.37313223479039126,
"eval_loss": 3.546206474304199,
"eval_runtime": 53.7466,
"eval_samples_per_second": 309.675,
"eval_steps_per_second": 19.369,
"step": 74000
},
{
"epoch": 21.563839701770736,
"grad_norm": 0.4401836097240448,
"learning_rate": 0.0003414370629370629,
"loss": 3.2201,
"step": 74050
},
{
"epoch": 21.57840167753961,
"grad_norm": 0.42541825771331787,
"learning_rate": 0.00034126223776223774,
"loss": 3.2306,
"step": 74100
},
{
"epoch": 21.59296365330848,
"grad_norm": 0.3805076777935028,
"learning_rate": 0.0003410874125874126,
"loss": 3.2298,
"step": 74150
},
{
"epoch": 21.607525629077355,
"grad_norm": 0.44168519973754883,
"learning_rate": 0.0003409125874125874,
"loss": 3.2304,
"step": 74200
},
{
"epoch": 21.622087604846225,
"grad_norm": 0.41441184282302856,
"learning_rate": 0.00034073776223776225,
"loss": 3.2218,
"step": 74250
},
{
"epoch": 21.6366495806151,
"grad_norm": 0.37558814883232117,
"learning_rate": 0.00034056293706293705,
"loss": 3.2153,
"step": 74300
},
{
"epoch": 21.65121155638397,
"grad_norm": 0.3728470504283905,
"learning_rate": 0.0003403881118881119,
"loss": 3.2388,
"step": 74350
},
{
"epoch": 21.665773532152844,
"grad_norm": 0.4029659628868103,
"learning_rate": 0.00034021328671328665,
"loss": 3.2296,
"step": 74400
},
{
"epoch": 21.680335507921715,
"grad_norm": 0.38797706365585327,
"learning_rate": 0.0003400384615384615,
"loss": 3.2351,
"step": 74450
},
{
"epoch": 21.694897483690585,
"grad_norm": 0.3840591609477997,
"learning_rate": 0.0003398636363636363,
"loss": 3.2306,
"step": 74500
},
{
"epoch": 21.70945945945946,
"grad_norm": 0.40627843141555786,
"learning_rate": 0.00033968881118881115,
"loss": 3.2432,
"step": 74550
},
{
"epoch": 21.72402143522833,
"grad_norm": 0.3884592652320862,
"learning_rate": 0.00033951398601398595,
"loss": 3.2281,
"step": 74600
},
{
"epoch": 21.738583410997204,
"grad_norm": 0.4909214377403259,
"learning_rate": 0.0003393391608391608,
"loss": 3.2371,
"step": 74650
},
{
"epoch": 21.753145386766075,
"grad_norm": 0.4191752076148987,
"learning_rate": 0.0003391643356643356,
"loss": 3.2308,
"step": 74700
},
{
"epoch": 21.76770736253495,
"grad_norm": 0.41994035243988037,
"learning_rate": 0.00033898951048951046,
"loss": 3.2297,
"step": 74750
},
{
"epoch": 21.78226933830382,
"grad_norm": 0.40065521001815796,
"learning_rate": 0.00033881468531468526,
"loss": 3.2387,
"step": 74800
},
{
"epoch": 21.796831314072694,
"grad_norm": 0.39243024587631226,
"learning_rate": 0.0003386398601398601,
"loss": 3.2371,
"step": 74850
},
{
"epoch": 21.811393289841565,
"grad_norm": 0.3721027076244354,
"learning_rate": 0.00033846503496503497,
"loss": 3.2413,
"step": 74900
},
{
"epoch": 21.82595526561044,
"grad_norm": 0.41749846935272217,
"learning_rate": 0.00033829020979020977,
"loss": 3.2357,
"step": 74950
},
{
"epoch": 21.84051724137931,
"grad_norm": 0.3856858015060425,
"learning_rate": 0.0003381153846153846,
"loss": 3.2241,
"step": 75000
},
{
"epoch": 21.84051724137931,
"eval_accuracy": 0.3736914297377898,
"eval_loss": 3.539247512817383,
"eval_runtime": 53.6014,
"eval_samples_per_second": 310.514,
"eval_steps_per_second": 19.421,
"step": 75000
},
{
"epoch": 21.855079217148184,
"grad_norm": 0.3717370927333832,
"learning_rate": 0.0003379405594405594,
"loss": 3.2297,
"step": 75050
},
{
"epoch": 21.869641192917054,
"grad_norm": 0.406404048204422,
"learning_rate": 0.0003377657342657343,
"loss": 3.2443,
"step": 75100
},
{
"epoch": 21.88420316868593,
"grad_norm": 0.42060935497283936,
"learning_rate": 0.000337590909090909,
"loss": 3.2396,
"step": 75150
},
{
"epoch": 21.8987651444548,
"grad_norm": 0.40486958622932434,
"learning_rate": 0.0003374160839160839,
"loss": 3.2437,
"step": 75200
},
{
"epoch": 21.913327120223673,
"grad_norm": 0.3771274983882904,
"learning_rate": 0.0003372412587412587,
"loss": 3.253,
"step": 75250
},
{
"epoch": 21.927889095992544,
"grad_norm": 0.380912721157074,
"learning_rate": 0.00033706643356643353,
"loss": 3.2551,
"step": 75300
},
{
"epoch": 21.94245107176142,
"grad_norm": 0.4271264374256134,
"learning_rate": 0.00033689160839160833,
"loss": 3.2422,
"step": 75350
},
{
"epoch": 21.95701304753029,
"grad_norm": 0.3783544898033142,
"learning_rate": 0.0003367167832167832,
"loss": 3.2409,
"step": 75400
},
{
"epoch": 21.97157502329916,
"grad_norm": 0.3998122811317444,
"learning_rate": 0.000336541958041958,
"loss": 3.2439,
"step": 75450
},
{
"epoch": 21.986136999068034,
"grad_norm": 0.38411828875541687,
"learning_rate": 0.00033636713286713284,
"loss": 3.2554,
"step": 75500
},
{
"epoch": 22.000582479030754,
"grad_norm": 0.4125036895275116,
"learning_rate": 0.0003361923076923077,
"loss": 3.2221,
"step": 75550
},
{
"epoch": 22.01514445479963,
"grad_norm": 0.4056716561317444,
"learning_rate": 0.0003360174825174825,
"loss": 3.1341,
"step": 75600
},
{
"epoch": 22.0297064305685,
"grad_norm": 0.44730907678604126,
"learning_rate": 0.00033584265734265734,
"loss": 3.1374,
"step": 75650
},
{
"epoch": 22.044268406337373,
"grad_norm": 0.37317419052124023,
"learning_rate": 0.00033566783216783214,
"loss": 3.1469,
"step": 75700
},
{
"epoch": 22.058830382106244,
"grad_norm": 0.4454713761806488,
"learning_rate": 0.000335493006993007,
"loss": 3.1495,
"step": 75750
},
{
"epoch": 22.073392357875118,
"grad_norm": 0.40124979615211487,
"learning_rate": 0.0003353181818181818,
"loss": 3.1522,
"step": 75800
},
{
"epoch": 22.08795433364399,
"grad_norm": 0.4181446135044098,
"learning_rate": 0.00033514335664335665,
"loss": 3.1621,
"step": 75850
},
{
"epoch": 22.10251630941286,
"grad_norm": 0.3863363265991211,
"learning_rate": 0.0003349685314685314,
"loss": 3.1557,
"step": 75900
},
{
"epoch": 22.117078285181734,
"grad_norm": 0.4252418577671051,
"learning_rate": 0.00033479370629370625,
"loss": 3.1556,
"step": 75950
},
{
"epoch": 22.131640260950604,
"grad_norm": 0.3969983458518982,
"learning_rate": 0.00033461888111888105,
"loss": 3.1705,
"step": 76000
},
{
"epoch": 22.131640260950604,
"eval_accuracy": 0.3732756784059981,
"eval_loss": 3.551520347595215,
"eval_runtime": 53.6444,
"eval_samples_per_second": 310.265,
"eval_steps_per_second": 19.406,
"step": 76000
},
{
"epoch": 22.14620223671948,
"grad_norm": 0.3937150835990906,
"learning_rate": 0.0003344440559440559,
"loss": 3.1621,
"step": 76050
},
{
"epoch": 22.16076421248835,
"grad_norm": 0.3968026638031006,
"learning_rate": 0.0003342692307692307,
"loss": 3.1649,
"step": 76100
},
{
"epoch": 22.175326188257223,
"grad_norm": 0.3895382881164551,
"learning_rate": 0.00033409440559440556,
"loss": 3.1622,
"step": 76150
},
{
"epoch": 22.189888164026094,
"grad_norm": 0.3964536190032959,
"learning_rate": 0.00033391958041958036,
"loss": 3.185,
"step": 76200
},
{
"epoch": 22.204450139794968,
"grad_norm": 0.41914913058280945,
"learning_rate": 0.0003337447552447552,
"loss": 3.1825,
"step": 76250
},
{
"epoch": 22.21901211556384,
"grad_norm": 0.39592286944389343,
"learning_rate": 0.00033356993006993007,
"loss": 3.1869,
"step": 76300
},
{
"epoch": 22.233574091332713,
"grad_norm": 0.39550790190696716,
"learning_rate": 0.00033339510489510487,
"loss": 3.1796,
"step": 76350
},
{
"epoch": 22.248136067101584,
"grad_norm": 0.43073078989982605,
"learning_rate": 0.0003332202797202797,
"loss": 3.1802,
"step": 76400
},
{
"epoch": 22.262698042870458,
"grad_norm": 0.39744439721107483,
"learning_rate": 0.0003330454545454545,
"loss": 3.1891,
"step": 76450
},
{
"epoch": 22.27726001863933,
"grad_norm": 0.41097643971443176,
"learning_rate": 0.0003328706293706294,
"loss": 3.1963,
"step": 76500
},
{
"epoch": 22.291821994408203,
"grad_norm": 0.4132195711135864,
"learning_rate": 0.00033269580419580417,
"loss": 3.1976,
"step": 76550
},
{
"epoch": 22.306383970177073,
"grad_norm": 0.39138537645339966,
"learning_rate": 0.000332520979020979,
"loss": 3.1897,
"step": 76600
},
{
"epoch": 22.320945945945947,
"grad_norm": 0.41629618406295776,
"learning_rate": 0.00033234615384615377,
"loss": 3.193,
"step": 76650
},
{
"epoch": 22.335507921714818,
"grad_norm": 0.3976675570011139,
"learning_rate": 0.0003321713286713286,
"loss": 3.1897,
"step": 76700
},
{
"epoch": 22.350069897483692,
"grad_norm": 0.3983791470527649,
"learning_rate": 0.0003319965034965034,
"loss": 3.2127,
"step": 76750
},
{
"epoch": 22.364631873252563,
"grad_norm": 0.4043506681919098,
"learning_rate": 0.0003318216783216783,
"loss": 3.2072,
"step": 76800
},
{
"epoch": 22.379193849021433,
"grad_norm": 0.4207497835159302,
"learning_rate": 0.0003316468531468531,
"loss": 3.2042,
"step": 76850
},
{
"epoch": 22.393755824790308,
"grad_norm": 0.4057113528251648,
"learning_rate": 0.00033147202797202793,
"loss": 3.2052,
"step": 76900
},
{
"epoch": 22.40831780055918,
"grad_norm": 0.44642117619514465,
"learning_rate": 0.0003312972027972028,
"loss": 3.2106,
"step": 76950
},
{
"epoch": 22.422879776328053,
"grad_norm": 0.40626928210258484,
"learning_rate": 0.0003311223776223776,
"loss": 3.2019,
"step": 77000
},
{
"epoch": 22.422879776328053,
"eval_accuracy": 0.3736867266684256,
"eval_loss": 3.5471279621124268,
"eval_runtime": 53.8269,
"eval_samples_per_second": 309.213,
"eval_steps_per_second": 19.34,
"step": 77000
},
{
"epoch": 22.437441752096923,
"grad_norm": 0.4069664478302002,
"learning_rate": 0.00033094755244755244,
"loss": 3.2164,
"step": 77050
},
{
"epoch": 22.452003727865797,
"grad_norm": 0.4306388795375824,
"learning_rate": 0.00033077272727272724,
"loss": 3.1861,
"step": 77100
},
{
"epoch": 22.466565703634668,
"grad_norm": 0.4088691174983978,
"learning_rate": 0.0003305979020979021,
"loss": 3.2014,
"step": 77150
},
{
"epoch": 22.481127679403542,
"grad_norm": 0.42260071635246277,
"learning_rate": 0.0003304230769230769,
"loss": 3.2185,
"step": 77200
},
{
"epoch": 22.495689655172413,
"grad_norm": 0.3822173774242401,
"learning_rate": 0.00033024825174825175,
"loss": 3.1988,
"step": 77250
},
{
"epoch": 22.510251630941287,
"grad_norm": 0.3947815001010895,
"learning_rate": 0.00033007342657342655,
"loss": 3.2037,
"step": 77300
},
{
"epoch": 22.524813606710158,
"grad_norm": 0.4035757780075073,
"learning_rate": 0.0003298986013986014,
"loss": 3.2042,
"step": 77350
},
{
"epoch": 22.539375582479032,
"grad_norm": 0.39407262206077576,
"learning_rate": 0.00032972377622377615,
"loss": 3.1987,
"step": 77400
},
{
"epoch": 22.553937558247902,
"grad_norm": 0.4021145701408386,
"learning_rate": 0.000329548951048951,
"loss": 3.213,
"step": 77450
},
{
"epoch": 22.568499534016777,
"grad_norm": 0.4199064075946808,
"learning_rate": 0.0003293741258741258,
"loss": 3.2167,
"step": 77500
},
{
"epoch": 22.583061509785647,
"grad_norm": 0.4025714099407196,
"learning_rate": 0.00032919930069930065,
"loss": 3.222,
"step": 77550
},
{
"epoch": 22.59762348555452,
"grad_norm": 0.4109303653240204,
"learning_rate": 0.0003290244755244755,
"loss": 3.2103,
"step": 77600
},
{
"epoch": 22.612185461323392,
"grad_norm": 0.3831614851951599,
"learning_rate": 0.0003288496503496503,
"loss": 3.2133,
"step": 77650
},
{
"epoch": 22.626747437092266,
"grad_norm": 0.38504159450531006,
"learning_rate": 0.00032867482517482516,
"loss": 3.2155,
"step": 77700
},
{
"epoch": 22.641309412861137,
"grad_norm": 0.3741070330142975,
"learning_rate": 0.00032849999999999996,
"loss": 3.211,
"step": 77750
},
{
"epoch": 22.655871388630008,
"grad_norm": 0.42629310488700867,
"learning_rate": 0.0003283251748251748,
"loss": 3.2188,
"step": 77800
},
{
"epoch": 22.670433364398882,
"grad_norm": 0.42940154671669006,
"learning_rate": 0.0003281503496503496,
"loss": 3.2274,
"step": 77850
},
{
"epoch": 22.684995340167752,
"grad_norm": 0.3877221345901489,
"learning_rate": 0.00032797552447552447,
"loss": 3.2277,
"step": 77900
},
{
"epoch": 22.699557315936627,
"grad_norm": 0.40116119384765625,
"learning_rate": 0.00032780069930069927,
"loss": 3.2121,
"step": 77950
},
{
"epoch": 22.714119291705497,
"grad_norm": 0.37814322113990784,
"learning_rate": 0.0003276258741258741,
"loss": 3.2065,
"step": 78000
},
{
"epoch": 22.714119291705497,
"eval_accuracy": 0.37389765932940816,
"eval_loss": 3.5403106212615967,
"eval_runtime": 53.5367,
"eval_samples_per_second": 310.889,
"eval_steps_per_second": 19.445,
"step": 78000
},
{
"epoch": 22.72868126747437,
"grad_norm": 0.4460028409957886,
"learning_rate": 0.0003274510489510489,
"loss": 3.2265,
"step": 78050
},
{
"epoch": 22.743243243243242,
"grad_norm": 0.39633312821388245,
"learning_rate": 0.0003272762237762238,
"loss": 3.2329,
"step": 78100
},
{
"epoch": 22.757805219012116,
"grad_norm": 0.42298927903175354,
"learning_rate": 0.0003271013986013985,
"loss": 3.234,
"step": 78150
},
{
"epoch": 22.772367194780987,
"grad_norm": 0.4159824252128601,
"learning_rate": 0.0003269265734265734,
"loss": 3.2266,
"step": 78200
},
{
"epoch": 22.78692917054986,
"grad_norm": 0.41692405939102173,
"learning_rate": 0.0003267517482517482,
"loss": 3.2213,
"step": 78250
},
{
"epoch": 22.80149114631873,
"grad_norm": 0.41382187604904175,
"learning_rate": 0.00032657692307692303,
"loss": 3.2392,
"step": 78300
},
{
"epoch": 22.816053122087606,
"grad_norm": 0.41410788893699646,
"learning_rate": 0.0003264020979020979,
"loss": 3.2321,
"step": 78350
},
{
"epoch": 22.830615097856477,
"grad_norm": 0.4324868619441986,
"learning_rate": 0.0003262272727272727,
"loss": 3.2236,
"step": 78400
},
{
"epoch": 22.84517707362535,
"grad_norm": 0.38736671209335327,
"learning_rate": 0.00032605244755244754,
"loss": 3.2303,
"step": 78450
},
{
"epoch": 22.85973904939422,
"grad_norm": 0.4221399128437042,
"learning_rate": 0.00032587762237762234,
"loss": 3.2348,
"step": 78500
},
{
"epoch": 22.874301025163096,
"grad_norm": 0.38114288449287415,
"learning_rate": 0.0003257027972027972,
"loss": 3.2253,
"step": 78550
},
{
"epoch": 22.888863000931966,
"grad_norm": 0.3975285589694977,
"learning_rate": 0.000325527972027972,
"loss": 3.2262,
"step": 78600
},
{
"epoch": 22.90342497670084,
"grad_norm": 0.40540263056755066,
"learning_rate": 0.00032535314685314684,
"loss": 3.2257,
"step": 78650
},
{
"epoch": 22.91798695246971,
"grad_norm": 0.3953773081302643,
"learning_rate": 0.00032517832167832164,
"loss": 3.2365,
"step": 78700
},
{
"epoch": 22.93254892823858,
"grad_norm": 0.3982967138290405,
"learning_rate": 0.0003250034965034965,
"loss": 3.2219,
"step": 78750
},
{
"epoch": 22.947110904007456,
"grad_norm": 0.41824063658714294,
"learning_rate": 0.0003248286713286713,
"loss": 3.2344,
"step": 78800
},
{
"epoch": 22.961672879776327,
"grad_norm": 0.40607979893684387,
"learning_rate": 0.00032465384615384615,
"loss": 3.2305,
"step": 78850
},
{
"epoch": 22.9762348555452,
"grad_norm": 0.4212436079978943,
"learning_rate": 0.0003244790209790209,
"loss": 3.2386,
"step": 78900
},
{
"epoch": 22.99079683131407,
"grad_norm": 0.4023859202861786,
"learning_rate": 0.00032430419580419575,
"loss": 3.2445,
"step": 78950
},
{
"epoch": 23.005242311276795,
"grad_norm": 0.38621482253074646,
"learning_rate": 0.00032412937062937066,
"loss": 3.194,
"step": 79000
},
{
"epoch": 23.005242311276795,
"eval_accuracy": 0.37375503875094,
"eval_loss": 3.5451126098632812,
"eval_runtime": 53.8234,
"eval_samples_per_second": 309.233,
"eval_steps_per_second": 19.341,
"step": 79000
},
{
"epoch": 23.019804287045666,
"grad_norm": 0.45066943764686584,
"learning_rate": 0.0003239545454545454,
"loss": 3.1238,
"step": 79050
},
{
"epoch": 23.03436626281454,
"grad_norm": 0.3973267078399658,
"learning_rate": 0.00032377972027972026,
"loss": 3.1312,
"step": 79100
},
{
"epoch": 23.04892823858341,
"grad_norm": 0.4207251965999603,
"learning_rate": 0.00032360489510489506,
"loss": 3.1431,
"step": 79150
},
{
"epoch": 23.063490214352285,
"grad_norm": 0.4109000861644745,
"learning_rate": 0.0003234300699300699,
"loss": 3.133,
"step": 79200
},
{
"epoch": 23.078052190121156,
"grad_norm": 0.39351069927215576,
"learning_rate": 0.0003232552447552447,
"loss": 3.1474,
"step": 79250
},
{
"epoch": 23.092614165890026,
"grad_norm": 0.4133410155773163,
"learning_rate": 0.00032308041958041957,
"loss": 3.1619,
"step": 79300
},
{
"epoch": 23.1071761416589,
"grad_norm": 0.45815539360046387,
"learning_rate": 0.00032290559440559437,
"loss": 3.1522,
"step": 79350
},
{
"epoch": 23.12173811742777,
"grad_norm": 0.4205847680568695,
"learning_rate": 0.0003227307692307692,
"loss": 3.1542,
"step": 79400
},
{
"epoch": 23.136300093196645,
"grad_norm": 0.38456612825393677,
"learning_rate": 0.000322555944055944,
"loss": 3.1557,
"step": 79450
},
{
"epoch": 23.150862068965516,
"grad_norm": 0.40150612592697144,
"learning_rate": 0.00032238111888111887,
"loss": 3.1687,
"step": 79500
},
{
"epoch": 23.16542404473439,
"grad_norm": 0.4437256455421448,
"learning_rate": 0.00032220629370629367,
"loss": 3.1614,
"step": 79550
},
{
"epoch": 23.17998602050326,
"grad_norm": 0.4081536829471588,
"learning_rate": 0.0003220314685314685,
"loss": 3.1674,
"step": 79600
},
{
"epoch": 23.194547996272135,
"grad_norm": 0.41392892599105835,
"learning_rate": 0.00032185664335664327,
"loss": 3.1684,
"step": 79650
},
{
"epoch": 23.209109972041006,
"grad_norm": 0.38823989033699036,
"learning_rate": 0.0003216818181818181,
"loss": 3.1682,
"step": 79700
},
{
"epoch": 23.22367194780988,
"grad_norm": 0.4122358560562134,
"learning_rate": 0.00032150699300699303,
"loss": 3.1736,
"step": 79750
},
{
"epoch": 23.23823392357875,
"grad_norm": 0.4099797308444977,
"learning_rate": 0.0003213321678321678,
"loss": 3.1803,
"step": 79800
},
{
"epoch": 23.252795899347625,
"grad_norm": 0.4234201908111572,
"learning_rate": 0.00032115734265734263,
"loss": 3.1737,
"step": 79850
},
{
"epoch": 23.267357875116495,
"grad_norm": 0.41463881731033325,
"learning_rate": 0.00032098251748251743,
"loss": 3.179,
"step": 79900
},
{
"epoch": 23.28191985088537,
"grad_norm": 0.40318965911865234,
"learning_rate": 0.0003208076923076923,
"loss": 3.1699,
"step": 79950
},
{
"epoch": 23.29648182665424,
"grad_norm": 0.40535327792167664,
"learning_rate": 0.0003206328671328671,
"loss": 3.1815,
"step": 80000
},
{
"epoch": 23.29648182665424,
"eval_accuracy": 0.3734642714875009,
"eval_loss": 3.5514914989471436,
"eval_runtime": 53.816,
"eval_samples_per_second": 309.276,
"eval_steps_per_second": 19.344,
"step": 80000
},
{
"epoch": 23.311043802423114,
"grad_norm": 0.3818470239639282,
"learning_rate": 0.00032045804195804194,
"loss": 3.1755,
"step": 80050
},
{
"epoch": 23.325605778191985,
"grad_norm": 0.41949790716171265,
"learning_rate": 0.00032028321678321674,
"loss": 3.1716,
"step": 80100
},
{
"epoch": 23.340167753960856,
"grad_norm": 0.4188728928565979,
"learning_rate": 0.0003201083916083916,
"loss": 3.1978,
"step": 80150
},
{
"epoch": 23.35472972972973,
"grad_norm": 0.40345263481140137,
"learning_rate": 0.0003199335664335664,
"loss": 3.1794,
"step": 80200
},
{
"epoch": 23.3692917054986,
"grad_norm": 0.42796680331230164,
"learning_rate": 0.00031975874125874125,
"loss": 3.1957,
"step": 80250
},
{
"epoch": 23.383853681267475,
"grad_norm": 0.39610591530799866,
"learning_rate": 0.00031958391608391605,
"loss": 3.1904,
"step": 80300
},
{
"epoch": 23.398415657036345,
"grad_norm": 0.4445754885673523,
"learning_rate": 0.0003194090909090909,
"loss": 3.1841,
"step": 80350
},
{
"epoch": 23.41297763280522,
"grad_norm": 0.4124175012111664,
"learning_rate": 0.00031923426573426576,
"loss": 3.1849,
"step": 80400
},
{
"epoch": 23.42753960857409,
"grad_norm": 0.4136520028114319,
"learning_rate": 0.0003190594405594405,
"loss": 3.1927,
"step": 80450
},
{
"epoch": 23.442101584342964,
"grad_norm": 0.4014403223991394,
"learning_rate": 0.0003188846153846154,
"loss": 3.1989,
"step": 80500
},
{
"epoch": 23.456663560111835,
"grad_norm": 0.42746081948280334,
"learning_rate": 0.00031870979020979015,
"loss": 3.2109,
"step": 80550
},
{
"epoch": 23.47122553588071,
"grad_norm": 0.3877197504043579,
"learning_rate": 0.000318534965034965,
"loss": 3.191,
"step": 80600
},
{
"epoch": 23.48578751164958,
"grad_norm": 0.42082786560058594,
"learning_rate": 0.0003183601398601398,
"loss": 3.2043,
"step": 80650
},
{
"epoch": 23.500349487418454,
"grad_norm": 0.3994758129119873,
"learning_rate": 0.00031818531468531466,
"loss": 3.1973,
"step": 80700
},
{
"epoch": 23.514911463187325,
"grad_norm": 0.42390763759613037,
"learning_rate": 0.00031801048951048946,
"loss": 3.2018,
"step": 80750
},
{
"epoch": 23.5294734389562,
"grad_norm": 0.4568711519241333,
"learning_rate": 0.0003178356643356643,
"loss": 3.1995,
"step": 80800
},
{
"epoch": 23.54403541472507,
"grad_norm": 0.4177471101284027,
"learning_rate": 0.0003176608391608391,
"loss": 3.1894,
"step": 80850
},
{
"epoch": 23.558597390493944,
"grad_norm": 0.42740505933761597,
"learning_rate": 0.00031748601398601397,
"loss": 3.1983,
"step": 80900
},
{
"epoch": 23.573159366262814,
"grad_norm": 0.4288630187511444,
"learning_rate": 0.00031731118881118877,
"loss": 3.2102,
"step": 80950
},
{
"epoch": 23.58772134203169,
"grad_norm": 0.42180898785591125,
"learning_rate": 0.0003171363636363636,
"loss": 3.218,
"step": 81000
},
{
"epoch": 23.58772134203169,
"eval_accuracy": 0.37408472391336756,
"eval_loss": 3.5433216094970703,
"eval_runtime": 53.7335,
"eval_samples_per_second": 309.751,
"eval_steps_per_second": 19.373,
"step": 81000
},
{
"epoch": 23.60228331780056,
"grad_norm": 0.3826054632663727,
"learning_rate": 0.0003169615384615385,
"loss": 3.2049,
"step": 81050
},
{
"epoch": 23.616845293569433,
"grad_norm": 0.43105751276016235,
"learning_rate": 0.0003167867132867133,
"loss": 3.2022,
"step": 81100
},
{
"epoch": 23.631407269338304,
"grad_norm": 0.4408121705055237,
"learning_rate": 0.00031661188811188813,
"loss": 3.2094,
"step": 81150
},
{
"epoch": 23.645969245107175,
"grad_norm": 0.4172849655151367,
"learning_rate": 0.0003164370629370629,
"loss": 3.2015,
"step": 81200
},
{
"epoch": 23.66053122087605,
"grad_norm": 0.4063512682914734,
"learning_rate": 0.0003162622377622378,
"loss": 3.2081,
"step": 81250
},
{
"epoch": 23.67509319664492,
"grad_norm": 0.4677945673465729,
"learning_rate": 0.00031608741258741253,
"loss": 3.1941,
"step": 81300
},
{
"epoch": 23.689655172413794,
"grad_norm": 0.40490514039993286,
"learning_rate": 0.0003159125874125874,
"loss": 3.2041,
"step": 81350
},
{
"epoch": 23.704217148182664,
"grad_norm": 0.41468971967697144,
"learning_rate": 0.0003157377622377622,
"loss": 3.213,
"step": 81400
},
{
"epoch": 23.71877912395154,
"grad_norm": 0.4094548523426056,
"learning_rate": 0.00031556293706293704,
"loss": 3.2095,
"step": 81450
},
{
"epoch": 23.73334109972041,
"grad_norm": 0.39661234617233276,
"learning_rate": 0.00031538811188811184,
"loss": 3.2104,
"step": 81500
},
{
"epoch": 23.747903075489283,
"grad_norm": 0.37781253457069397,
"learning_rate": 0.0003152132867132867,
"loss": 3.2234,
"step": 81550
},
{
"epoch": 23.762465051258154,
"grad_norm": 0.4035096764564514,
"learning_rate": 0.0003150384615384615,
"loss": 3.2082,
"step": 81600
},
{
"epoch": 23.777027027027028,
"grad_norm": 0.4032893180847168,
"learning_rate": 0.00031486363636363634,
"loss": 3.2254,
"step": 81650
},
{
"epoch": 23.7915890027959,
"grad_norm": 0.4168602228164673,
"learning_rate": 0.00031468881118881114,
"loss": 3.2216,
"step": 81700
},
{
"epoch": 23.806150978564773,
"grad_norm": 0.4204430878162384,
"learning_rate": 0.000314513986013986,
"loss": 3.2252,
"step": 81750
},
{
"epoch": 23.820712954333644,
"grad_norm": 0.3943716287612915,
"learning_rate": 0.00031433916083916085,
"loss": 3.2147,
"step": 81800
},
{
"epoch": 23.835274930102518,
"grad_norm": 0.42684200406074524,
"learning_rate": 0.00031416433566433565,
"loss": 3.2144,
"step": 81850
},
{
"epoch": 23.84983690587139,
"grad_norm": 0.4027450680732727,
"learning_rate": 0.0003139895104895105,
"loss": 3.2226,
"step": 81900
},
{
"epoch": 23.864398881640263,
"grad_norm": 0.3920784294605255,
"learning_rate": 0.00031381468531468525,
"loss": 3.2226,
"step": 81950
},
{
"epoch": 23.878960857409133,
"grad_norm": 0.41487202048301697,
"learning_rate": 0.00031363986013986016,
"loss": 3.2422,
"step": 82000
},
{
"epoch": 23.878960857409133,
"eval_accuracy": 0.37429424565354086,
"eval_loss": 3.5338425636291504,
"eval_runtime": 53.8113,
"eval_samples_per_second": 309.303,
"eval_steps_per_second": 19.345,
"step": 82000
},
{
"epoch": 23.893522833178004,
"grad_norm": 0.3978407680988312,
"learning_rate": 0.0003134650349650349,
"loss": 3.2213,
"step": 82050
},
{
"epoch": 23.908084808946878,
"grad_norm": 0.4096793532371521,
"learning_rate": 0.00031329020979020976,
"loss": 3.2204,
"step": 82100
},
{
"epoch": 23.92264678471575,
"grad_norm": 0.41329044103622437,
"learning_rate": 0.00031311538461538456,
"loss": 3.2204,
"step": 82150
},
{
"epoch": 23.937208760484623,
"grad_norm": 0.4152350425720215,
"learning_rate": 0.0003129405594405594,
"loss": 3.2261,
"step": 82200
},
{
"epoch": 23.951770736253494,
"grad_norm": 0.40401697158813477,
"learning_rate": 0.0003127657342657342,
"loss": 3.2322,
"step": 82250
},
{
"epoch": 23.966332712022368,
"grad_norm": 0.4095125198364258,
"learning_rate": 0.00031259090909090907,
"loss": 3.2234,
"step": 82300
},
{
"epoch": 23.98089468779124,
"grad_norm": 0.4391355812549591,
"learning_rate": 0.00031241608391608386,
"loss": 3.2262,
"step": 82350
},
{
"epoch": 23.995456663560113,
"grad_norm": 0.42681077122688293,
"learning_rate": 0.0003122412587412587,
"loss": 3.2344,
"step": 82400
},
{
"epoch": 24.009902143522833,
"grad_norm": 0.4587476849555969,
"learning_rate": 0.00031206643356643357,
"loss": 3.1612,
"step": 82450
},
{
"epoch": 24.024464119291707,
"grad_norm": 0.4497005045413971,
"learning_rate": 0.00031189160839160837,
"loss": 3.1218,
"step": 82500
},
{
"epoch": 24.039026095060578,
"grad_norm": 0.3948291540145874,
"learning_rate": 0.0003117167832167832,
"loss": 3.1221,
"step": 82550
},
{
"epoch": 24.05358807082945,
"grad_norm": 0.4125128388404846,
"learning_rate": 0.000311541958041958,
"loss": 3.1346,
"step": 82600
},
{
"epoch": 24.068150046598323,
"grad_norm": 0.3803099989891052,
"learning_rate": 0.0003113671328671329,
"loss": 3.1386,
"step": 82650
},
{
"epoch": 24.082712022367193,
"grad_norm": 0.4099292457103729,
"learning_rate": 0.0003111923076923076,
"loss": 3.1414,
"step": 82700
},
{
"epoch": 24.097273998136068,
"grad_norm": 0.43308761715888977,
"learning_rate": 0.00031101748251748253,
"loss": 3.1423,
"step": 82750
},
{
"epoch": 24.111835973904938,
"grad_norm": 0.4209023118019104,
"learning_rate": 0.0003108426573426573,
"loss": 3.1439,
"step": 82800
},
{
"epoch": 24.126397949673812,
"grad_norm": 0.42245200276374817,
"learning_rate": 0.00031066783216783213,
"loss": 3.1285,
"step": 82850
},
{
"epoch": 24.140959925442683,
"grad_norm": 0.43536925315856934,
"learning_rate": 0.00031049300699300693,
"loss": 3.1439,
"step": 82900
},
{
"epoch": 24.155521901211557,
"grad_norm": 0.4053642451763153,
"learning_rate": 0.0003103181818181818,
"loss": 3.1508,
"step": 82950
},
{
"epoch": 24.170083876980428,
"grad_norm": 0.4120132625102997,
"learning_rate": 0.0003101433566433566,
"loss": 3.1459,
"step": 83000
},
{
"epoch": 24.170083876980428,
"eval_accuracy": 0.3734714436682812,
"eval_loss": 3.5529048442840576,
"eval_runtime": 53.7547,
"eval_samples_per_second": 309.629,
"eval_steps_per_second": 19.366,
"step": 83000
},
{
"epoch": 24.184645852749302,
"grad_norm": 0.42911526560783386,
"learning_rate": 0.00030996853146853144,
"loss": 3.1493,
"step": 83050
},
{
"epoch": 24.199207828518173,
"grad_norm": 0.41745200753211975,
"learning_rate": 0.00030979370629370624,
"loss": 3.1569,
"step": 83100
},
{
"epoch": 24.213769804287047,
"grad_norm": 0.4385972321033478,
"learning_rate": 0.0003096188811188811,
"loss": 3.1605,
"step": 83150
},
{
"epoch": 24.228331780055917,
"grad_norm": 0.40563392639160156,
"learning_rate": 0.00030944405594405595,
"loss": 3.1664,
"step": 83200
},
{
"epoch": 24.24289375582479,
"grad_norm": 0.40807583928108215,
"learning_rate": 0.00030926923076923075,
"loss": 3.1623,
"step": 83250
},
{
"epoch": 24.257455731593662,
"grad_norm": 0.4392421841621399,
"learning_rate": 0.0003090944055944056,
"loss": 3.1665,
"step": 83300
},
{
"epoch": 24.272017707362536,
"grad_norm": 0.4189673662185669,
"learning_rate": 0.0003089195804195804,
"loss": 3.1699,
"step": 83350
},
{
"epoch": 24.286579683131407,
"grad_norm": 0.44244757294654846,
"learning_rate": 0.00030874475524475525,
"loss": 3.1737,
"step": 83400
},
{
"epoch": 24.30114165890028,
"grad_norm": 0.46909067034721375,
"learning_rate": 0.00030856993006993,
"loss": 3.1885,
"step": 83450
},
{
"epoch": 24.315703634669152,
"grad_norm": 0.45255881547927856,
"learning_rate": 0.0003083951048951049,
"loss": 3.1687,
"step": 83500
},
{
"epoch": 24.330265610438023,
"grad_norm": 0.4203416705131531,
"learning_rate": 0.00030822027972027965,
"loss": 3.1754,
"step": 83550
},
{
"epoch": 24.344827586206897,
"grad_norm": 0.4178505539894104,
"learning_rate": 0.0003080454545454545,
"loss": 3.1703,
"step": 83600
},
{
"epoch": 24.359389561975767,
"grad_norm": 0.4160827696323395,
"learning_rate": 0.0003078706293706293,
"loss": 3.1656,
"step": 83650
},
{
"epoch": 24.37395153774464,
"grad_norm": 0.401161253452301,
"learning_rate": 0.00030769580419580416,
"loss": 3.1885,
"step": 83700
},
{
"epoch": 24.388513513513512,
"grad_norm": 0.4042643904685974,
"learning_rate": 0.00030752097902097896,
"loss": 3.1816,
"step": 83750
},
{
"epoch": 24.403075489282386,
"grad_norm": 0.4274575412273407,
"learning_rate": 0.0003073461538461538,
"loss": 3.1821,
"step": 83800
},
{
"epoch": 24.417637465051257,
"grad_norm": 0.45580828189849854,
"learning_rate": 0.00030717132867132867,
"loss": 3.1833,
"step": 83850
},
{
"epoch": 24.43219944082013,
"grad_norm": 0.39442870020866394,
"learning_rate": 0.00030699650349650347,
"loss": 3.1932,
"step": 83900
},
{
"epoch": 24.446761416589002,
"grad_norm": 0.3898670971393585,
"learning_rate": 0.0003068216783216783,
"loss": 3.2083,
"step": 83950
},
{
"epoch": 24.461323392357876,
"grad_norm": 0.4018656611442566,
"learning_rate": 0.0003066468531468531,
"loss": 3.1993,
"step": 84000
},
{
"epoch": 24.461323392357876,
"eval_accuracy": 0.3736348753286858,
"eval_loss": 3.550642251968384,
"eval_runtime": 53.7874,
"eval_samples_per_second": 309.441,
"eval_steps_per_second": 19.354,
"step": 84000
},
{
"epoch": 24.475885368126747,
"grad_norm": 0.41906172037124634,
"learning_rate": 0.000306472027972028,
"loss": 3.1734,
"step": 84050
},
{
"epoch": 24.49044734389562,
"grad_norm": 0.43437841534614563,
"learning_rate": 0.0003062972027972028,
"loss": 3.197,
"step": 84100
},
{
"epoch": 24.50500931966449,
"grad_norm": 0.43843838572502136,
"learning_rate": 0.00030612237762237763,
"loss": 3.1891,
"step": 84150
},
{
"epoch": 24.519571295433366,
"grad_norm": 0.41096222400665283,
"learning_rate": 0.0003059475524475524,
"loss": 3.1952,
"step": 84200
},
{
"epoch": 24.534133271202236,
"grad_norm": 0.3851706087589264,
"learning_rate": 0.0003057727272727273,
"loss": 3.1918,
"step": 84250
},
{
"epoch": 24.54869524697111,
"grad_norm": 0.4225330650806427,
"learning_rate": 0.00030559790209790203,
"loss": 3.1952,
"step": 84300
},
{
"epoch": 24.56325722273998,
"grad_norm": 0.411729633808136,
"learning_rate": 0.0003054230769230769,
"loss": 3.1816,
"step": 84350
},
{
"epoch": 24.577819198508855,
"grad_norm": 0.42367714643478394,
"learning_rate": 0.0003052482517482517,
"loss": 3.1886,
"step": 84400
},
{
"epoch": 24.592381174277726,
"grad_norm": 0.3968988060951233,
"learning_rate": 0.00030507342657342654,
"loss": 3.1871,
"step": 84450
},
{
"epoch": 24.606943150046597,
"grad_norm": 0.4362886846065521,
"learning_rate": 0.00030489860139860134,
"loss": 3.1957,
"step": 84500
},
{
"epoch": 24.62150512581547,
"grad_norm": 0.43027788400650024,
"learning_rate": 0.0003047237762237762,
"loss": 3.2085,
"step": 84550
},
{
"epoch": 24.63606710158434,
"grad_norm": 0.42857909202575684,
"learning_rate": 0.00030454895104895104,
"loss": 3.2013,
"step": 84600
},
{
"epoch": 24.650629077353216,
"grad_norm": 0.4252914488315582,
"learning_rate": 0.00030437412587412584,
"loss": 3.1985,
"step": 84650
},
{
"epoch": 24.665191053122086,
"grad_norm": 0.44467562437057495,
"learning_rate": 0.0003041993006993007,
"loss": 3.2045,
"step": 84700
},
{
"epoch": 24.67975302889096,
"grad_norm": 0.4184528887271881,
"learning_rate": 0.0003040244755244755,
"loss": 3.2113,
"step": 84750
},
{
"epoch": 24.69431500465983,
"grad_norm": 0.386417955160141,
"learning_rate": 0.00030384965034965035,
"loss": 3.2107,
"step": 84800
},
{
"epoch": 24.708876980428705,
"grad_norm": 0.40415215492248535,
"learning_rate": 0.00030367482517482515,
"loss": 3.2079,
"step": 84850
},
{
"epoch": 24.723438956197576,
"grad_norm": 0.40872862935066223,
"learning_rate": 0.0003035,
"loss": 3.2053,
"step": 84900
},
{
"epoch": 24.73800093196645,
"grad_norm": 0.38416963815689087,
"learning_rate": 0.00030332517482517475,
"loss": 3.2088,
"step": 84950
},
{
"epoch": 24.75256290773532,
"grad_norm": 0.4279167652130127,
"learning_rate": 0.00030315034965034966,
"loss": 3.2121,
"step": 85000
},
{
"epoch": 24.75256290773532,
"eval_accuracy": 0.3745151723369222,
"eval_loss": 3.5380096435546875,
"eval_runtime": 53.8009,
"eval_samples_per_second": 309.363,
"eval_steps_per_second": 19.349,
"step": 85000
},
{
"epoch": 24.767124883504195,
"grad_norm": 0.4610745310783386,
"learning_rate": 0.0003029755244755244,
"loss": 3.2114,
"step": 85050
},
{
"epoch": 24.781686859273066,
"grad_norm": 0.4009937345981598,
"learning_rate": 0.00030280069930069926,
"loss": 3.2105,
"step": 85100
},
{
"epoch": 24.79624883504194,
"grad_norm": 0.43339040875434875,
"learning_rate": 0.00030262587412587406,
"loss": 3.1943,
"step": 85150
},
{
"epoch": 24.81081081081081,
"grad_norm": 0.46495065093040466,
"learning_rate": 0.0003024510489510489,
"loss": 3.2136,
"step": 85200
},
{
"epoch": 24.825372786579685,
"grad_norm": 0.4479699730873108,
"learning_rate": 0.00030227622377622377,
"loss": 3.2088,
"step": 85250
},
{
"epoch": 24.839934762348555,
"grad_norm": 0.4210740923881531,
"learning_rate": 0.00030210139860139856,
"loss": 3.2095,
"step": 85300
},
{
"epoch": 24.854496738117426,
"grad_norm": 0.4120892584323883,
"learning_rate": 0.0003019265734265734,
"loss": 3.2034,
"step": 85350
},
{
"epoch": 24.8690587138863,
"grad_norm": 0.42725127935409546,
"learning_rate": 0.0003017517482517482,
"loss": 3.2062,
"step": 85400
},
{
"epoch": 24.88362068965517,
"grad_norm": 0.4295431077480316,
"learning_rate": 0.00030157692307692307,
"loss": 3.2098,
"step": 85450
},
{
"epoch": 24.898182665424045,
"grad_norm": 0.4166405200958252,
"learning_rate": 0.00030140209790209787,
"loss": 3.2043,
"step": 85500
},
{
"epoch": 24.912744641192916,
"grad_norm": 0.4288575351238251,
"learning_rate": 0.0003012272727272727,
"loss": 3.2116,
"step": 85550
},
{
"epoch": 24.92730661696179,
"grad_norm": 0.41819635033607483,
"learning_rate": 0.0003010524475524475,
"loss": 3.2256,
"step": 85600
},
{
"epoch": 24.94186859273066,
"grad_norm": 0.39592012763023376,
"learning_rate": 0.0003008776223776224,
"loss": 3.2045,
"step": 85650
},
{
"epoch": 24.956430568499535,
"grad_norm": 0.4047764241695404,
"learning_rate": 0.0003007027972027972,
"loss": 3.2122,
"step": 85700
},
{
"epoch": 24.970992544268405,
"grad_norm": 0.4179820716381073,
"learning_rate": 0.00030052797202797203,
"loss": 3.2178,
"step": 85750
},
{
"epoch": 24.98555452003728,
"grad_norm": 0.40104392170906067,
"learning_rate": 0.0003003531468531468,
"loss": 3.2109,
"step": 85800
},
{
"epoch": 25.0,
"grad_norm": 0.5622439384460449,
"learning_rate": 0.00030017832167832163,
"loss": 3.2232,
"step": 85850
},
{
"epoch": 25.01456197576887,
"grad_norm": 0.4549800157546997,
"learning_rate": 0.0003000034965034965,
"loss": 3.1134,
"step": 85900
},
{
"epoch": 25.029123951537745,
"grad_norm": 0.4355591833591461,
"learning_rate": 0.0002998286713286713,
"loss": 3.1201,
"step": 85950
},
{
"epoch": 25.043685927306615,
"grad_norm": 0.409805566072464,
"learning_rate": 0.00029965384615384614,
"loss": 3.1188,
"step": 86000
},
{
"epoch": 25.043685927306615,
"eval_accuracy": 0.37394857005527515,
"eval_loss": 3.552666425704956,
"eval_runtime": 53.5077,
"eval_samples_per_second": 311.058,
"eval_steps_per_second": 19.455,
"step": 86000
},
{
"epoch": 25.05824790307549,
"grad_norm": 0.4548097848892212,
"learning_rate": 0.00029947902097902094,
"loss": 3.127,
"step": 86050
},
{
"epoch": 25.07280987884436,
"grad_norm": 0.4102542996406555,
"learning_rate": 0.0002993041958041958,
"loss": 3.1309,
"step": 86100
},
{
"epoch": 25.087371854613234,
"grad_norm": 0.43193888664245605,
"learning_rate": 0.0002991293706293706,
"loss": 3.1349,
"step": 86150
},
{
"epoch": 25.101933830382105,
"grad_norm": 0.4661305844783783,
"learning_rate": 0.0002989545454545454,
"loss": 3.1198,
"step": 86200
},
{
"epoch": 25.11649580615098,
"grad_norm": 0.43192169070243835,
"learning_rate": 0.00029877972027972025,
"loss": 3.1369,
"step": 86250
},
{
"epoch": 25.13105778191985,
"grad_norm": 0.44566482305526733,
"learning_rate": 0.0002986048951048951,
"loss": 3.1434,
"step": 86300
},
{
"epoch": 25.145619757688724,
"grad_norm": 0.41159921884536743,
"learning_rate": 0.0002984300699300699,
"loss": 3.1485,
"step": 86350
},
{
"epoch": 25.160181733457595,
"grad_norm": 0.42221495509147644,
"learning_rate": 0.00029825524475524475,
"loss": 3.143,
"step": 86400
},
{
"epoch": 25.17474370922647,
"grad_norm": 0.4047253727912903,
"learning_rate": 0.00029808041958041955,
"loss": 3.1483,
"step": 86450
},
{
"epoch": 25.18930568499534,
"grad_norm": 0.420423686504364,
"learning_rate": 0.0002979055944055944,
"loss": 3.1678,
"step": 86500
},
{
"epoch": 25.203867660764214,
"grad_norm": 0.41166287660598755,
"learning_rate": 0.0002977307692307692,
"loss": 3.1539,
"step": 86550
},
{
"epoch": 25.218429636533084,
"grad_norm": 0.4156063497066498,
"learning_rate": 0.000297555944055944,
"loss": 3.1652,
"step": 86600
},
{
"epoch": 25.23299161230196,
"grad_norm": 0.4309043884277344,
"learning_rate": 0.00029738111888111886,
"loss": 3.1514,
"step": 86650
},
{
"epoch": 25.24755358807083,
"grad_norm": 0.40342429280281067,
"learning_rate": 0.00029720629370629366,
"loss": 3.1473,
"step": 86700
},
{
"epoch": 25.262115563839703,
"grad_norm": 0.4230315387248993,
"learning_rate": 0.0002970314685314685,
"loss": 3.1495,
"step": 86750
},
{
"epoch": 25.276677539608574,
"grad_norm": 0.4255284070968628,
"learning_rate": 0.0002968566433566433,
"loss": 3.1535,
"step": 86800
},
{
"epoch": 25.291239515377445,
"grad_norm": 0.43190792202949524,
"learning_rate": 0.00029668181818181817,
"loss": 3.1664,
"step": 86850
},
{
"epoch": 25.30580149114632,
"grad_norm": 0.41948994994163513,
"learning_rate": 0.00029650699300699297,
"loss": 3.1693,
"step": 86900
},
{
"epoch": 25.32036346691519,
"grad_norm": 0.4156516194343567,
"learning_rate": 0.0002963321678321678,
"loss": 3.1674,
"step": 86950
},
{
"epoch": 25.334925442684064,
"grad_norm": 0.43022507429122925,
"learning_rate": 0.0002961573426573426,
"loss": 3.1541,
"step": 87000
},
{
"epoch": 25.334925442684064,
"eval_accuracy": 0.37386814756914805,
"eval_loss": 3.5510289669036865,
"eval_runtime": 53.6269,
"eval_samples_per_second": 310.367,
"eval_steps_per_second": 19.412,
"step": 87000
},
{
"epoch": 25.349487418452934,
"grad_norm": 0.43121078610420227,
"learning_rate": 0.0002959825174825175,
"loss": 3.1686,
"step": 87050
},
{
"epoch": 25.36404939422181,
"grad_norm": 0.42564132809638977,
"learning_rate": 0.0002958076923076923,
"loss": 3.1656,
"step": 87100
},
{
"epoch": 25.37861136999068,
"grad_norm": 0.41207486391067505,
"learning_rate": 0.00029563286713286713,
"loss": 3.1703,
"step": 87150
},
{
"epoch": 25.393173345759553,
"grad_norm": 0.43623030185699463,
"learning_rate": 0.00029545804195804193,
"loss": 3.169,
"step": 87200
},
{
"epoch": 25.407735321528424,
"grad_norm": 0.44384080171585083,
"learning_rate": 0.0002952832167832168,
"loss": 3.1754,
"step": 87250
},
{
"epoch": 25.4222972972973,
"grad_norm": 0.4300347864627838,
"learning_rate": 0.0002951083916083916,
"loss": 3.1774,
"step": 87300
},
{
"epoch": 25.43685927306617,
"grad_norm": 0.4289652109146118,
"learning_rate": 0.0002949335664335664,
"loss": 3.1638,
"step": 87350
},
{
"epoch": 25.451421248835043,
"grad_norm": 0.4287082850933075,
"learning_rate": 0.00029475874125874124,
"loss": 3.1745,
"step": 87400
},
{
"epoch": 25.465983224603914,
"grad_norm": 0.39625516533851624,
"learning_rate": 0.00029458391608391604,
"loss": 3.1731,
"step": 87450
},
{
"epoch": 25.480545200372788,
"grad_norm": 0.4283997416496277,
"learning_rate": 0.0002944090909090909,
"loss": 3.182,
"step": 87500
},
{
"epoch": 25.49510717614166,
"grad_norm": 0.4386811852455139,
"learning_rate": 0.0002942342657342657,
"loss": 3.177,
"step": 87550
},
{
"epoch": 25.509669151910533,
"grad_norm": 0.42211535573005676,
"learning_rate": 0.00029405944055944054,
"loss": 3.175,
"step": 87600
},
{
"epoch": 25.524231127679403,
"grad_norm": 0.4209630787372589,
"learning_rate": 0.0002938846153846154,
"loss": 3.1778,
"step": 87650
},
{
"epoch": 25.538793103448278,
"grad_norm": 0.44520241022109985,
"learning_rate": 0.0002937097902097902,
"loss": 3.1704,
"step": 87700
},
{
"epoch": 25.553355079217148,
"grad_norm": 0.4534125030040741,
"learning_rate": 0.000293534965034965,
"loss": 3.1802,
"step": 87750
},
{
"epoch": 25.56791705498602,
"grad_norm": 0.40445277094841003,
"learning_rate": 0.00029336013986013985,
"loss": 3.1892,
"step": 87800
},
{
"epoch": 25.582479030754893,
"grad_norm": 0.38358354568481445,
"learning_rate": 0.00029318531468531465,
"loss": 3.1797,
"step": 87850
},
{
"epoch": 25.597041006523764,
"grad_norm": 0.4403440058231354,
"learning_rate": 0.0002930104895104895,
"loss": 3.1851,
"step": 87900
},
{
"epoch": 25.611602982292638,
"grad_norm": 0.4246091842651367,
"learning_rate": 0.0002928356643356643,
"loss": 3.1896,
"step": 87950
},
{
"epoch": 25.62616495806151,
"grad_norm": 0.43709608912467957,
"learning_rate": 0.00029266083916083916,
"loss": 3.182,
"step": 88000
},
{
"epoch": 25.62616495806151,
"eval_accuracy": 0.37425403441047733,
"eval_loss": 3.543616533279419,
"eval_runtime": 53.8799,
"eval_samples_per_second": 308.909,
"eval_steps_per_second": 19.321,
"step": 88000
},
{
"epoch": 25.640726933830383,
"grad_norm": 0.42380014061927795,
"learning_rate": 0.00029248601398601396,
"loss": 3.192,
"step": 88050
},
{
"epoch": 25.655288909599253,
"grad_norm": 0.42167308926582336,
"learning_rate": 0.00029231118881118876,
"loss": 3.1922,
"step": 88100
},
{
"epoch": 25.669850885368128,
"grad_norm": 0.4025057852268219,
"learning_rate": 0.0002921363636363636,
"loss": 3.1967,
"step": 88150
},
{
"epoch": 25.684412861136998,
"grad_norm": 0.4348795413970947,
"learning_rate": 0.0002919615384615384,
"loss": 3.1959,
"step": 88200
},
{
"epoch": 25.698974836905872,
"grad_norm": 0.4117673635482788,
"learning_rate": 0.00029178671328671326,
"loss": 3.197,
"step": 88250
},
{
"epoch": 25.713536812674743,
"grad_norm": 0.40845316648483276,
"learning_rate": 0.00029161188811188806,
"loss": 3.201,
"step": 88300
},
{
"epoch": 25.728098788443617,
"grad_norm": 0.44190987944602966,
"learning_rate": 0.0002914370629370629,
"loss": 3.1909,
"step": 88350
},
{
"epoch": 25.742660764212488,
"grad_norm": 0.4156600832939148,
"learning_rate": 0.00029126223776223777,
"loss": 3.2135,
"step": 88400
},
{
"epoch": 25.757222739981362,
"grad_norm": 0.4604335427284241,
"learning_rate": 0.00029108741258741257,
"loss": 3.1903,
"step": 88450
},
{
"epoch": 25.771784715750233,
"grad_norm": 0.5294921398162842,
"learning_rate": 0.00029091258741258737,
"loss": 3.2009,
"step": 88500
},
{
"epoch": 25.786346691519107,
"grad_norm": 0.45587196946144104,
"learning_rate": 0.0002907377622377622,
"loss": 3.189,
"step": 88550
},
{
"epoch": 25.800908667287977,
"grad_norm": 0.43664881587028503,
"learning_rate": 0.000290562937062937,
"loss": 3.2097,
"step": 88600
},
{
"epoch": 25.815470643056848,
"grad_norm": 0.43888285756111145,
"learning_rate": 0.0002903881118881119,
"loss": 3.1963,
"step": 88650
},
{
"epoch": 25.830032618825722,
"grad_norm": 0.4586959779262543,
"learning_rate": 0.0002902132867132867,
"loss": 3.2004,
"step": 88700
},
{
"epoch": 25.844594594594593,
"grad_norm": 0.4349871873855591,
"learning_rate": 0.00029003846153846153,
"loss": 3.205,
"step": 88750
},
{
"epoch": 25.859156570363467,
"grad_norm": 0.4294140934944153,
"learning_rate": 0.00028986363636363633,
"loss": 3.2124,
"step": 88800
},
{
"epoch": 25.873718546132338,
"grad_norm": 0.3970581293106079,
"learning_rate": 0.00028968881118881113,
"loss": 3.2041,
"step": 88850
},
{
"epoch": 25.888280521901212,
"grad_norm": 0.42786160111427307,
"learning_rate": 0.000289513986013986,
"loss": 3.2013,
"step": 88900
},
{
"epoch": 25.902842497670083,
"grad_norm": 0.453769326210022,
"learning_rate": 0.0002893391608391608,
"loss": 3.1962,
"step": 88950
},
{
"epoch": 25.917404473438957,
"grad_norm": 0.45318394899368286,
"learning_rate": 0.00028916433566433564,
"loss": 3.2141,
"step": 89000
},
{
"epoch": 25.917404473438957,
"eval_accuracy": 0.3747393911688585,
"eval_loss": 3.534339427947998,
"eval_runtime": 53.6851,
"eval_samples_per_second": 310.03,
"eval_steps_per_second": 19.391,
"step": 89000
},
{
"epoch": 25.931966449207827,
"grad_norm": 0.4190371036529541,
"learning_rate": 0.0002889895104895105,
"loss": 3.2111,
"step": 89050
},
{
"epoch": 25.9465284249767,
"grad_norm": 0.4242444336414337,
"learning_rate": 0.0002888146853146853,
"loss": 3.2154,
"step": 89100
},
{
"epoch": 25.961090400745572,
"grad_norm": 0.43644312024116516,
"learning_rate": 0.00028863986013986015,
"loss": 3.2004,
"step": 89150
},
{
"epoch": 25.975652376514446,
"grad_norm": 0.41194868087768555,
"learning_rate": 0.00028846503496503495,
"loss": 3.1978,
"step": 89200
},
{
"epoch": 25.990214352283317,
"grad_norm": 0.38906967639923096,
"learning_rate": 0.00028829020979020975,
"loss": 3.2196,
"step": 89250
},
{
"epoch": 26.004659832246038,
"grad_norm": 0.4650445282459259,
"learning_rate": 0.0002881153846153846,
"loss": 3.1695,
"step": 89300
},
{
"epoch": 26.019221808014912,
"grad_norm": 0.45053941011428833,
"learning_rate": 0.0002879405594405594,
"loss": 3.099,
"step": 89350
},
{
"epoch": 26.033783783783782,
"grad_norm": 0.4252159595489502,
"learning_rate": 0.00028776573426573425,
"loss": 3.1137,
"step": 89400
},
{
"epoch": 26.048345759552657,
"grad_norm": 0.4305402338504791,
"learning_rate": 0.00028759090909090905,
"loss": 3.1096,
"step": 89450
},
{
"epoch": 26.062907735321527,
"grad_norm": 0.40122270584106445,
"learning_rate": 0.0002874160839160839,
"loss": 3.1085,
"step": 89500
},
{
"epoch": 26.0774697110904,
"grad_norm": 0.4272627532482147,
"learning_rate": 0.0002872412587412587,
"loss": 3.1185,
"step": 89550
},
{
"epoch": 26.092031686859272,
"grad_norm": 0.4182010293006897,
"learning_rate": 0.0002870664335664335,
"loss": 3.1124,
"step": 89600
},
{
"epoch": 26.106593662628146,
"grad_norm": 0.4463525414466858,
"learning_rate": 0.00028689160839160836,
"loss": 3.1189,
"step": 89650
},
{
"epoch": 26.121155638397017,
"grad_norm": 0.4242148995399475,
"learning_rate": 0.0002867167832167832,
"loss": 3.1286,
"step": 89700
},
{
"epoch": 26.13571761416589,
"grad_norm": 0.46834519505500793,
"learning_rate": 0.000286541958041958,
"loss": 3.1437,
"step": 89750
},
{
"epoch": 26.15027958993476,
"grad_norm": 0.4111859202384949,
"learning_rate": 0.00028636713286713287,
"loss": 3.1388,
"step": 89800
},
{
"epoch": 26.164841565703636,
"grad_norm": 0.45419996976852417,
"learning_rate": 0.00028619230769230767,
"loss": 3.1428,
"step": 89850
},
{
"epoch": 26.179403541472507,
"grad_norm": 0.42230865359306335,
"learning_rate": 0.0002860174825174825,
"loss": 3.1496,
"step": 89900
},
{
"epoch": 26.19396551724138,
"grad_norm": 0.4368692636489868,
"learning_rate": 0.0002858426573426573,
"loss": 3.1308,
"step": 89950
},
{
"epoch": 26.20852749301025,
"grad_norm": 0.40982940793037415,
"learning_rate": 0.0002856678321678321,
"loss": 3.1395,
"step": 90000
},
{
"epoch": 26.20852749301025,
"eval_accuracy": 0.37377961228836776,
"eval_loss": 3.556647300720215,
"eval_runtime": 53.6833,
"eval_samples_per_second": 310.041,
"eval_steps_per_second": 19.392,
"step": 90000
},
{
"epoch": 26.223089468779126,
"grad_norm": 0.44282498955726624,
"learning_rate": 0.000285493006993007,
"loss": 3.1518,
"step": 90050
},
{
"epoch": 26.237651444547996,
"grad_norm": 0.4250069260597229,
"learning_rate": 0.0002853181818181818,
"loss": 3.147,
"step": 90100
},
{
"epoch": 26.252213420316867,
"grad_norm": 0.4652498662471771,
"learning_rate": 0.00028514335664335663,
"loss": 3.144,
"step": 90150
},
{
"epoch": 26.26677539608574,
"grad_norm": 0.41768208146095276,
"learning_rate": 0.00028496853146853143,
"loss": 3.1613,
"step": 90200
},
{
"epoch": 26.28133737185461,
"grad_norm": 0.4293578565120697,
"learning_rate": 0.0002847937062937063,
"loss": 3.1477,
"step": 90250
},
{
"epoch": 26.295899347623486,
"grad_norm": 0.43309634923934937,
"learning_rate": 0.0002846188811188811,
"loss": 3.16,
"step": 90300
},
{
"epoch": 26.310461323392357,
"grad_norm": 0.4498419463634491,
"learning_rate": 0.0002844440559440559,
"loss": 3.1534,
"step": 90350
},
{
"epoch": 26.32502329916123,
"grad_norm": 0.45427951216697693,
"learning_rate": 0.00028426923076923074,
"loss": 3.1634,
"step": 90400
},
{
"epoch": 26.3395852749301,
"grad_norm": 0.43068617582321167,
"learning_rate": 0.0002840944055944056,
"loss": 3.1627,
"step": 90450
},
{
"epoch": 26.354147250698976,
"grad_norm": 0.42055797576904297,
"learning_rate": 0.0002839195804195804,
"loss": 3.1581,
"step": 90500
},
{
"epoch": 26.368709226467846,
"grad_norm": 0.4183354675769806,
"learning_rate": 0.00028374475524475524,
"loss": 3.1586,
"step": 90550
},
{
"epoch": 26.38327120223672,
"grad_norm": 0.43487775325775146,
"learning_rate": 0.00028356993006993004,
"loss": 3.156,
"step": 90600
},
{
"epoch": 26.39783317800559,
"grad_norm": 0.42014575004577637,
"learning_rate": 0.0002833951048951049,
"loss": 3.1716,
"step": 90650
},
{
"epoch": 26.412395153774465,
"grad_norm": 0.42945271730422974,
"learning_rate": 0.0002832202797202797,
"loss": 3.1736,
"step": 90700
},
{
"epoch": 26.426957129543336,
"grad_norm": 0.41654732823371887,
"learning_rate": 0.0002830454545454545,
"loss": 3.1642,
"step": 90750
},
{
"epoch": 26.44151910531221,
"grad_norm": 0.4265001118183136,
"learning_rate": 0.00028287062937062935,
"loss": 3.1667,
"step": 90800
},
{
"epoch": 26.45608108108108,
"grad_norm": 0.4233382046222687,
"learning_rate": 0.00028269580419580415,
"loss": 3.163,
"step": 90850
},
{
"epoch": 26.470643056849955,
"grad_norm": 0.4084939658641815,
"learning_rate": 0.000282520979020979,
"loss": 3.1625,
"step": 90900
},
{
"epoch": 26.485205032618826,
"grad_norm": 0.45396101474761963,
"learning_rate": 0.0002823461538461538,
"loss": 3.1666,
"step": 90950
},
{
"epoch": 26.4997670083877,
"grad_norm": 0.42796140909194946,
"learning_rate": 0.00028217132867132866,
"loss": 3.1696,
"step": 91000
},
{
"epoch": 26.4997670083877,
"eval_accuracy": 0.3743129403542634,
"eval_loss": 3.5446956157684326,
"eval_runtime": 53.4825,
"eval_samples_per_second": 311.205,
"eval_steps_per_second": 19.464,
"step": 91000
},
{
"epoch": 26.51432898415657,
"grad_norm": 0.46724438667297363,
"learning_rate": 0.00028199650349650346,
"loss": 3.1759,
"step": 91050
},
{
"epoch": 26.52889095992544,
"grad_norm": 0.41262003779411316,
"learning_rate": 0.0002818216783216783,
"loss": 3.1775,
"step": 91100
},
{
"epoch": 26.543452935694315,
"grad_norm": 0.3990762233734131,
"learning_rate": 0.0002816468531468531,
"loss": 3.179,
"step": 91150
},
{
"epoch": 26.558014911463186,
"grad_norm": 0.4612864553928375,
"learning_rate": 0.00028147202797202796,
"loss": 3.1815,
"step": 91200
},
{
"epoch": 26.57257688723206,
"grad_norm": 0.4417341947555542,
"learning_rate": 0.00028129720279720276,
"loss": 3.1754,
"step": 91250
},
{
"epoch": 26.58713886300093,
"grad_norm": 0.4242549538612366,
"learning_rate": 0.0002811223776223776,
"loss": 3.1781,
"step": 91300
},
{
"epoch": 26.601700838769805,
"grad_norm": 0.4152440130710602,
"learning_rate": 0.0002809475524475524,
"loss": 3.173,
"step": 91350
},
{
"epoch": 26.616262814538675,
"grad_norm": 0.407860666513443,
"learning_rate": 0.00028077272727272727,
"loss": 3.1779,
"step": 91400
},
{
"epoch": 26.63082479030755,
"grad_norm": 0.42334768176078796,
"learning_rate": 0.00028059790209790207,
"loss": 3.1807,
"step": 91450
},
{
"epoch": 26.64538676607642,
"grad_norm": 0.41385746002197266,
"learning_rate": 0.00028042307692307687,
"loss": 3.184,
"step": 91500
},
{
"epoch": 26.659948741845295,
"grad_norm": 0.4681982696056366,
"learning_rate": 0.0002802482517482517,
"loss": 3.1806,
"step": 91550
},
{
"epoch": 26.674510717614165,
"grad_norm": 0.44684475660324097,
"learning_rate": 0.0002800734265734265,
"loss": 3.1788,
"step": 91600
},
{
"epoch": 26.68907269338304,
"grad_norm": 0.45268094539642334,
"learning_rate": 0.0002798986013986014,
"loss": 3.1951,
"step": 91650
},
{
"epoch": 26.70363466915191,
"grad_norm": 0.4566771984100342,
"learning_rate": 0.0002797237762237762,
"loss": 3.1857,
"step": 91700
},
{
"epoch": 26.718196644920784,
"grad_norm": 0.4360271394252777,
"learning_rate": 0.00027954895104895103,
"loss": 3.1925,
"step": 91750
},
{
"epoch": 26.732758620689655,
"grad_norm": 0.41877779364585876,
"learning_rate": 0.0002793741258741259,
"loss": 3.1743,
"step": 91800
},
{
"epoch": 26.74732059645853,
"grad_norm": 0.451029509305954,
"learning_rate": 0.0002791993006993007,
"loss": 3.1816,
"step": 91850
},
{
"epoch": 26.7618825722274,
"grad_norm": 0.42223063111305237,
"learning_rate": 0.0002790244755244755,
"loss": 3.1882,
"step": 91900
},
{
"epoch": 26.776444547996274,
"grad_norm": 0.44341567158699036,
"learning_rate": 0.00027884965034965034,
"loss": 3.1878,
"step": 91950
},
{
"epoch": 26.791006523765144,
"grad_norm": 0.4176470935344696,
"learning_rate": 0.00027867482517482514,
"loss": 3.1961,
"step": 92000
},
{
"epoch": 26.791006523765144,
"eval_accuracy": 0.3744488590588876,
"eval_loss": 3.5415284633636475,
"eval_runtime": 53.6177,
"eval_samples_per_second": 310.42,
"eval_steps_per_second": 19.415,
"step": 92000
},
{
"epoch": 26.805568499534015,
"grad_norm": 0.4171764850616455,
"learning_rate": 0.0002785,
"loss": 3.1802,
"step": 92050
},
{
"epoch": 26.82013047530289,
"grad_norm": 0.42712369561195374,
"learning_rate": 0.0002783251748251748,
"loss": 3.195,
"step": 92100
},
{
"epoch": 26.83469245107176,
"grad_norm": 0.4453660547733307,
"learning_rate": 0.00027815034965034965,
"loss": 3.1924,
"step": 92150
},
{
"epoch": 26.849254426840634,
"grad_norm": 0.42237964272499084,
"learning_rate": 0.00027797552447552445,
"loss": 3.1995,
"step": 92200
},
{
"epoch": 26.863816402609505,
"grad_norm": 0.4148341417312622,
"learning_rate": 0.00027780069930069925,
"loss": 3.1963,
"step": 92250
},
{
"epoch": 26.87837837837838,
"grad_norm": 0.4194555878639221,
"learning_rate": 0.0002776258741258741,
"loss": 3.1966,
"step": 92300
},
{
"epoch": 26.89294035414725,
"grad_norm": 0.4421636164188385,
"learning_rate": 0.0002774510489510489,
"loss": 3.2083,
"step": 92350
},
{
"epoch": 26.907502329916124,
"grad_norm": 0.42861008644104004,
"learning_rate": 0.00027727622377622375,
"loss": 3.1808,
"step": 92400
},
{
"epoch": 26.922064305684994,
"grad_norm": 0.4487074017524719,
"learning_rate": 0.00027710139860139855,
"loss": 3.2002,
"step": 92450
},
{
"epoch": 26.93662628145387,
"grad_norm": 0.4668884873390198,
"learning_rate": 0.0002769265734265734,
"loss": 3.1858,
"step": 92500
},
{
"epoch": 26.95118825722274,
"grad_norm": 0.4213342070579529,
"learning_rate": 0.00027675174825174826,
"loss": 3.2018,
"step": 92550
},
{
"epoch": 26.965750232991613,
"grad_norm": 0.4327002167701721,
"learning_rate": 0.00027657692307692306,
"loss": 3.1964,
"step": 92600
},
{
"epoch": 26.980312208760484,
"grad_norm": 0.43151915073394775,
"learning_rate": 0.00027640209790209786,
"loss": 3.2008,
"step": 92650
},
{
"epoch": 26.99487418452936,
"grad_norm": 0.45595821738243103,
"learning_rate": 0.0002762272727272727,
"loss": 3.197,
"step": 92700
},
{
"epoch": 27.00931966449208,
"grad_norm": 0.43289080262184143,
"learning_rate": 0.0002760524475524475,
"loss": 3.1388,
"step": 92750
},
{
"epoch": 27.02388164026095,
"grad_norm": 0.4297803044319153,
"learning_rate": 0.00027587762237762237,
"loss": 3.1002,
"step": 92800
},
{
"epoch": 27.038443616029824,
"grad_norm": 0.44579535722732544,
"learning_rate": 0.00027570279720279717,
"loss": 3.0885,
"step": 92850
},
{
"epoch": 27.053005591798694,
"grad_norm": 0.43222174048423767,
"learning_rate": 0.000275527972027972,
"loss": 3.1066,
"step": 92900
},
{
"epoch": 27.06756756756757,
"grad_norm": 0.4319220185279846,
"learning_rate": 0.0002753531468531468,
"loss": 3.0927,
"step": 92950
},
{
"epoch": 27.08212954333644,
"grad_norm": 0.4116879999637604,
"learning_rate": 0.0002751783216783216,
"loss": 3.1084,
"step": 93000
},
{
"epoch": 27.08212954333644,
"eval_accuracy": 0.3740686159007953,
"eval_loss": 3.5531694889068604,
"eval_runtime": 53.7303,
"eval_samples_per_second": 309.769,
"eval_steps_per_second": 19.375,
"step": 93000
},
{
"epoch": 27.096691519105313,
"grad_norm": 0.4252121150493622,
"learning_rate": 0.0002750034965034965,
"loss": 3.1168,
"step": 93050
},
{
"epoch": 27.111253494874184,
"grad_norm": 0.4439890682697296,
"learning_rate": 0.0002748286713286713,
"loss": 3.1125,
"step": 93100
},
{
"epoch": 27.125815470643058,
"grad_norm": 0.4510648250579834,
"learning_rate": 0.00027465384615384613,
"loss": 3.1169,
"step": 93150
},
{
"epoch": 27.14037744641193,
"grad_norm": 0.4003378748893738,
"learning_rate": 0.000274479020979021,
"loss": 3.1248,
"step": 93200
},
{
"epoch": 27.154939422180803,
"grad_norm": 0.43054619431495667,
"learning_rate": 0.0002743041958041958,
"loss": 3.116,
"step": 93250
},
{
"epoch": 27.169501397949674,
"grad_norm": 0.4461906850337982,
"learning_rate": 0.00027412937062937064,
"loss": 3.1239,
"step": 93300
},
{
"epoch": 27.184063373718548,
"grad_norm": 0.4158555269241333,
"learning_rate": 0.00027395454545454544,
"loss": 3.1211,
"step": 93350
},
{
"epoch": 27.19862534948742,
"grad_norm": 0.444968581199646,
"learning_rate": 0.00027377972027972024,
"loss": 3.1313,
"step": 93400
},
{
"epoch": 27.21318732525629,
"grad_norm": 0.42571595311164856,
"learning_rate": 0.0002736048951048951,
"loss": 3.1396,
"step": 93450
},
{
"epoch": 27.227749301025163,
"grad_norm": 0.45029646158218384,
"learning_rate": 0.0002734300699300699,
"loss": 3.141,
"step": 93500
},
{
"epoch": 27.242311276794034,
"grad_norm": 0.4302981197834015,
"learning_rate": 0.00027325524475524474,
"loss": 3.1396,
"step": 93550
},
{
"epoch": 27.256873252562908,
"grad_norm": 0.4424735903739929,
"learning_rate": 0.00027308041958041954,
"loss": 3.1427,
"step": 93600
},
{
"epoch": 27.27143522833178,
"grad_norm": 0.4379088580608368,
"learning_rate": 0.0002729055944055944,
"loss": 3.1474,
"step": 93650
},
{
"epoch": 27.285997204100653,
"grad_norm": 0.428368479013443,
"learning_rate": 0.0002727307692307692,
"loss": 3.1487,
"step": 93700
},
{
"epoch": 27.300559179869524,
"grad_norm": 0.4178659915924072,
"learning_rate": 0.000272555944055944,
"loss": 3.1444,
"step": 93750
},
{
"epoch": 27.315121155638398,
"grad_norm": 0.42984670400619507,
"learning_rate": 0.00027238111888111885,
"loss": 3.1458,
"step": 93800
},
{
"epoch": 27.32968313140727,
"grad_norm": 0.4429548680782318,
"learning_rate": 0.0002722062937062937,
"loss": 3.146,
"step": 93850
},
{
"epoch": 27.344245107176143,
"grad_norm": 0.475292444229126,
"learning_rate": 0.0002720314685314685,
"loss": 3.1438,
"step": 93900
},
{
"epoch": 27.358807082945013,
"grad_norm": 0.4281918406486511,
"learning_rate": 0.00027185664335664336,
"loss": 3.1511,
"step": 93950
},
{
"epoch": 27.373369058713887,
"grad_norm": 0.40582549571990967,
"learning_rate": 0.00027168181818181816,
"loss": 3.1561,
"step": 94000
},
{
"epoch": 27.373369058713887,
"eval_accuracy": 0.374502474049639,
"eval_loss": 3.548748254776001,
"eval_runtime": 53.6868,
"eval_samples_per_second": 310.02,
"eval_steps_per_second": 19.39,
"step": 94000
},
{
"epoch": 27.387931034482758,
"grad_norm": 0.47851642966270447,
"learning_rate": 0.000271506993006993,
"loss": 3.1493,
"step": 94050
},
{
"epoch": 27.402493010251632,
"grad_norm": 0.45141974091529846,
"learning_rate": 0.0002713321678321678,
"loss": 3.1604,
"step": 94100
},
{
"epoch": 27.417054986020503,
"grad_norm": 0.44606834650039673,
"learning_rate": 0.0002711573426573426,
"loss": 3.1586,
"step": 94150
},
{
"epoch": 27.431616961789377,
"grad_norm": 0.4571624994277954,
"learning_rate": 0.00027098251748251746,
"loss": 3.1612,
"step": 94200
},
{
"epoch": 27.446178937558248,
"grad_norm": 0.4431142807006836,
"learning_rate": 0.00027080769230769226,
"loss": 3.1489,
"step": 94250
},
{
"epoch": 27.460740913327122,
"grad_norm": 0.4453687071800232,
"learning_rate": 0.0002706328671328671,
"loss": 3.1676,
"step": 94300
},
{
"epoch": 27.475302889095992,
"grad_norm": 0.44453132152557373,
"learning_rate": 0.0002704580419580419,
"loss": 3.1509,
"step": 94350
},
{
"epoch": 27.489864864864863,
"grad_norm": 0.45242783427238464,
"learning_rate": 0.00027028321678321677,
"loss": 3.1541,
"step": 94400
},
{
"epoch": 27.504426840633737,
"grad_norm": 0.4506557583808899,
"learning_rate": 0.00027010839160839157,
"loss": 3.1683,
"step": 94450
},
{
"epoch": 27.518988816402608,
"grad_norm": 0.4558444321155548,
"learning_rate": 0.00026993356643356637,
"loss": 3.1673,
"step": 94500
},
{
"epoch": 27.533550792171482,
"grad_norm": 0.4382563531398773,
"learning_rate": 0.0002697587412587412,
"loss": 3.1651,
"step": 94550
},
{
"epoch": 27.548112767940353,
"grad_norm": 0.41427773237228394,
"learning_rate": 0.0002695839160839161,
"loss": 3.1763,
"step": 94600
},
{
"epoch": 27.562674743709227,
"grad_norm": 0.4399355351924896,
"learning_rate": 0.0002694090909090909,
"loss": 3.1505,
"step": 94650
},
{
"epoch": 27.577236719478098,
"grad_norm": 0.4320809841156006,
"learning_rate": 0.00026923426573426573,
"loss": 3.1723,
"step": 94700
},
{
"epoch": 27.591798695246972,
"grad_norm": 0.4338119328022003,
"learning_rate": 0.00026905944055944053,
"loss": 3.1757,
"step": 94750
},
{
"epoch": 27.606360671015842,
"grad_norm": 0.42641645669937134,
"learning_rate": 0.0002688846153846154,
"loss": 3.1758,
"step": 94800
},
{
"epoch": 27.620922646784717,
"grad_norm": 0.4421967566013336,
"learning_rate": 0.0002687097902097902,
"loss": 3.1772,
"step": 94850
},
{
"epoch": 27.635484622553587,
"grad_norm": 0.46785882115364075,
"learning_rate": 0.000268534965034965,
"loss": 3.1662,
"step": 94900
},
{
"epoch": 27.65004659832246,
"grad_norm": 0.4574052095413208,
"learning_rate": 0.00026836013986013984,
"loss": 3.1755,
"step": 94950
},
{
"epoch": 27.664608574091332,
"grad_norm": 0.4320479929447174,
"learning_rate": 0.00026818531468531464,
"loss": 3.1751,
"step": 95000
},
{
"epoch": 27.664608574091332,
"eval_accuracy": 0.3745143492997835,
"eval_loss": 3.5431621074676514,
"eval_runtime": 53.7035,
"eval_samples_per_second": 309.924,
"eval_steps_per_second": 19.384,
"step": 95000
},
{
"epoch": 27.679170549860206,
"grad_norm": 0.43245890736579895,
"learning_rate": 0.0002680104895104895,
"loss": 3.1758,
"step": 95050
},
{
"epoch": 27.693732525629077,
"grad_norm": 0.4555778503417969,
"learning_rate": 0.0002678356643356643,
"loss": 3.1862,
"step": 95100
},
{
"epoch": 27.70829450139795,
"grad_norm": 0.4318227171897888,
"learning_rate": 0.00026766083916083915,
"loss": 3.1614,
"step": 95150
},
{
"epoch": 27.72285647716682,
"grad_norm": 0.44459792971611023,
"learning_rate": 0.00026748601398601395,
"loss": 3.1822,
"step": 95200
},
{
"epoch": 27.737418452935696,
"grad_norm": 0.42615237832069397,
"learning_rate": 0.0002673111888111888,
"loss": 3.1924,
"step": 95250
},
{
"epoch": 27.751980428704567,
"grad_norm": 0.46632900834083557,
"learning_rate": 0.0002671363636363636,
"loss": 3.1863,
"step": 95300
},
{
"epoch": 27.766542404473437,
"grad_norm": 0.4340624213218689,
"learning_rate": 0.00026696153846153845,
"loss": 3.1919,
"step": 95350
},
{
"epoch": 27.78110438024231,
"grad_norm": 0.44297027587890625,
"learning_rate": 0.00026678671328671325,
"loss": 3.1803,
"step": 95400
},
{
"epoch": 27.795666356011182,
"grad_norm": 0.4163863956928253,
"learning_rate": 0.0002666118881118881,
"loss": 3.1778,
"step": 95450
},
{
"epoch": 27.810228331780056,
"grad_norm": 0.4271012842655182,
"learning_rate": 0.0002664370629370629,
"loss": 3.1921,
"step": 95500
},
{
"epoch": 27.824790307548927,
"grad_norm": 0.4465010464191437,
"learning_rate": 0.00026626223776223776,
"loss": 3.1824,
"step": 95550
},
{
"epoch": 27.8393522833178,
"grad_norm": 0.44772741198539734,
"learning_rate": 0.00026608741258741256,
"loss": 3.1743,
"step": 95600
},
{
"epoch": 27.85391425908667,
"grad_norm": 0.42869317531585693,
"learning_rate": 0.00026591258741258736,
"loss": 3.1881,
"step": 95650
},
{
"epoch": 27.868476234855546,
"grad_norm": 0.4499087631702423,
"learning_rate": 0.0002657377622377622,
"loss": 3.1821,
"step": 95700
},
{
"epoch": 27.883038210624417,
"grad_norm": 0.43807747960090637,
"learning_rate": 0.000265562937062937,
"loss": 3.1826,
"step": 95750
},
{
"epoch": 27.89760018639329,
"grad_norm": 0.42944756150245667,
"learning_rate": 0.00026538811188811187,
"loss": 3.1834,
"step": 95800
},
{
"epoch": 27.91216216216216,
"grad_norm": 0.44492489099502563,
"learning_rate": 0.00026521328671328667,
"loss": 3.1759,
"step": 95850
},
{
"epoch": 27.926724137931036,
"grad_norm": 0.44922009110450745,
"learning_rate": 0.0002650384615384615,
"loss": 3.191,
"step": 95900
},
{
"epoch": 27.941286113699906,
"grad_norm": 0.4366289973258972,
"learning_rate": 0.0002648636363636364,
"loss": 3.2052,
"step": 95950
},
{
"epoch": 27.95584808946878,
"grad_norm": 0.4741586446762085,
"learning_rate": 0.0002646888111888112,
"loss": 3.1853,
"step": 96000
},
{
"epoch": 27.95584808946878,
"eval_accuracy": 0.3749015294851879,
"eval_loss": 3.535959005355835,
"eval_runtime": 53.7399,
"eval_samples_per_second": 309.714,
"eval_steps_per_second": 19.371,
"step": 96000
},
{
"epoch": 27.97041006523765,
"grad_norm": 0.4826781153678894,
"learning_rate": 0.000264513986013986,
"loss": 3.2059,
"step": 96050
},
{
"epoch": 27.984972041006525,
"grad_norm": 0.42744797468185425,
"learning_rate": 0.00026433916083916083,
"loss": 3.1952,
"step": 96100
},
{
"epoch": 27.999534016775396,
"grad_norm": 0.4376899302005768,
"learning_rate": 0.00026416433566433563,
"loss": 3.1929,
"step": 96150
},
{
"epoch": 28.013979496738116,
"grad_norm": 0.4622137248516083,
"learning_rate": 0.0002639895104895105,
"loss": 3.0896,
"step": 96200
},
{
"epoch": 28.02854147250699,
"grad_norm": 0.44420769810676575,
"learning_rate": 0.0002638146853146853,
"loss": 3.1003,
"step": 96250
},
{
"epoch": 28.04310344827586,
"grad_norm": 0.4357249438762665,
"learning_rate": 0.00026363986013986014,
"loss": 3.0941,
"step": 96300
},
{
"epoch": 28.057665424044735,
"grad_norm": 0.4448012113571167,
"learning_rate": 0.00026346503496503494,
"loss": 3.0897,
"step": 96350
},
{
"epoch": 28.072227399813606,
"grad_norm": 0.4402657151222229,
"learning_rate": 0.00026329020979020974,
"loss": 3.1048,
"step": 96400
},
{
"epoch": 28.08678937558248,
"grad_norm": 0.4506484270095825,
"learning_rate": 0.0002631153846153846,
"loss": 3.1187,
"step": 96450
},
{
"epoch": 28.10135135135135,
"grad_norm": 0.44956478476524353,
"learning_rate": 0.0002629405594405594,
"loss": 3.1123,
"step": 96500
},
{
"epoch": 28.115913327120225,
"grad_norm": 0.4638758599758148,
"learning_rate": 0.00026276573426573424,
"loss": 3.1151,
"step": 96550
},
{
"epoch": 28.130475302889096,
"grad_norm": 0.4638550877571106,
"learning_rate": 0.00026259090909090904,
"loss": 3.1031,
"step": 96600
},
{
"epoch": 28.14503727865797,
"grad_norm": 0.4719196856021881,
"learning_rate": 0.0002624160839160839,
"loss": 3.1213,
"step": 96650
},
{
"epoch": 28.15959925442684,
"grad_norm": 0.4546055495738983,
"learning_rate": 0.00026224125874125875,
"loss": 3.1215,
"step": 96700
},
{
"epoch": 28.174161230195715,
"grad_norm": 0.4388442635536194,
"learning_rate": 0.00026206643356643355,
"loss": 3.1116,
"step": 96750
},
{
"epoch": 28.188723205964585,
"grad_norm": 0.4409039318561554,
"learning_rate": 0.00026189160839160835,
"loss": 3.1271,
"step": 96800
},
{
"epoch": 28.203285181733456,
"grad_norm": 0.47931012511253357,
"learning_rate": 0.0002617167832167832,
"loss": 3.1201,
"step": 96850
},
{
"epoch": 28.21784715750233,
"grad_norm": 0.49225330352783203,
"learning_rate": 0.000261541958041958,
"loss": 3.1317,
"step": 96900
},
{
"epoch": 28.2324091332712,
"grad_norm": 0.46105310320854187,
"learning_rate": 0.00026136713286713286,
"loss": 3.1278,
"step": 96950
},
{
"epoch": 28.246971109040075,
"grad_norm": 0.4445840120315552,
"learning_rate": 0.00026119230769230766,
"loss": 3.139,
"step": 97000
},
{
"epoch": 28.246971109040075,
"eval_accuracy": 0.3739401045304197,
"eval_loss": 3.554464340209961,
"eval_runtime": 53.4624,
"eval_samples_per_second": 311.322,
"eval_steps_per_second": 19.472,
"step": 97000
},
{
"epoch": 28.261533084808946,
"grad_norm": 0.4632137417793274,
"learning_rate": 0.0002610174825174825,
"loss": 3.1351,
"step": 97050
},
{
"epoch": 28.27609506057782,
"grad_norm": 0.4355744421482086,
"learning_rate": 0.0002608426573426573,
"loss": 3.1259,
"step": 97100
},
{
"epoch": 28.29065703634669,
"grad_norm": 0.42398175597190857,
"learning_rate": 0.0002606678321678321,
"loss": 3.1389,
"step": 97150
},
{
"epoch": 28.305219012115565,
"grad_norm": 0.42285165190696716,
"learning_rate": 0.00026049300699300696,
"loss": 3.1449,
"step": 97200
},
{
"epoch": 28.319780987884435,
"grad_norm": 0.4170607924461365,
"learning_rate": 0.00026031818181818176,
"loss": 3.135,
"step": 97250
},
{
"epoch": 28.33434296365331,
"grad_norm": 0.4427075684070587,
"learning_rate": 0.0002601433566433566,
"loss": 3.1346,
"step": 97300
},
{
"epoch": 28.34890493942218,
"grad_norm": 0.4347171485424042,
"learning_rate": 0.00025996853146853147,
"loss": 3.1472,
"step": 97350
},
{
"epoch": 28.363466915191054,
"grad_norm": 0.44221243262290955,
"learning_rate": 0.00025979370629370627,
"loss": 3.1354,
"step": 97400
},
{
"epoch": 28.378028890959925,
"grad_norm": 0.45819225907325745,
"learning_rate": 0.0002596188811188811,
"loss": 3.1474,
"step": 97450
},
{
"epoch": 28.3925908667288,
"grad_norm": 0.44699475169181824,
"learning_rate": 0.0002594440559440559,
"loss": 3.1431,
"step": 97500
},
{
"epoch": 28.40715284249767,
"grad_norm": 0.45074644684791565,
"learning_rate": 0.0002592692307692307,
"loss": 3.1619,
"step": 97550
},
{
"epoch": 28.421714818266544,
"grad_norm": 0.42600199580192566,
"learning_rate": 0.0002590944055944056,
"loss": 3.1447,
"step": 97600
},
{
"epoch": 28.436276794035415,
"grad_norm": 0.44456231594085693,
"learning_rate": 0.0002589195804195804,
"loss": 3.1499,
"step": 97650
},
{
"epoch": 28.450838769804285,
"grad_norm": 0.4862534701824188,
"learning_rate": 0.00025874475524475523,
"loss": 3.1513,
"step": 97700
},
{
"epoch": 28.46540074557316,
"grad_norm": 0.45727989077568054,
"learning_rate": 0.00025856993006993003,
"loss": 3.1476,
"step": 97750
},
{
"epoch": 28.47996272134203,
"grad_norm": 0.48387905955314636,
"learning_rate": 0.0002583951048951049,
"loss": 3.1551,
"step": 97800
},
{
"epoch": 28.494524697110904,
"grad_norm": 0.4310111701488495,
"learning_rate": 0.0002582202797202797,
"loss": 3.1528,
"step": 97850
},
{
"epoch": 28.509086672879775,
"grad_norm": 0.4387485086917877,
"learning_rate": 0.0002580454545454545,
"loss": 3.1557,
"step": 97900
},
{
"epoch": 28.52364864864865,
"grad_norm": 0.43874770402908325,
"learning_rate": 0.00025787062937062934,
"loss": 3.1688,
"step": 97950
},
{
"epoch": 28.53821062441752,
"grad_norm": 0.44133517146110535,
"learning_rate": 0.0002576958041958042,
"loss": 3.1499,
"step": 98000
},
{
"epoch": 28.53821062441752,
"eval_accuracy": 0.37463698183345395,
"eval_loss": 3.5472443103790283,
"eval_runtime": 53.6248,
"eval_samples_per_second": 310.379,
"eval_steps_per_second": 19.413,
"step": 98000
},
{
"epoch": 28.552772600186394,
"grad_norm": 0.45987606048583984,
"learning_rate": 0.000257520979020979,
"loss": 3.1609,
"step": 98050
},
{
"epoch": 28.567334575955265,
"grad_norm": 0.4473000764846802,
"learning_rate": 0.00025734615384615385,
"loss": 3.1625,
"step": 98100
},
{
"epoch": 28.58189655172414,
"grad_norm": 0.4264574646949768,
"learning_rate": 0.00025717132867132865,
"loss": 3.1542,
"step": 98150
},
{
"epoch": 28.59645852749301,
"grad_norm": 0.44503456354141235,
"learning_rate": 0.0002569965034965035,
"loss": 3.1588,
"step": 98200
},
{
"epoch": 28.611020503261884,
"grad_norm": 0.4370170831680298,
"learning_rate": 0.0002568216783216783,
"loss": 3.1678,
"step": 98250
},
{
"epoch": 28.625582479030754,
"grad_norm": 0.46959516406059265,
"learning_rate": 0.0002566468531468531,
"loss": 3.1704,
"step": 98300
},
{
"epoch": 28.64014445479963,
"grad_norm": 0.43134886026382446,
"learning_rate": 0.00025647202797202795,
"loss": 3.1791,
"step": 98350
},
{
"epoch": 28.6547064305685,
"grad_norm": 0.4523867666721344,
"learning_rate": 0.00025629720279720275,
"loss": 3.1619,
"step": 98400
},
{
"epoch": 28.669268406337373,
"grad_norm": 0.4620450437068939,
"learning_rate": 0.0002561223776223776,
"loss": 3.166,
"step": 98450
},
{
"epoch": 28.683830382106244,
"grad_norm": 0.44689565896987915,
"learning_rate": 0.0002559475524475524,
"loss": 3.1708,
"step": 98500
},
{
"epoch": 28.698392357875118,
"grad_norm": 0.4532458782196045,
"learning_rate": 0.00025577272727272726,
"loss": 3.1625,
"step": 98550
},
{
"epoch": 28.71295433364399,
"grad_norm": 0.4778449833393097,
"learning_rate": 0.00025559790209790206,
"loss": 3.1719,
"step": 98600
},
{
"epoch": 28.727516309412863,
"grad_norm": 0.445504367351532,
"learning_rate": 0.00025542307692307686,
"loss": 3.1564,
"step": 98650
},
{
"epoch": 28.742078285181734,
"grad_norm": 0.4707012474536896,
"learning_rate": 0.00025524825174825177,
"loss": 3.1772,
"step": 98700
},
{
"epoch": 28.756640260950604,
"grad_norm": 0.44212526082992554,
"learning_rate": 0.00025507342657342657,
"loss": 3.1779,
"step": 98750
},
{
"epoch": 28.77120223671948,
"grad_norm": 0.43493756651878357,
"learning_rate": 0.00025489860139860137,
"loss": 3.1706,
"step": 98800
},
{
"epoch": 28.78576421248835,
"grad_norm": 0.4900915324687958,
"learning_rate": 0.0002547237762237762,
"loss": 3.1682,
"step": 98850
},
{
"epoch": 28.800326188257223,
"grad_norm": 0.4247000515460968,
"learning_rate": 0.000254548951048951,
"loss": 3.172,
"step": 98900
},
{
"epoch": 28.814888164026094,
"grad_norm": 0.449750691652298,
"learning_rate": 0.0002543741258741259,
"loss": 3.1795,
"step": 98950
},
{
"epoch": 28.829450139794968,
"grad_norm": 0.45670533180236816,
"learning_rate": 0.0002541993006993007,
"loss": 3.1747,
"step": 99000
},
{
"epoch": 28.829450139794968,
"eval_accuracy": 0.37490458648027464,
"eval_loss": 3.5373735427856445,
"eval_runtime": 53.7663,
"eval_samples_per_second": 309.562,
"eval_steps_per_second": 19.362,
"step": 99000
},
{
"epoch": 28.84401211556384,
"grad_norm": 0.4435442090034485,
"learning_rate": 0.0002540244755244755,
"loss": 3.1754,
"step": 99050
},
{
"epoch": 28.858574091332713,
"grad_norm": 0.4304124116897583,
"learning_rate": 0.00025384965034965033,
"loss": 3.1627,
"step": 99100
},
{
"epoch": 28.873136067101584,
"grad_norm": 0.45406800508499146,
"learning_rate": 0.00025367482517482513,
"loss": 3.1713,
"step": 99150
},
{
"epoch": 28.887698042870458,
"grad_norm": 0.44807180762290955,
"learning_rate": 0.0002535,
"loss": 3.169,
"step": 99200
},
{
"epoch": 28.90226001863933,
"grad_norm": 0.4605872333049774,
"learning_rate": 0.0002533251748251748,
"loss": 3.1823,
"step": 99250
},
{
"epoch": 28.916821994408203,
"grad_norm": 0.43984824419021606,
"learning_rate": 0.00025315034965034964,
"loss": 3.1692,
"step": 99300
},
{
"epoch": 28.931383970177073,
"grad_norm": 0.4410223662853241,
"learning_rate": 0.00025297552447552444,
"loss": 3.1781,
"step": 99350
},
{
"epoch": 28.945945945945947,
"grad_norm": 0.43068429827690125,
"learning_rate": 0.0002528006993006993,
"loss": 3.1835,
"step": 99400
},
{
"epoch": 28.960507921714818,
"grad_norm": 0.447227418422699,
"learning_rate": 0.00025262587412587414,
"loss": 3.1843,
"step": 99450
},
{
"epoch": 28.975069897483692,
"grad_norm": 0.4420487880706787,
"learning_rate": 0.00025245104895104894,
"loss": 3.1755,
"step": 99500
},
{
"epoch": 28.989631873252563,
"grad_norm": 0.4623829424381256,
"learning_rate": 0.00025227622377622374,
"loss": 3.1737,
"step": 99550
},
{
"epoch": 29.004077353215283,
"grad_norm": 0.44582149386405945,
"learning_rate": 0.0002521013986013986,
"loss": 3.1614,
"step": 99600
},
{
"epoch": 29.018639328984158,
"grad_norm": 0.4791642129421234,
"learning_rate": 0.0002519265734265734,
"loss": 3.0798,
"step": 99650
},
{
"epoch": 29.033201304753028,
"grad_norm": 0.44250455498695374,
"learning_rate": 0.00025175174825174825,
"loss": 3.0932,
"step": 99700
},
{
"epoch": 29.047763280521902,
"grad_norm": 0.46487218141555786,
"learning_rate": 0.00025157692307692305,
"loss": 3.093,
"step": 99750
},
{
"epoch": 29.062325256290773,
"grad_norm": 0.4407868981361389,
"learning_rate": 0.0002514020979020979,
"loss": 3.0993,
"step": 99800
},
{
"epoch": 29.076887232059647,
"grad_norm": 0.43440374732017517,
"learning_rate": 0.0002512272727272727,
"loss": 3.0946,
"step": 99850
},
{
"epoch": 29.091449207828518,
"grad_norm": 0.45058801770210266,
"learning_rate": 0.0002510524475524475,
"loss": 3.0925,
"step": 99900
},
{
"epoch": 29.106011183597392,
"grad_norm": 0.46985459327697754,
"learning_rate": 0.00025087762237762236,
"loss": 3.1015,
"step": 99950
},
{
"epoch": 29.120573159366263,
"grad_norm": 0.4654427170753479,
"learning_rate": 0.00025070279720279716,
"loss": 3.1073,
"step": 100000
},
{
"epoch": 29.120573159366263,
"eval_accuracy": 0.3739781993922694,
"eval_loss": 3.5579988956451416,
"eval_runtime": 53.6538,
"eval_samples_per_second": 310.211,
"eval_steps_per_second": 19.402,
"step": 100000
},
{
"epoch": 29.135135135135137,
"grad_norm": 0.4768226444721222,
"learning_rate": 0.000250527972027972,
"loss": 3.1043,
"step": 100050
},
{
"epoch": 29.149697110904008,
"grad_norm": 0.43046435713768005,
"learning_rate": 0.00025035314685314686,
"loss": 3.1163,
"step": 100100
},
{
"epoch": 29.164259086672878,
"grad_norm": 0.5126052498817444,
"learning_rate": 0.00025017832167832166,
"loss": 3.1186,
"step": 100150
},
{
"epoch": 29.178821062441752,
"grad_norm": 0.4530967175960541,
"learning_rate": 0.0002500034965034965,
"loss": 3.1141,
"step": 100200
},
{
"epoch": 29.193383038210623,
"grad_norm": 0.46856310963630676,
"learning_rate": 0.0002498286713286713,
"loss": 3.1029,
"step": 100250
},
{
"epoch": 29.207945013979497,
"grad_norm": 0.47363170981407166,
"learning_rate": 0.0002496538461538461,
"loss": 3.1104,
"step": 100300
},
{
"epoch": 29.222506989748368,
"grad_norm": 0.4353318214416504,
"learning_rate": 0.00024947902097902097,
"loss": 3.1185,
"step": 100350
},
{
"epoch": 29.237068965517242,
"grad_norm": 0.4660615921020508,
"learning_rate": 0.00024930419580419577,
"loss": 3.123,
"step": 100400
},
{
"epoch": 29.251630941286113,
"grad_norm": 0.450541615486145,
"learning_rate": 0.0002491293706293706,
"loss": 3.1216,
"step": 100450
},
{
"epoch": 29.266192917054987,
"grad_norm": 0.47547033429145813,
"learning_rate": 0.0002489545454545454,
"loss": 3.1252,
"step": 100500
},
{
"epoch": 29.280754892823857,
"grad_norm": 0.4383224844932556,
"learning_rate": 0.0002487797202797203,
"loss": 3.1223,
"step": 100550
},
{
"epoch": 29.29531686859273,
"grad_norm": 0.4649880826473236,
"learning_rate": 0.0002486048951048951,
"loss": 3.1273,
"step": 100600
},
{
"epoch": 29.309878844361602,
"grad_norm": 0.44201621413230896,
"learning_rate": 0.0002484300699300699,
"loss": 3.1335,
"step": 100650
},
{
"epoch": 29.324440820130476,
"grad_norm": 0.4683978855609894,
"learning_rate": 0.00024825524475524473,
"loss": 3.1405,
"step": 100700
},
{
"epoch": 29.339002795899347,
"grad_norm": 0.4427500069141388,
"learning_rate": 0.00024808041958041953,
"loss": 3.1324,
"step": 100750
},
{
"epoch": 29.35356477166822,
"grad_norm": 0.49367576837539673,
"learning_rate": 0.0002479055944055944,
"loss": 3.1266,
"step": 100800
},
{
"epoch": 29.368126747437092,
"grad_norm": 0.4418413043022156,
"learning_rate": 0.00024773076923076924,
"loss": 3.1186,
"step": 100850
},
{
"epoch": 29.382688723205966,
"grad_norm": 0.46410617232322693,
"learning_rate": 0.00024755594405594404,
"loss": 3.1535,
"step": 100900
},
{
"epoch": 29.397250698974837,
"grad_norm": 0.46582889556884766,
"learning_rate": 0.0002473811188811189,
"loss": 3.1423,
"step": 100950
},
{
"epoch": 29.41181267474371,
"grad_norm": 0.46468058228492737,
"learning_rate": 0.0002472062937062937,
"loss": 3.1268,
"step": 101000
},
{
"epoch": 29.41181267474371,
"eval_accuracy": 0.37427896067810734,
"eval_loss": 3.5532479286193848,
"eval_runtime": 53.662,
"eval_samples_per_second": 310.164,
"eval_steps_per_second": 19.399,
"step": 101000
},
{
"epoch": 29.42637465051258,
"grad_norm": 0.47465112805366516,
"learning_rate": 0.0002470314685314685,
"loss": 3.1535,
"step": 101050
},
{
"epoch": 29.440936626281452,
"grad_norm": 0.44461822509765625,
"learning_rate": 0.00024685664335664335,
"loss": 3.1444,
"step": 101100
},
{
"epoch": 29.455498602050326,
"grad_norm": 0.44391322135925293,
"learning_rate": 0.00024668181818181815,
"loss": 3.152,
"step": 101150
},
{
"epoch": 29.470060577819197,
"grad_norm": 0.44190195202827454,
"learning_rate": 0.000246506993006993,
"loss": 3.1346,
"step": 101200
},
{
"epoch": 29.48462255358807,
"grad_norm": 0.4293469488620758,
"learning_rate": 0.0002463321678321678,
"loss": 3.1397,
"step": 101250
},
{
"epoch": 29.499184529356942,
"grad_norm": 0.4673719108104706,
"learning_rate": 0.00024615734265734265,
"loss": 3.1422,
"step": 101300
},
{
"epoch": 29.513746505125816,
"grad_norm": 0.4394907057285309,
"learning_rate": 0.00024598251748251745,
"loss": 3.1516,
"step": 101350
},
{
"epoch": 29.528308480894687,
"grad_norm": 0.46596258878707886,
"learning_rate": 0.00024580769230769225,
"loss": 3.1471,
"step": 101400
},
{
"epoch": 29.54287045666356,
"grad_norm": 0.46689680218696594,
"learning_rate": 0.0002456328671328671,
"loss": 3.1483,
"step": 101450
},
{
"epoch": 29.55743243243243,
"grad_norm": 0.4656352996826172,
"learning_rate": 0.00024545804195804196,
"loss": 3.1602,
"step": 101500
},
{
"epoch": 29.571994408201306,
"grad_norm": 0.46260908246040344,
"learning_rate": 0.00024528321678321676,
"loss": 3.1436,
"step": 101550
},
{
"epoch": 29.586556383970176,
"grad_norm": 0.43992796540260315,
"learning_rate": 0.0002451083916083916,
"loss": 3.1595,
"step": 101600
},
{
"epoch": 29.60111835973905,
"grad_norm": 0.463177353143692,
"learning_rate": 0.0002449335664335664,
"loss": 3.1497,
"step": 101650
},
{
"epoch": 29.61568033550792,
"grad_norm": 0.44098642468452454,
"learning_rate": 0.00024475874125874127,
"loss": 3.1578,
"step": 101700
},
{
"epoch": 29.630242311276795,
"grad_norm": 0.4605831801891327,
"learning_rate": 0.00024458391608391607,
"loss": 3.1564,
"step": 101750
},
{
"epoch": 29.644804287045666,
"grad_norm": 0.4362461268901825,
"learning_rate": 0.00024440909090909087,
"loss": 3.149,
"step": 101800
},
{
"epoch": 29.65936626281454,
"grad_norm": 0.45928579568862915,
"learning_rate": 0.0002442342657342657,
"loss": 3.1544,
"step": 101850
},
{
"epoch": 29.67392823858341,
"grad_norm": 0.4574165642261505,
"learning_rate": 0.00024405944055944052,
"loss": 3.1615,
"step": 101900
},
{
"epoch": 29.688490214352285,
"grad_norm": 0.4415947198867798,
"learning_rate": 0.00024388461538461535,
"loss": 3.1512,
"step": 101950
},
{
"epoch": 29.703052190121156,
"grad_norm": 0.45984649658203125,
"learning_rate": 0.00024370979020979017,
"loss": 3.1554,
"step": 102000
},
{
"epoch": 29.703052190121156,
"eval_accuracy": 0.3748505011825868,
"eval_loss": 3.5421435832977295,
"eval_runtime": 53.7157,
"eval_samples_per_second": 309.853,
"eval_steps_per_second": 19.38,
"step": 102000
},
{
"epoch": 29.703052190121156,
"step": 102000,
"total_flos": 2.13187776970752e+18,
"train_loss": 3.386132829703537,
"train_runtime": 45304.3422,
"train_samples_per_second": 303.155,
"train_steps_per_second": 3.79
}
],
"logging_steps": 50,
"max_steps": 171700,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 10000,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 20,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 18
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.13187776970752e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}