100M_high_1000_495 / trainer_state.json
craa's picture
End of training
d95bb55 verified
Invalid JSON: Unexpected token 'I', ..."ad_norm": Infinity, "... is not valid JSON
{
"best_metric": 3.302588701248169,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M_495/checkpoint-90000",
"epoch": 10.0,
"eval_steps": 1000,
"global_step": 92910,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005381552039608223,
"grad_norm": 12.074577331542969,
"learning_rate": 0.0003,
"loss": 8.6374,
"step": 50
},
{
"epoch": 0.010763104079216447,
"grad_norm": 1.3158553838729858,
"learning_rate": 0.0006,
"loss": 6.8912,
"step": 100
},
{
"epoch": 0.01614465611882467,
"grad_norm": 2.628305196762085,
"learning_rate": 0.0005996767589699385,
"loss": 6.4839,
"step": 150
},
{
"epoch": 0.021526208158432893,
"grad_norm": 1.0599905252456665,
"learning_rate": 0.0005993535179398771,
"loss": 6.2264,
"step": 200
},
{
"epoch": 0.026907760198041114,
"grad_norm": 1.638780951499939,
"learning_rate": 0.0005990302769098158,
"loss": 6.0419,
"step": 250
},
{
"epoch": 0.03228931223764934,
"grad_norm": 1.9191187620162964,
"learning_rate": 0.0005987070358797543,
"loss": 5.955,
"step": 300
},
{
"epoch": 0.03767086427725756,
"grad_norm": 1.12886643409729,
"learning_rate": 0.0005983837948496929,
"loss": 5.8538,
"step": 350
},
{
"epoch": 0.04305241631686579,
"grad_norm": 1.3254728317260742,
"learning_rate": 0.0005980605538196314,
"loss": 5.7663,
"step": 400
},
{
"epoch": 0.048433968356474004,
"grad_norm": 2.3004322052001953,
"learning_rate": 0.0005977373127895701,
"loss": 5.6952,
"step": 450
},
{
"epoch": 0.05381552039608223,
"grad_norm": 1.495474934577942,
"learning_rate": 0.0005974140717595086,
"loss": 5.6341,
"step": 500
},
{
"epoch": 0.05919707243569045,
"grad_norm": 0.9526841640472412,
"learning_rate": 0.0005970908307294472,
"loss": 5.5508,
"step": 550
},
{
"epoch": 0.06457862447529868,
"grad_norm": 1.1846691370010376,
"learning_rate": 0.0005967675896993858,
"loss": 5.4745,
"step": 600
},
{
"epoch": 0.0699601765149069,
"grad_norm": 1.5218234062194824,
"learning_rate": 0.0005964443486693243,
"loss": 5.4167,
"step": 650
},
{
"epoch": 0.07534172855451512,
"grad_norm": 1.037400484085083,
"learning_rate": 0.000596121107639263,
"loss": 5.3214,
"step": 700
},
{
"epoch": 0.08072328059412334,
"grad_norm": 1.1252976655960083,
"learning_rate": 0.0005957978666092015,
"loss": 5.3002,
"step": 750
},
{
"epoch": 0.08610483263373157,
"grad_norm": 0.9828529953956604,
"learning_rate": 0.0005954746255791401,
"loss": 5.224,
"step": 800
},
{
"epoch": 0.09148638467333979,
"grad_norm": 1.4241634607315063,
"learning_rate": 0.0005951513845490787,
"loss": 5.2284,
"step": 850
},
{
"epoch": 0.09686793671294801,
"grad_norm": 1.0950024127960205,
"learning_rate": 0.0005948281435190174,
"loss": 5.1559,
"step": 900
},
{
"epoch": 0.10224948875255624,
"grad_norm": 0.8689674735069275,
"learning_rate": 0.0005945049024889559,
"loss": 5.1198,
"step": 950
},
{
"epoch": 0.10763104079216446,
"grad_norm": 0.8567312955856323,
"learning_rate": 0.0005941816614588944,
"loss": 5.0633,
"step": 1000
},
{
"epoch": 0.10763104079216446,
"eval_accuracy": 0.22941774764519313,
"eval_loss": 4.999941825866699,
"eval_runtime": 181.8712,
"eval_samples_per_second": 99.032,
"eval_steps_per_second": 6.191,
"step": 1000
},
{
"epoch": 0.11301259283177269,
"grad_norm": 1.0926947593688965,
"learning_rate": 0.000593858420428833,
"loss": 5.0126,
"step": 1050
},
{
"epoch": 0.1183941448713809,
"grad_norm": 1.079075813293457,
"learning_rate": 0.0005935351793987716,
"loss": 4.9581,
"step": 1100
},
{
"epoch": 0.12377569691098914,
"grad_norm": 1.9798409938812256,
"learning_rate": 0.0005932119383687103,
"loss": 4.9724,
"step": 1150
},
{
"epoch": 0.12915724895059735,
"grad_norm": 1.3321768045425415,
"learning_rate": 0.0005928886973386488,
"loss": 4.9741,
"step": 1200
},
{
"epoch": 0.13453880099020557,
"grad_norm": 1.1671667098999023,
"learning_rate": 0.0005925654563085874,
"loss": 4.9051,
"step": 1250
},
{
"epoch": 0.1399203530298138,
"grad_norm": 0.8295050263404846,
"learning_rate": 0.000592242215278526,
"loss": 4.9002,
"step": 1300
},
{
"epoch": 0.14530190506942203,
"grad_norm": 1.0559158325195312,
"learning_rate": 0.0005919189742484645,
"loss": 4.8626,
"step": 1350
},
{
"epoch": 0.15068345710903025,
"grad_norm": 1.2337747812271118,
"learning_rate": 0.0005915957332184032,
"loss": 4.851,
"step": 1400
},
{
"epoch": 0.15606500914863847,
"grad_norm": 1.0464680194854736,
"learning_rate": 0.0005912724921883417,
"loss": 4.8242,
"step": 1450
},
{
"epoch": 0.16144656118824668,
"grad_norm": 0.8018038868904114,
"learning_rate": 0.0005909492511582803,
"loss": 4.7894,
"step": 1500
},
{
"epoch": 0.1668281132278549,
"grad_norm": 1.5508265495300293,
"learning_rate": 0.0005906260101282189,
"loss": 4.74,
"step": 1550
},
{
"epoch": 0.17220966526746315,
"grad_norm": 0.9371482133865356,
"learning_rate": 0.0005903027690981575,
"loss": 4.7316,
"step": 1600
},
{
"epoch": 0.17759121730707136,
"grad_norm": 1.0305496454238892,
"learning_rate": 0.000589979528068096,
"loss": 4.7221,
"step": 1650
},
{
"epoch": 0.18297276934667958,
"grad_norm": 0.90619957447052,
"learning_rate": 0.0005896562870380347,
"loss": 4.7063,
"step": 1700
},
{
"epoch": 0.1883543213862878,
"grad_norm": 1.0820300579071045,
"learning_rate": 0.0005893330460079732,
"loss": 4.668,
"step": 1750
},
{
"epoch": 0.19373587342589602,
"grad_norm": 0.857718825340271,
"learning_rate": 0.0005890098049779118,
"loss": 4.6453,
"step": 1800
},
{
"epoch": 0.19911742546550426,
"grad_norm": 0.9784050583839417,
"learning_rate": 0.0005886865639478504,
"loss": 4.6397,
"step": 1850
},
{
"epoch": 0.20449897750511248,
"grad_norm": 0.9087166786193848,
"learning_rate": 0.0005883633229177889,
"loss": 4.6246,
"step": 1900
},
{
"epoch": 0.2098805295447207,
"grad_norm": 0.9737820029258728,
"learning_rate": 0.0005880400818877276,
"loss": 4.6047,
"step": 1950
},
{
"epoch": 0.2152620815843289,
"grad_norm": 0.8255292773246765,
"learning_rate": 0.0005877168408576662,
"loss": 4.5492,
"step": 2000
},
{
"epoch": 0.2152620815843289,
"eval_accuracy": 0.27245189692187455,
"eval_loss": 4.491321563720703,
"eval_runtime": 181.6962,
"eval_samples_per_second": 99.127,
"eval_steps_per_second": 6.197,
"step": 2000
},
{
"epoch": 0.22064363362393713,
"grad_norm": 0.7951588034629822,
"learning_rate": 0.0005873935998276048,
"loss": 4.5555,
"step": 2050
},
{
"epoch": 0.22602518566354537,
"grad_norm": 0.8975157141685486,
"learning_rate": 0.0005870703587975433,
"loss": 4.5375,
"step": 2100
},
{
"epoch": 0.2314067377031536,
"grad_norm": 1.0026023387908936,
"learning_rate": 0.0005867471177674818,
"loss": 4.5323,
"step": 2150
},
{
"epoch": 0.2367882897427618,
"grad_norm": 0.803642988204956,
"learning_rate": 0.0005864238767374205,
"loss": 4.4924,
"step": 2200
},
{
"epoch": 0.24216984178237003,
"grad_norm": 1.0086907148361206,
"learning_rate": 0.0005861006357073591,
"loss": 4.5072,
"step": 2250
},
{
"epoch": 0.24755139382197827,
"grad_norm": 0.8211397528648376,
"learning_rate": 0.0005857773946772977,
"loss": 4.4681,
"step": 2300
},
{
"epoch": 0.2529329458615865,
"grad_norm": 0.7603225111961365,
"learning_rate": 0.0005854541536472362,
"loss": 4.4691,
"step": 2350
},
{
"epoch": 0.2583144979011947,
"grad_norm": 0.8533746004104614,
"learning_rate": 0.0005851309126171749,
"loss": 4.4473,
"step": 2400
},
{
"epoch": 0.2636960499408029,
"grad_norm": 1.300952434539795,
"learning_rate": 0.0005848076715871134,
"loss": 4.4076,
"step": 2450
},
{
"epoch": 0.26907760198041114,
"grad_norm": 0.7179780006408691,
"learning_rate": 0.000584484430557052,
"loss": 4.4134,
"step": 2500
},
{
"epoch": 0.27445915402001936,
"grad_norm": 0.8938673734664917,
"learning_rate": 0.0005841611895269906,
"loss": 4.4109,
"step": 2550
},
{
"epoch": 0.2798407060596276,
"grad_norm": 0.8200742602348328,
"learning_rate": 0.0005838379484969291,
"loss": 4.4013,
"step": 2600
},
{
"epoch": 0.2852222580992358,
"grad_norm": 0.8515391945838928,
"learning_rate": 0.0005835147074668678,
"loss": 4.3763,
"step": 2650
},
{
"epoch": 0.29060381013884407,
"grad_norm": 0.8918663859367371,
"learning_rate": 0.0005831914664368063,
"loss": 4.3774,
"step": 2700
},
{
"epoch": 0.2959853621784523,
"grad_norm": 0.6826805472373962,
"learning_rate": 0.0005828682254067449,
"loss": 4.3631,
"step": 2750
},
{
"epoch": 0.3013669142180605,
"grad_norm": 0.7818616628646851,
"learning_rate": 0.0005825449843766835,
"loss": 4.3313,
"step": 2800
},
{
"epoch": 0.3067484662576687,
"grad_norm": 0.7908890843391418,
"learning_rate": 0.0005822217433466221,
"loss": 4.3407,
"step": 2850
},
{
"epoch": 0.31213001829727693,
"grad_norm": 1.1532859802246094,
"learning_rate": 0.0005818985023165607,
"loss": 4.3188,
"step": 2900
},
{
"epoch": 0.31751157033688515,
"grad_norm": 0.7944912910461426,
"learning_rate": 0.0005815752612864992,
"loss": 4.3062,
"step": 2950
},
{
"epoch": 0.32289312237649337,
"grad_norm": 0.7332167029380798,
"learning_rate": 0.0005812520202564378,
"loss": 4.2863,
"step": 3000
},
{
"epoch": 0.32289312237649337,
"eval_accuracy": 0.29881217403454574,
"eval_loss": 4.233547687530518,
"eval_runtime": 181.7872,
"eval_samples_per_second": 99.077,
"eval_steps_per_second": 6.194,
"step": 3000
},
{
"epoch": 0.3282746744161016,
"grad_norm": 0.6582856774330139,
"learning_rate": 0.0005809287792263764,
"loss": 4.3131,
"step": 3050
},
{
"epoch": 0.3336562264557098,
"grad_norm": 0.7379726767539978,
"learning_rate": 0.0005806055381963151,
"loss": 4.2889,
"step": 3100
},
{
"epoch": 0.3390377784953181,
"grad_norm": 0.7286725640296936,
"learning_rate": 0.0005802822971662536,
"loss": 4.287,
"step": 3150
},
{
"epoch": 0.3444193305349263,
"grad_norm": 0.7306691408157349,
"learning_rate": 0.0005799590561361922,
"loss": 4.2596,
"step": 3200
},
{
"epoch": 0.3498008825745345,
"grad_norm": 0.6126540303230286,
"learning_rate": 0.0005796358151061307,
"loss": 4.2404,
"step": 3250
},
{
"epoch": 0.35518243461414273,
"grad_norm": 0.7405567765235901,
"learning_rate": 0.0005793125740760694,
"loss": 4.253,
"step": 3300
},
{
"epoch": 0.36056398665375095,
"grad_norm": 0.8209679126739502,
"learning_rate": 0.0005789893330460079,
"loss": 4.2517,
"step": 3350
},
{
"epoch": 0.36594553869335916,
"grad_norm": 0.7422078847885132,
"learning_rate": 0.0005786660920159465,
"loss": 4.2362,
"step": 3400
},
{
"epoch": 0.3713270907329674,
"grad_norm": 0.6978297233581543,
"learning_rate": 0.0005783428509858851,
"loss": 4.2293,
"step": 3450
},
{
"epoch": 0.3767086427725756,
"grad_norm": 0.7036471962928772,
"learning_rate": 0.0005780196099558237,
"loss": 4.2389,
"step": 3500
},
{
"epoch": 0.3820901948121838,
"grad_norm": 0.8010238409042358,
"learning_rate": 0.0005776963689257623,
"loss": 4.2198,
"step": 3550
},
{
"epoch": 0.38747174685179203,
"grad_norm": 0.6499230265617371,
"learning_rate": 0.0005773731278957008,
"loss": 4.1941,
"step": 3600
},
{
"epoch": 0.3928532988914003,
"grad_norm": 0.7018994688987732,
"learning_rate": 0.0005770498868656394,
"loss": 4.2102,
"step": 3650
},
{
"epoch": 0.3982348509310085,
"grad_norm": 0.6573231816291809,
"learning_rate": 0.000576726645835578,
"loss": 4.1892,
"step": 3700
},
{
"epoch": 0.40361640297061674,
"grad_norm": 0.6301137804985046,
"learning_rate": 0.0005764034048055167,
"loss": 4.1782,
"step": 3750
},
{
"epoch": 0.40899795501022496,
"grad_norm": 0.6420815587043762,
"learning_rate": 0.0005760801637754552,
"loss": 4.1881,
"step": 3800
},
{
"epoch": 0.4143795070498332,
"grad_norm": 0.7286609411239624,
"learning_rate": 0.0005757569227453937,
"loss": 4.1681,
"step": 3850
},
{
"epoch": 0.4197610590894414,
"grad_norm": 0.7999758720397949,
"learning_rate": 0.0005754336817153324,
"loss": 4.1663,
"step": 3900
},
{
"epoch": 0.4251426111290496,
"grad_norm": 0.681491494178772,
"learning_rate": 0.0005751104406852709,
"loss": 4.1526,
"step": 3950
},
{
"epoch": 0.4305241631686578,
"grad_norm": 0.8898637890815735,
"learning_rate": 0.0005747871996552096,
"loss": 4.1472,
"step": 4000
},
{
"epoch": 0.4305241631686578,
"eval_accuracy": 0.31332352777238437,
"eval_loss": 4.088411808013916,
"eval_runtime": 182.0558,
"eval_samples_per_second": 98.931,
"eval_steps_per_second": 6.185,
"step": 4000
},
{
"epoch": 0.43590571520826604,
"grad_norm": 1.0454559326171875,
"learning_rate": 0.0005744639586251481,
"loss": 4.174,
"step": 4050
},
{
"epoch": 0.44128726724787426,
"grad_norm": 0.7238677144050598,
"learning_rate": 0.0005741407175950867,
"loss": 4.1417,
"step": 4100
},
{
"epoch": 0.44666881928748253,
"grad_norm": 0.6636929512023926,
"learning_rate": 0.0005738174765650253,
"loss": 4.14,
"step": 4150
},
{
"epoch": 0.45205037132709075,
"grad_norm": 0.553901195526123,
"learning_rate": 0.0005734942355349638,
"loss": 4.1244,
"step": 4200
},
{
"epoch": 0.45743192336669897,
"grad_norm": 0.7060267925262451,
"learning_rate": 0.0005731709945049025,
"loss": 4.1363,
"step": 4250
},
{
"epoch": 0.4628134754063072,
"grad_norm": 0.6566117405891418,
"learning_rate": 0.000572847753474841,
"loss": 4.1328,
"step": 4300
},
{
"epoch": 0.4681950274459154,
"grad_norm": 0.663583517074585,
"learning_rate": 0.0005725245124447796,
"loss": 4.1281,
"step": 4350
},
{
"epoch": 0.4735765794855236,
"grad_norm": 0.797214925289154,
"learning_rate": 0.0005722012714147182,
"loss": 4.1199,
"step": 4400
},
{
"epoch": 0.47895813152513184,
"grad_norm": 0.6216398477554321,
"learning_rate": 0.0005718780303846568,
"loss": 4.1097,
"step": 4450
},
{
"epoch": 0.48433968356474005,
"grad_norm": 0.7291539907455444,
"learning_rate": 0.0005715547893545953,
"loss": 4.0946,
"step": 4500
},
{
"epoch": 0.48972123560434827,
"grad_norm": 0.6444728374481201,
"learning_rate": 0.000571231548324534,
"loss": 4.0871,
"step": 4550
},
{
"epoch": 0.49510278764395654,
"grad_norm": 0.6845434308052063,
"learning_rate": 0.0005709083072944725,
"loss": 4.0885,
"step": 4600
},
{
"epoch": 0.5004843396835648,
"grad_norm": 0.7555699944496155,
"learning_rate": 0.0005705850662644111,
"loss": 4.0955,
"step": 4650
},
{
"epoch": 0.505865891723173,
"grad_norm": 0.7021520733833313,
"learning_rate": 0.0005702618252343497,
"loss": 4.0961,
"step": 4700
},
{
"epoch": 0.5112474437627812,
"grad_norm": 0.5818867087364197,
"learning_rate": 0.0005699385842042882,
"loss": 4.0799,
"step": 4750
},
{
"epoch": 0.5166289958023894,
"grad_norm": 0.665334939956665,
"learning_rate": 0.0005696153431742269,
"loss": 4.0524,
"step": 4800
},
{
"epoch": 0.5220105478419976,
"grad_norm": 0.6017621755599976,
"learning_rate": 0.0005692921021441655,
"loss": 4.0778,
"step": 4850
},
{
"epoch": 0.5273920998816058,
"grad_norm": 0.6869152188301086,
"learning_rate": 0.0005689688611141041,
"loss": 4.0557,
"step": 4900
},
{
"epoch": 0.5327736519212141,
"grad_norm": 0.7289915680885315,
"learning_rate": 0.0005686456200840426,
"loss": 4.0789,
"step": 4950
},
{
"epoch": 0.5381552039608223,
"grad_norm": 0.6597164869308472,
"learning_rate": 0.0005683223790539811,
"loss": 4.062,
"step": 5000
},
{
"epoch": 0.5381552039608223,
"eval_accuracy": 0.32235986249325127,
"eval_loss": 3.9826269149780273,
"eval_runtime": 181.7213,
"eval_samples_per_second": 99.113,
"eval_steps_per_second": 6.196,
"step": 5000
},
{
"epoch": 0.5435367560004305,
"grad_norm": 0.6261274218559265,
"learning_rate": 0.0005679991380239198,
"loss": 4.0553,
"step": 5050
},
{
"epoch": 0.5489183080400387,
"grad_norm": 0.649811863899231,
"learning_rate": 0.0005676823618144596,
"loss": 4.0688,
"step": 5100
},
{
"epoch": 0.5542998600796469,
"grad_norm": 0.6599764823913574,
"learning_rate": 0.0005673591207843981,
"loss": 4.0458,
"step": 5150
},
{
"epoch": 0.5596814121192552,
"grad_norm": 0.5525273680686951,
"learning_rate": 0.0005670358797543368,
"loss": 4.032,
"step": 5200
},
{
"epoch": 0.5650629641588634,
"grad_norm": 0.5733092427253723,
"learning_rate": 0.0005667126387242753,
"loss": 4.0628,
"step": 5250
},
{
"epoch": 0.5704445161984716,
"grad_norm": 0.6523923277854919,
"learning_rate": 0.000566389397694214,
"loss": 4.0207,
"step": 5300
},
{
"epoch": 0.5758260682380799,
"grad_norm": 0.5884820222854614,
"learning_rate": 0.0005660661566641525,
"loss": 4.0289,
"step": 5350
},
{
"epoch": 0.5812076202776881,
"grad_norm": 0.6992023587226868,
"learning_rate": 0.0005657429156340911,
"loss": 4.0149,
"step": 5400
},
{
"epoch": 0.5865891723172963,
"grad_norm": 0.6001935601234436,
"learning_rate": 0.0005654196746040297,
"loss": 4.0226,
"step": 5450
},
{
"epoch": 0.5919707243569046,
"grad_norm": 0.7079454660415649,
"learning_rate": 0.0005650964335739684,
"loss": 4.0292,
"step": 5500
},
{
"epoch": 0.5973522763965128,
"grad_norm": 0.6667420864105225,
"learning_rate": 0.0005647731925439069,
"loss": 3.9974,
"step": 5550
},
{
"epoch": 0.602733828436121,
"grad_norm": 0.589589536190033,
"learning_rate": 0.0005644499515138454,
"loss": 4.0047,
"step": 5600
},
{
"epoch": 0.6081153804757292,
"grad_norm": 0.6412240266799927,
"learning_rate": 0.000564126710483784,
"loss": 3.9956,
"step": 5650
},
{
"epoch": 0.6134969325153374,
"grad_norm": 0.6452463269233704,
"learning_rate": 0.0005638034694537226,
"loss": 3.9969,
"step": 5700
},
{
"epoch": 0.6188784845549457,
"grad_norm": 0.634573221206665,
"learning_rate": 0.0005634802284236612,
"loss": 3.9971,
"step": 5750
},
{
"epoch": 0.6242600365945539,
"grad_norm": 0.6738696694374084,
"learning_rate": 0.0005631569873935998,
"loss": 3.9907,
"step": 5800
},
{
"epoch": 0.6296415886341621,
"grad_norm": 0.6989587545394897,
"learning_rate": 0.0005628337463635384,
"loss": 4.0001,
"step": 5850
},
{
"epoch": 0.6350231406737703,
"grad_norm": 0.7382339239120483,
"learning_rate": 0.0005625105053334769,
"loss": 3.9866,
"step": 5900
},
{
"epoch": 0.6404046927133785,
"grad_norm": 0.6265085339546204,
"learning_rate": 0.0005621872643034155,
"loss": 3.965,
"step": 5950
},
{
"epoch": 0.6457862447529867,
"grad_norm": 0.675687313079834,
"learning_rate": 0.0005618640232733541,
"loss": 3.9919,
"step": 6000
},
{
"epoch": 0.6457862447529867,
"eval_accuracy": 0.3284569192929609,
"eval_loss": 3.910811424255371,
"eval_runtime": 181.5773,
"eval_samples_per_second": 99.192,
"eval_steps_per_second": 6.201,
"step": 6000
},
{
"epoch": 0.651167796792595,
"grad_norm": 0.6309311389923096,
"learning_rate": 0.0005615407822432927,
"loss": 3.9811,
"step": 6050
},
{
"epoch": 0.6565493488322032,
"grad_norm": 0.5584713220596313,
"learning_rate": 0.0005612175412132313,
"loss": 3.9619,
"step": 6100
},
{
"epoch": 0.6619309008718114,
"grad_norm": 0.5973617434501648,
"learning_rate": 0.0005608943001831699,
"loss": 3.9528,
"step": 6150
},
{
"epoch": 0.6673124529114196,
"grad_norm": 0.6784116625785828,
"learning_rate": 0.0005605710591531085,
"loss": 3.9584,
"step": 6200
},
{
"epoch": 0.6726940049510278,
"grad_norm": 0.5735004544258118,
"learning_rate": 0.000560247818123047,
"loss": 3.9522,
"step": 6250
},
{
"epoch": 0.6780755569906362,
"grad_norm": 0.6804719567298889,
"learning_rate": 0.0005599245770929855,
"loss": 3.9563,
"step": 6300
},
{
"epoch": 0.6834571090302444,
"grad_norm": 0.6392138004302979,
"learning_rate": 0.0005596013360629242,
"loss": 3.9444,
"step": 6350
},
{
"epoch": 0.6888386610698526,
"grad_norm": 0.5223947763442993,
"learning_rate": 0.0005592780950328628,
"loss": 3.9685,
"step": 6400
},
{
"epoch": 0.6942202131094608,
"grad_norm": 0.6110231280326843,
"learning_rate": 0.0005589548540028014,
"loss": 3.9501,
"step": 6450
},
{
"epoch": 0.699601765149069,
"grad_norm": 0.6495262980461121,
"learning_rate": 0.0005586316129727399,
"loss": 3.9733,
"step": 6500
},
{
"epoch": 0.7049833171886772,
"grad_norm": 0.5392972230911255,
"learning_rate": 0.0005583083719426786,
"loss": 3.9475,
"step": 6550
},
{
"epoch": 0.7103648692282855,
"grad_norm": 0.5373910665512085,
"learning_rate": 0.0005579851309126171,
"loss": 3.9475,
"step": 6600
},
{
"epoch": 0.7157464212678937,
"grad_norm": 0.5609496235847473,
"learning_rate": 0.0005576618898825558,
"loss": 3.9292,
"step": 6650
},
{
"epoch": 0.7211279733075019,
"grad_norm": 0.5721986293792725,
"learning_rate": 0.0005573386488524943,
"loss": 3.9432,
"step": 6700
},
{
"epoch": 0.7265095253471101,
"grad_norm": 0.5666886568069458,
"learning_rate": 0.0005570154078224328,
"loss": 3.9348,
"step": 6750
},
{
"epoch": 0.7318910773867183,
"grad_norm": 0.5143133997917175,
"learning_rate": 0.0005566921667923715,
"loss": 3.9322,
"step": 6800
},
{
"epoch": 0.7372726294263265,
"grad_norm": 0.5764801502227783,
"learning_rate": 0.00055636892576231,
"loss": 3.9196,
"step": 6850
},
{
"epoch": 0.7426541814659348,
"grad_norm": 0.6438068151473999,
"learning_rate": 0.0005560456847322487,
"loss": 3.9207,
"step": 6900
},
{
"epoch": 0.748035733505543,
"grad_norm": 0.547141432762146,
"learning_rate": 0.0005557224437021872,
"loss": 3.9211,
"step": 6950
},
{
"epoch": 0.7534172855451512,
"grad_norm": 0.5400163531303406,
"learning_rate": 0.0005553992026721258,
"loss": 3.912,
"step": 7000
},
{
"epoch": 0.7534172855451512,
"eval_accuracy": 0.3342374702304669,
"eval_loss": 3.858619451522827,
"eval_runtime": 181.854,
"eval_samples_per_second": 99.041,
"eval_steps_per_second": 6.192,
"step": 7000
},
{
"epoch": 0.7587988375847594,
"grad_norm": 0.6468452215194702,
"learning_rate": 0.0005550759616420644,
"loss": 3.9213,
"step": 7050
},
{
"epoch": 0.7641803896243676,
"grad_norm": 0.8306963443756104,
"learning_rate": 0.000554752720612003,
"loss": 3.9261,
"step": 7100
},
{
"epoch": 0.7695619416639758,
"grad_norm": 0.5487331748008728,
"learning_rate": 0.0005544294795819415,
"loss": 3.9015,
"step": 7150
},
{
"epoch": 0.7749434937035841,
"grad_norm": 0.5448126196861267,
"learning_rate": 0.0005541062385518801,
"loss": 3.9063,
"step": 7200
},
{
"epoch": 0.7803250457431924,
"grad_norm": 0.5569677948951721,
"learning_rate": 0.0005537829975218188,
"loss": 3.9022,
"step": 7250
},
{
"epoch": 0.7857065977828006,
"grad_norm": 0.5910586714744568,
"learning_rate": 0.0005534597564917573,
"loss": 3.9111,
"step": 7300
},
{
"epoch": 0.7910881498224088,
"grad_norm": 0.75867760181427,
"learning_rate": 0.0005531365154616959,
"loss": 3.9149,
"step": 7350
},
{
"epoch": 0.796469701862017,
"grad_norm": 0.5806201696395874,
"learning_rate": 0.0005528132744316344,
"loss": 3.8975,
"step": 7400
},
{
"epoch": 0.8018512539016253,
"grad_norm": 0.5979334115982056,
"learning_rate": 0.0005524900334015731,
"loss": 3.9194,
"step": 7450
},
{
"epoch": 0.8072328059412335,
"grad_norm": 0.6362331509590149,
"learning_rate": 0.0005521667923715117,
"loss": 3.8981,
"step": 7500
},
{
"epoch": 0.8126143579808417,
"grad_norm": 0.5838714838027954,
"learning_rate": 0.0005518435513414502,
"loss": 3.8961,
"step": 7550
},
{
"epoch": 0.8179959100204499,
"grad_norm": 0.7832590937614441,
"learning_rate": 0.0005515203103113888,
"loss": 3.8931,
"step": 7600
},
{
"epoch": 0.8233774620600581,
"grad_norm": 0.5615019798278809,
"learning_rate": 0.0005511970692813274,
"loss": 3.8981,
"step": 7650
},
{
"epoch": 0.8287590140996663,
"grad_norm": 0.6099148392677307,
"learning_rate": 0.000550873828251266,
"loss": 3.8941,
"step": 7700
},
{
"epoch": 0.8341405661392746,
"grad_norm": 0.5140008330345154,
"learning_rate": 0.0005505505872212045,
"loss": 3.8796,
"step": 7750
},
{
"epoch": 0.8395221181788828,
"grad_norm": 0.6559896469116211,
"learning_rate": 0.0005502273461911432,
"loss": 3.8723,
"step": 7800
},
{
"epoch": 0.844903670218491,
"grad_norm": 0.557579517364502,
"learning_rate": 0.0005499041051610817,
"loss": 3.8736,
"step": 7850
},
{
"epoch": 0.8502852222580992,
"grad_norm": 0.6086620688438416,
"learning_rate": 0.0005495808641310204,
"loss": 3.9014,
"step": 7900
},
{
"epoch": 0.8556667742977074,
"grad_norm": 0.6662868857383728,
"learning_rate": 0.0005492576231009589,
"loss": 3.8747,
"step": 7950
},
{
"epoch": 0.8610483263373157,
"grad_norm": 1.007802963256836,
"learning_rate": 0.0005489343820708974,
"loss": 3.862,
"step": 8000
},
{
"epoch": 0.8610483263373157,
"eval_accuracy": 0.33844711771595115,
"eval_loss": 3.811176061630249,
"eval_runtime": 181.8668,
"eval_samples_per_second": 99.034,
"eval_steps_per_second": 6.191,
"step": 8000
},
{
"epoch": 0.8664298783769239,
"grad_norm": 0.5262889266014099,
"learning_rate": 0.0005486111410408361,
"loss": 3.8806,
"step": 8050
},
{
"epoch": 0.8718114304165321,
"grad_norm": 0.5448309183120728,
"learning_rate": 0.0005482879000107746,
"loss": 3.8702,
"step": 8100
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.5762999057769775,
"learning_rate": 0.0005479646589807133,
"loss": 3.8735,
"step": 8150
},
{
"epoch": 0.8825745344957485,
"grad_norm": 1.141692876815796,
"learning_rate": 0.0005476414179506518,
"loss": 3.8806,
"step": 8200
},
{
"epoch": 0.8879560865353568,
"grad_norm": 0.5628105998039246,
"learning_rate": 0.0005473181769205904,
"loss": 3.8622,
"step": 8250
},
{
"epoch": 0.8933376385749651,
"grad_norm": 0.5854573249816895,
"learning_rate": 0.000546994935890529,
"loss": 3.8763,
"step": 8300
},
{
"epoch": 0.8987191906145733,
"grad_norm": 0.5357927083969116,
"learning_rate": 0.0005466716948604677,
"loss": 3.8505,
"step": 8350
},
{
"epoch": 0.9041007426541815,
"grad_norm": 0.6378769874572754,
"learning_rate": 0.0005463484538304062,
"loss": 3.845,
"step": 8400
},
{
"epoch": 0.9094822946937897,
"grad_norm": 0.6226728558540344,
"learning_rate": 0.0005460252128003447,
"loss": 3.8583,
"step": 8450
},
{
"epoch": 0.9148638467333979,
"grad_norm": 0.5731797218322754,
"learning_rate": 0.0005457019717702833,
"loss": 3.8498,
"step": 8500
},
{
"epoch": 0.9202453987730062,
"grad_norm": 0.6198984384536743,
"learning_rate": 0.0005453787307402219,
"loss": 3.8389,
"step": 8550
},
{
"epoch": 0.9256269508126144,
"grad_norm": 0.600885272026062,
"learning_rate": 0.0005450554897101605,
"loss": 3.8547,
"step": 8600
},
{
"epoch": 0.9310085028522226,
"grad_norm": 0.5399230122566223,
"learning_rate": 0.0005447322486800991,
"loss": 3.8578,
"step": 8650
},
{
"epoch": 0.9363900548918308,
"grad_norm": 0.5042197108268738,
"learning_rate": 0.0005444090076500377,
"loss": 3.8613,
"step": 8700
},
{
"epoch": 0.941771606931439,
"grad_norm": 0.5343637466430664,
"learning_rate": 0.0005440857666199763,
"loss": 3.8458,
"step": 8750
},
{
"epoch": 0.9471531589710472,
"grad_norm": 0.5491181015968323,
"learning_rate": 0.0005437625255899148,
"loss": 3.8335,
"step": 8800
},
{
"epoch": 0.9525347110106555,
"grad_norm": 0.5667814612388611,
"learning_rate": 0.0005434392845598534,
"loss": 3.845,
"step": 8850
},
{
"epoch": 0.9579162630502637,
"grad_norm": 0.5124309659004211,
"learning_rate": 0.000543116043529792,
"loss": 3.8512,
"step": 8900
},
{
"epoch": 0.9632978150898719,
"grad_norm": 0.5880436301231384,
"learning_rate": 0.0005427928024997306,
"loss": 3.8386,
"step": 8950
},
{
"epoch": 0.9686793671294801,
"grad_norm": 0.5593850016593933,
"learning_rate": 0.0005424695614696692,
"loss": 3.8266,
"step": 9000
},
{
"epoch": 0.9686793671294801,
"eval_accuracy": 0.34156187005092886,
"eval_loss": 3.7744014263153076,
"eval_runtime": 179.4219,
"eval_samples_per_second": 100.384,
"eval_steps_per_second": 6.276,
"step": 9000
},
{
"epoch": 0.9740609191690883,
"grad_norm": 0.6109694838523865,
"learning_rate": 0.0005421463204396078,
"loss": 3.8173,
"step": 9050
},
{
"epoch": 0.9794424712086965,
"grad_norm": 0.5747610330581665,
"learning_rate": 0.0005418295442301476,
"loss": 3.8562,
"step": 9100
},
{
"epoch": 0.9848240232483048,
"grad_norm": 0.536498486995697,
"learning_rate": 0.0005415063032000861,
"loss": 3.8285,
"step": 9150
},
{
"epoch": 0.9902055752879131,
"grad_norm": 0.5872951745986938,
"learning_rate": 0.0005411830621700248,
"loss": 3.8311,
"step": 9200
},
{
"epoch": 0.9955871273275213,
"grad_norm": 0.6314157843589783,
"learning_rate": 0.0005408598211399633,
"loss": 3.8067,
"step": 9250
},
{
"epoch": 1.0009686793671295,
"grad_norm": 0.5174910426139832,
"learning_rate": 0.0005405365801099019,
"loss": 3.8005,
"step": 9300
},
{
"epoch": 1.0063502314067376,
"grad_norm": 0.5861645936965942,
"learning_rate": 0.0005402133390798405,
"loss": 3.7532,
"step": 9350
},
{
"epoch": 1.011731783446346,
"grad_norm": 0.6189785003662109,
"learning_rate": 0.000539890098049779,
"loss": 3.7568,
"step": 9400
},
{
"epoch": 1.017113335485954,
"grad_norm": 0.5914862751960754,
"learning_rate": 0.0005395668570197177,
"loss": 3.7533,
"step": 9450
},
{
"epoch": 1.0224948875255624,
"grad_norm": 0.618517279624939,
"learning_rate": 0.0005392436159896562,
"loss": 3.7495,
"step": 9500
},
{
"epoch": 1.0278764395651705,
"grad_norm": 0.5718392133712769,
"learning_rate": 0.0005389203749595948,
"loss": 3.7624,
"step": 9550
},
{
"epoch": 1.0332579916047788,
"grad_norm": 0.5652843713760376,
"learning_rate": 0.0005385971339295334,
"loss": 3.7637,
"step": 9600
},
{
"epoch": 1.0386395436443872,
"grad_norm": 0.6264984011650085,
"learning_rate": 0.000538273892899472,
"loss": 3.7699,
"step": 9650
},
{
"epoch": 1.0440210956839953,
"grad_norm": 0.5926131010055542,
"learning_rate": 0.0005379506518694106,
"loss": 3.7665,
"step": 9700
},
{
"epoch": 1.0494026477236036,
"grad_norm": 0.539849579334259,
"learning_rate": 0.0005376274108393491,
"loss": 3.7516,
"step": 9750
},
{
"epoch": 1.0547841997632117,
"grad_norm": 0.626375138759613,
"learning_rate": 0.0005373041698092877,
"loss": 3.7572,
"step": 9800
},
{
"epoch": 1.06016575180282,
"grad_norm": 0.5491912364959717,
"learning_rate": 0.0005369809287792263,
"loss": 3.7701,
"step": 9850
},
{
"epoch": 1.0655473038424281,
"grad_norm": 0.5011469721794128,
"learning_rate": 0.000536657687749165,
"loss": 3.7628,
"step": 9900
},
{
"epoch": 1.0709288558820365,
"grad_norm": 0.6251816153526306,
"learning_rate": 0.0005363344467191035,
"loss": 3.7487,
"step": 9950
},
{
"epoch": 1.0763104079216446,
"grad_norm": 0.702565610408783,
"learning_rate": 0.000536011205689042,
"loss": 3.7541,
"step": 10000
},
{
"epoch": 1.0763104079216446,
"eval_accuracy": 0.34536037500892314,
"eval_loss": 3.741765022277832,
"eval_runtime": 180.285,
"eval_samples_per_second": 99.903,
"eval_steps_per_second": 6.246,
"step": 10000
},
{
"epoch": 1.081691959961253,
"grad_norm": 0.5714436769485474,
"learning_rate": 0.0005356879646589807,
"loss": 3.7614,
"step": 10050
},
{
"epoch": 1.087073512000861,
"grad_norm": 0.5856513977050781,
"learning_rate": 0.0005353647236289192,
"loss": 3.7646,
"step": 10100
},
{
"epoch": 1.0924550640404693,
"grad_norm": 0.5325464606285095,
"learning_rate": 0.0005350414825988579,
"loss": 3.77,
"step": 10150
},
{
"epoch": 1.0978366160800774,
"grad_norm": 0.6802165508270264,
"learning_rate": 0.0005347182415687964,
"loss": 3.745,
"step": 10200
},
{
"epoch": 1.1032181681196858,
"grad_norm": 0.5991120934486389,
"learning_rate": 0.000534395000538735,
"loss": 3.7629,
"step": 10250
},
{
"epoch": 1.1085997201592939,
"grad_norm": 0.5548239350318909,
"learning_rate": 0.0005340717595086736,
"loss": 3.7561,
"step": 10300
},
{
"epoch": 1.1139812721989022,
"grad_norm": 0.5725270509719849,
"learning_rate": 0.0005337485184786122,
"loss": 3.7547,
"step": 10350
},
{
"epoch": 1.1193628242385103,
"grad_norm": 0.6069482564926147,
"learning_rate": 0.0005334252774485507,
"loss": 3.755,
"step": 10400
},
{
"epoch": 1.1247443762781186,
"grad_norm": 0.5790407657623291,
"learning_rate": 0.0005331020364184894,
"loss": 3.742,
"step": 10450
},
{
"epoch": 1.1301259283177267,
"grad_norm": 0.5913881659507751,
"learning_rate": 0.0005327787953884279,
"loss": 3.7511,
"step": 10500
},
{
"epoch": 1.135507480357335,
"grad_norm": 0.6094677448272705,
"learning_rate": 0.0005324555543583665,
"loss": 3.7278,
"step": 10550
},
{
"epoch": 1.1408890323969434,
"grad_norm": 0.5710995197296143,
"learning_rate": 0.0005321323133283051,
"loss": 3.7383,
"step": 10600
},
{
"epoch": 1.1462705844365515,
"grad_norm": 0.532470703125,
"learning_rate": 0.0005318090722982436,
"loss": 3.7373,
"step": 10650
},
{
"epoch": 1.1516521364761596,
"grad_norm": 0.5976846218109131,
"learning_rate": 0.0005314858312681823,
"loss": 3.7609,
"step": 10700
},
{
"epoch": 1.157033688515768,
"grad_norm": 0.5880893468856812,
"learning_rate": 0.0005311625902381209,
"loss": 3.7448,
"step": 10750
},
{
"epoch": 1.1624152405553763,
"grad_norm": 0.6322693228721619,
"learning_rate": 0.0005308393492080595,
"loss": 3.7477,
"step": 10800
},
{
"epoch": 1.1677967925949844,
"grad_norm": 0.5472283363342285,
"learning_rate": 0.000530516108177998,
"loss": 3.737,
"step": 10850
},
{
"epoch": 1.1731783446345927,
"grad_norm": 0.5412709712982178,
"learning_rate": 0.0005301928671479365,
"loss": 3.7412,
"step": 10900
},
{
"epoch": 1.1785598966742008,
"grad_norm": 0.5746583342552185,
"learning_rate": 0.0005298696261178752,
"loss": 3.7427,
"step": 10950
},
{
"epoch": 1.1839414487138091,
"grad_norm": 0.543925940990448,
"learning_rate": 0.0005295463850878138,
"loss": 3.7349,
"step": 11000
},
{
"epoch": 1.1839414487138091,
"eval_accuracy": 0.34741369728284116,
"eval_loss": 3.7178292274475098,
"eval_runtime": 179.9682,
"eval_samples_per_second": 100.079,
"eval_steps_per_second": 6.257,
"step": 11000
},
{
"epoch": 1.1893230007534172,
"grad_norm": 0.6048632860183716,
"learning_rate": 0.0005292231440577524,
"loss": 3.7374,
"step": 11050
},
{
"epoch": 1.1947045527930256,
"grad_norm": 0.5460487008094788,
"learning_rate": 0.0005288999030276909,
"loss": 3.7464,
"step": 11100
},
{
"epoch": 1.2000861048326337,
"grad_norm": 0.6168481111526489,
"learning_rate": 0.0005285766619976295,
"loss": 3.7319,
"step": 11150
},
{
"epoch": 1.205467656872242,
"grad_norm": 0.5460136532783508,
"learning_rate": 0.0005282534209675681,
"loss": 3.7478,
"step": 11200
},
{
"epoch": 1.21084920891185,
"grad_norm": 0.5822840332984924,
"learning_rate": 0.0005279301799375066,
"loss": 3.7287,
"step": 11250
},
{
"epoch": 1.2162307609514584,
"grad_norm": 0.528600811958313,
"learning_rate": 0.0005276069389074453,
"loss": 3.7461,
"step": 11300
},
{
"epoch": 1.2216123129910665,
"grad_norm": 0.5433237552642822,
"learning_rate": 0.0005272836978773838,
"loss": 3.738,
"step": 11350
},
{
"epoch": 1.2269938650306749,
"grad_norm": 0.5144934058189392,
"learning_rate": 0.0005269604568473225,
"loss": 3.6953,
"step": 11400
},
{
"epoch": 1.232375417070283,
"grad_norm": 0.5954382419586182,
"learning_rate": 0.000526637215817261,
"loss": 3.7262,
"step": 11450
},
{
"epoch": 1.2377569691098913,
"grad_norm": 0.6082680225372314,
"learning_rate": 0.0005263139747871996,
"loss": 3.7328,
"step": 11500
},
{
"epoch": 1.2431385211494996,
"grad_norm": 0.510694682598114,
"learning_rate": 0.0005259907337571381,
"loss": 3.7532,
"step": 11550
},
{
"epoch": 1.2485200731891077,
"grad_norm": 0.5158873796463013,
"learning_rate": 0.0005256674927270768,
"loss": 3.7257,
"step": 11600
},
{
"epoch": 1.2539016252287158,
"grad_norm": 0.559540331363678,
"learning_rate": 0.0005253442516970154,
"loss": 3.7084,
"step": 11650
},
{
"epoch": 1.2592831772683242,
"grad_norm": 0.6382985711097717,
"learning_rate": 0.000525021010666954,
"loss": 3.7259,
"step": 11700
},
{
"epoch": 1.2646647293079325,
"grad_norm": 0.6098189353942871,
"learning_rate": 0.0005246977696368925,
"loss": 3.7354,
"step": 11750
},
{
"epoch": 1.2700462813475406,
"grad_norm": 0.5487571358680725,
"learning_rate": 0.0005243745286068311,
"loss": 3.7322,
"step": 11800
},
{
"epoch": 1.275427833387149,
"grad_norm": 0.5637610554695129,
"learning_rate": 0.0005240577523973709,
"loss": 3.7314,
"step": 11850
},
{
"epoch": 1.280809385426757,
"grad_norm": 0.5707816481590271,
"learning_rate": 0.0005237345113673095,
"loss": 3.7303,
"step": 11900
},
{
"epoch": 1.2861909374663654,
"grad_norm": 0.5442221164703369,
"learning_rate": 0.0005234112703372481,
"loss": 3.7523,
"step": 11950
},
{
"epoch": 1.2915724895059735,
"grad_norm": 0.7692139148712158,
"learning_rate": 0.0005230880293071867,
"loss": 3.7312,
"step": 12000
},
{
"epoch": 1.2915724895059735,
"eval_accuracy": 0.3501756536910853,
"eval_loss": 3.6950345039367676,
"eval_runtime": 182.9419,
"eval_samples_per_second": 98.452,
"eval_steps_per_second": 6.155,
"step": 12000
},
{
"epoch": 1.2969540415455818,
"grad_norm": 0.5720347762107849,
"learning_rate": 0.0005227647882771253,
"loss": 3.7174,
"step": 12050
},
{
"epoch": 1.30233559358519,
"grad_norm": 0.5530905723571777,
"learning_rate": 0.0005224415472470639,
"loss": 3.7105,
"step": 12100
},
{
"epoch": 1.3077171456247982,
"grad_norm": 0.5424105525016785,
"learning_rate": 0.0005221183062170024,
"loss": 3.7247,
"step": 12150
},
{
"epoch": 1.3130986976644063,
"grad_norm": 0.5885405540466309,
"learning_rate": 0.0005217950651869409,
"loss": 3.7127,
"step": 12200
},
{
"epoch": 1.3184802497040147,
"grad_norm": 0.5528365969657898,
"learning_rate": 0.0005214718241568796,
"loss": 3.7347,
"step": 12250
},
{
"epoch": 1.3238618017436228,
"grad_norm": 0.5199754238128662,
"learning_rate": 0.0005211485831268182,
"loss": 3.7388,
"step": 12300
},
{
"epoch": 1.329243353783231,
"grad_norm": 0.581535279750824,
"learning_rate": 0.0005208253420967568,
"loss": 3.7209,
"step": 12350
},
{
"epoch": 1.3346249058228392,
"grad_norm": 0.614345371723175,
"learning_rate": 0.0005205021010666953,
"loss": 3.7146,
"step": 12400
},
{
"epoch": 1.3400064578624475,
"grad_norm": 0.6020660400390625,
"learning_rate": 0.0005201788600366339,
"loss": 3.7209,
"step": 12450
},
{
"epoch": 1.3453880099020559,
"grad_norm": 0.5743170976638794,
"learning_rate": 0.0005198556190065725,
"loss": 3.7123,
"step": 12500
},
{
"epoch": 1.350769561941664,
"grad_norm": 0.5332421660423279,
"learning_rate": 0.0005195323779765112,
"loss": 3.7184,
"step": 12550
},
{
"epoch": 1.356151113981272,
"grad_norm": 0.5720219612121582,
"learning_rate": 0.0005192091369464497,
"loss": 3.7093,
"step": 12600
},
{
"epoch": 1.3615326660208804,
"grad_norm": 0.5706057548522949,
"learning_rate": 0.0005188858959163882,
"loss": 3.7122,
"step": 12650
},
{
"epoch": 1.3669142180604887,
"grad_norm": 0.5560119152069092,
"learning_rate": 0.0005185626548863269,
"loss": 3.7149,
"step": 12700
},
{
"epoch": 1.3722957701000968,
"grad_norm": 0.6042450666427612,
"learning_rate": 0.0005182394138562654,
"loss": 3.7158,
"step": 12750
},
{
"epoch": 1.3776773221397052,
"grad_norm": 0.6797989010810852,
"learning_rate": 0.0005179161728262041,
"loss": 3.7213,
"step": 12800
},
{
"epoch": 1.3830588741793133,
"grad_norm": 0.6140666007995605,
"learning_rate": 0.0005175929317961426,
"loss": 3.7106,
"step": 12850
},
{
"epoch": 1.3884404262189216,
"grad_norm": 0.5921536684036255,
"learning_rate": 0.0005172696907660812,
"loss": 3.7036,
"step": 12900
},
{
"epoch": 1.3938219782585297,
"grad_norm": 0.547536313533783,
"learning_rate": 0.0005169464497360198,
"loss": 3.7023,
"step": 12950
},
{
"epoch": 1.399203530298138,
"grad_norm": 0.5648983716964722,
"learning_rate": 0.0005166232087059583,
"loss": 3.7127,
"step": 13000
},
{
"epoch": 1.399203530298138,
"eval_accuracy": 0.35228004282227615,
"eval_loss": 3.673454761505127,
"eval_runtime": 182.9372,
"eval_samples_per_second": 98.455,
"eval_steps_per_second": 6.155,
"step": 13000
},
{
"epoch": 1.4045850823377461,
"grad_norm": 0.5507751107215881,
"learning_rate": 0.0005162999676758969,
"loss": 3.7025,
"step": 13050
},
{
"epoch": 1.4099666343773545,
"grad_norm": 0.5451176166534424,
"learning_rate": 0.0005159767266458355,
"loss": 3.7267,
"step": 13100
},
{
"epoch": 1.4153481864169626,
"grad_norm": 0.6242395043373108,
"learning_rate": 0.0005156534856157741,
"loss": 3.6888,
"step": 13150
},
{
"epoch": 1.420729738456571,
"grad_norm": 0.5988582372665405,
"learning_rate": 0.0005153302445857127,
"loss": 3.7165,
"step": 13200
},
{
"epoch": 1.426111290496179,
"grad_norm": 0.5528572201728821,
"learning_rate": 0.0005150070035556513,
"loss": 3.6995,
"step": 13250
},
{
"epoch": 1.4314928425357873,
"grad_norm": 0.581158459186554,
"learning_rate": 0.0005146837625255898,
"loss": 3.7006,
"step": 13300
},
{
"epoch": 1.4368743945753955,
"grad_norm": 0.5531030297279358,
"learning_rate": 0.0005143605214955285,
"loss": 3.6895,
"step": 13350
},
{
"epoch": 1.4422559466150038,
"grad_norm": 0.5417513847351074,
"learning_rate": 0.0005140372804654671,
"loss": 3.6885,
"step": 13400
},
{
"epoch": 1.447637498654612,
"grad_norm": 0.5422365665435791,
"learning_rate": 0.0005137140394354056,
"loss": 3.7015,
"step": 13450
},
{
"epoch": 1.4530190506942202,
"grad_norm": 0.5661604404449463,
"learning_rate": 0.0005133907984053442,
"loss": 3.6965,
"step": 13500
},
{
"epoch": 1.4584006027338283,
"grad_norm": 0.581283688545227,
"learning_rate": 0.0005130675573752827,
"loss": 3.7015,
"step": 13550
},
{
"epoch": 1.4637821547734367,
"grad_norm": 0.5587770938873291,
"learning_rate": 0.0005127443163452214,
"loss": 3.7007,
"step": 13600
},
{
"epoch": 1.469163706813045,
"grad_norm": 0.5291188955307007,
"learning_rate": 0.00051242107531516,
"loss": 3.6855,
"step": 13650
},
{
"epoch": 1.474545258852653,
"grad_norm": 0.5866516828536987,
"learning_rate": 0.0005120978342850986,
"loss": 3.6982,
"step": 13700
},
{
"epoch": 1.4799268108922612,
"grad_norm": 0.6090840697288513,
"learning_rate": 0.0005117745932550371,
"loss": 3.7063,
"step": 13750
},
{
"epoch": 1.4853083629318695,
"grad_norm": 0.5599469542503357,
"learning_rate": 0.0005114513522249758,
"loss": 3.6826,
"step": 13800
},
{
"epoch": 1.4906899149714778,
"grad_norm": 0.5345983505249023,
"learning_rate": 0.0005111281111949143,
"loss": 3.7033,
"step": 13850
},
{
"epoch": 1.496071467011086,
"grad_norm": 0.582324206829071,
"learning_rate": 0.0005108048701648528,
"loss": 3.706,
"step": 13900
},
{
"epoch": 1.501453019050694,
"grad_norm": 0.5937202572822571,
"learning_rate": 0.0005104880939553926,
"loss": 3.6788,
"step": 13950
},
{
"epoch": 1.5068345710903024,
"grad_norm": 0.5526854395866394,
"learning_rate": 0.0005101648529253313,
"loss": 3.6974,
"step": 14000
},
{
"epoch": 1.5068345710903024,
"eval_accuracy": 0.3537209974204718,
"eval_loss": 3.6525895595550537,
"eval_runtime": 183.0004,
"eval_samples_per_second": 98.421,
"eval_steps_per_second": 6.153,
"step": 14000
},
{
"epoch": 1.5122161231299107,
"grad_norm": 0.530371904373169,
"learning_rate": 0.0005098416118952699,
"loss": 3.7083,
"step": 14050
},
{
"epoch": 1.5175976751695188,
"grad_norm": 0.5713452100753784,
"learning_rate": 0.0005095183708652085,
"loss": 3.681,
"step": 14100
},
{
"epoch": 1.5229792272091272,
"grad_norm": 0.5218368768692017,
"learning_rate": 0.000509195129835147,
"loss": 3.6881,
"step": 14150
},
{
"epoch": 1.5283607792487355,
"grad_norm": 0.5329176783561707,
"learning_rate": 0.0005088718888050856,
"loss": 3.6926,
"step": 14200
},
{
"epoch": 1.5337423312883436,
"grad_norm": 0.5380780100822449,
"learning_rate": 0.0005085486477750242,
"loss": 3.705,
"step": 14250
},
{
"epoch": 1.5391238833279517,
"grad_norm": 0.5449959635734558,
"learning_rate": 0.0005082254067449629,
"loss": 3.6826,
"step": 14300
},
{
"epoch": 1.54450543536756,
"grad_norm": 0.6050571203231812,
"learning_rate": 0.0005079021657149014,
"loss": 3.6679,
"step": 14350
},
{
"epoch": 1.5498869874071683,
"grad_norm": 0.5612210631370544,
"learning_rate": 0.0005075789246848399,
"loss": 3.6984,
"step": 14400
},
{
"epoch": 1.5552685394467765,
"grad_norm": 0.5346029996871948,
"learning_rate": 0.0005072556836547785,
"loss": 3.6831,
"step": 14450
},
{
"epoch": 1.5606500914863846,
"grad_norm": 0.5634347200393677,
"learning_rate": 0.0005069324426247171,
"loss": 3.6772,
"step": 14500
},
{
"epoch": 1.566031643525993,
"grad_norm": 0.5833031535148621,
"learning_rate": 0.0005066092015946557,
"loss": 3.6852,
"step": 14550
},
{
"epoch": 1.5714131955656012,
"grad_norm": 0.5364494919776917,
"learning_rate": 0.0005062859605645943,
"loss": 3.6904,
"step": 14600
},
{
"epoch": 1.5767947476052093,
"grad_norm": 0.592128336429596,
"learning_rate": 0.0005059627195345329,
"loss": 3.6875,
"step": 14650
},
{
"epoch": 1.5821762996448174,
"grad_norm": 0.5206913352012634,
"learning_rate": 0.0005056394785044715,
"loss": 3.6847,
"step": 14700
},
{
"epoch": 1.5875578516844258,
"grad_norm": 0.5297030806541443,
"learning_rate": 0.00050531623747441,
"loss": 3.677,
"step": 14750
},
{
"epoch": 1.592939403724034,
"grad_norm": 0.6701345443725586,
"learning_rate": 0.0005049929964443486,
"loss": 3.6901,
"step": 14800
},
{
"epoch": 1.5983209557636422,
"grad_norm": 0.5313422083854675,
"learning_rate": 0.0005046697554142871,
"loss": 3.6859,
"step": 14850
},
{
"epoch": 1.6037025078032503,
"grad_norm": 0.5316382646560669,
"learning_rate": 0.0005043465143842258,
"loss": 3.6802,
"step": 14900
},
{
"epoch": 1.6090840598428586,
"grad_norm": 0.6596531271934509,
"learning_rate": 0.0005040232733541644,
"loss": 3.6445,
"step": 14950
},
{
"epoch": 1.614465611882467,
"grad_norm": 0.6033098101615906,
"learning_rate": 0.000503700032324103,
"loss": 3.6788,
"step": 15000
},
{
"epoch": 1.614465611882467,
"eval_accuracy": 0.35575758714966643,
"eval_loss": 3.6347877979278564,
"eval_runtime": 183.0426,
"eval_samples_per_second": 98.398,
"eval_steps_per_second": 6.152,
"step": 15000
},
{
"epoch": 1.619847163922075,
"grad_norm": 0.5396302342414856,
"learning_rate": 0.0005033767912940415,
"loss": 3.663,
"step": 15050
},
{
"epoch": 1.6252287159616834,
"grad_norm": 0.6209707856178284,
"learning_rate": 0.0005030535502639802,
"loss": 3.6696,
"step": 15100
},
{
"epoch": 1.6306102680012917,
"grad_norm": 0.5924604535102844,
"learning_rate": 0.0005027303092339187,
"loss": 3.6585,
"step": 15150
},
{
"epoch": 1.6359918200408998,
"grad_norm": 0.5263779163360596,
"learning_rate": 0.0005024070682038573,
"loss": 3.6734,
"step": 15200
},
{
"epoch": 1.641373372080508,
"grad_norm": 0.5559638142585754,
"learning_rate": 0.0005020838271737959,
"loss": 3.6798,
"step": 15250
},
{
"epoch": 1.6467549241201163,
"grad_norm": 0.5919946432113647,
"learning_rate": 0.0005017605861437344,
"loss": 3.6702,
"step": 15300
},
{
"epoch": 1.6521364761597246,
"grad_norm": 0.4996275007724762,
"learning_rate": 0.0005014373451136731,
"loss": 3.673,
"step": 15350
},
{
"epoch": 1.6575180281993327,
"grad_norm": 0.5767028331756592,
"learning_rate": 0.0005011141040836116,
"loss": 3.6692,
"step": 15400
},
{
"epoch": 1.6628995802389408,
"grad_norm": 0.5547932386398315,
"learning_rate": 0.0005007908630535503,
"loss": 3.665,
"step": 15450
},
{
"epoch": 1.6682811322785491,
"grad_norm": 0.5808051228523254,
"learning_rate": 0.0005004676220234888,
"loss": 3.6697,
"step": 15500
},
{
"epoch": 1.6736626843181575,
"grad_norm": 0.6720907688140869,
"learning_rate": 0.0005001443809934273,
"loss": 3.666,
"step": 15550
},
{
"epoch": 1.6790442363577656,
"grad_norm": 0.5560029149055481,
"learning_rate": 0.000499821139963366,
"loss": 3.66,
"step": 15600
},
{
"epoch": 1.6844257883973737,
"grad_norm": 0.5708025693893433,
"learning_rate": 0.0004994978989333045,
"loss": 3.6729,
"step": 15650
},
{
"epoch": 1.689807340436982,
"grad_norm": 0.6271914839744568,
"learning_rate": 0.0004991746579032431,
"loss": 3.6725,
"step": 15700
},
{
"epoch": 1.6951888924765903,
"grad_norm": 0.5649659037590027,
"learning_rate": 0.0004988514168731817,
"loss": 3.6506,
"step": 15750
},
{
"epoch": 1.7005704445161984,
"grad_norm": 0.5502942204475403,
"learning_rate": 0.0004985281758431204,
"loss": 3.68,
"step": 15800
},
{
"epoch": 1.7059519965558065,
"grad_norm": 0.6230396628379822,
"learning_rate": 0.0004982049348130589,
"loss": 3.6764,
"step": 15850
},
{
"epoch": 1.7113335485954149,
"grad_norm": 0.5738170742988586,
"learning_rate": 0.0004978816937829975,
"loss": 3.6683,
"step": 15900
},
{
"epoch": 1.7167151006350232,
"grad_norm": 0.5674835443496704,
"learning_rate": 0.000497558452752936,
"loss": 3.6641,
"step": 15950
},
{
"epoch": 1.7220966526746313,
"grad_norm": 0.5687111616134644,
"learning_rate": 0.0004972416765434759,
"loss": 3.653,
"step": 16000
},
{
"epoch": 1.7220966526746313,
"eval_accuracy": 0.35766868279343533,
"eval_loss": 3.6169068813323975,
"eval_runtime": 182.8519,
"eval_samples_per_second": 98.5,
"eval_steps_per_second": 6.158,
"step": 16000
},
{
"epoch": 1.7274782047142396,
"grad_norm": 0.5726053714752197,
"learning_rate": 0.0004969184355134145,
"loss": 3.6704,
"step": 16050
},
{
"epoch": 1.732859756753848,
"grad_norm": 0.5218805074691772,
"learning_rate": 0.0004965951944833531,
"loss": 3.6687,
"step": 16100
},
{
"epoch": 1.738241308793456,
"grad_norm": 0.5829153060913086,
"learning_rate": 0.0004962719534532916,
"loss": 3.6701,
"step": 16150
},
{
"epoch": 1.7436228608330642,
"grad_norm": 0.6053098440170288,
"learning_rate": 0.0004959487124232302,
"loss": 3.6489,
"step": 16200
},
{
"epoch": 1.7490044128726725,
"grad_norm": 0.5329374670982361,
"learning_rate": 0.0004956254713931688,
"loss": 3.6494,
"step": 16250
},
{
"epoch": 1.7543859649122808,
"grad_norm": 0.5787749886512756,
"learning_rate": 0.0004953022303631074,
"loss": 3.6523,
"step": 16300
},
{
"epoch": 1.759767516951889,
"grad_norm": 0.5446376800537109,
"learning_rate": 0.0004949789893330459,
"loss": 3.6551,
"step": 16350
},
{
"epoch": 1.765149068991497,
"grad_norm": 0.6261696815490723,
"learning_rate": 0.0004946557483029846,
"loss": 3.6529,
"step": 16400
},
{
"epoch": 1.7705306210311054,
"grad_norm": 0.5923592448234558,
"learning_rate": 0.0004943325072729231,
"loss": 3.6742,
"step": 16450
},
{
"epoch": 1.7759121730707137,
"grad_norm": 0.5514686703681946,
"learning_rate": 0.0004940092662428617,
"loss": 3.6655,
"step": 16500
},
{
"epoch": 1.7812937251103218,
"grad_norm": 0.5481582283973694,
"learning_rate": 0.0004936860252128003,
"loss": 3.6613,
"step": 16550
},
{
"epoch": 1.78667527714993,
"grad_norm": 0.5708692073822021,
"learning_rate": 0.0004933627841827388,
"loss": 3.6644,
"step": 16600
},
{
"epoch": 1.7920568291895382,
"grad_norm": 0.5547276139259338,
"learning_rate": 0.0004930395431526775,
"loss": 3.6443,
"step": 16650
},
{
"epoch": 1.7974383812291466,
"grad_norm": 0.60475754737854,
"learning_rate": 0.0004927163021226161,
"loss": 3.666,
"step": 16700
},
{
"epoch": 1.8028199332687547,
"grad_norm": 0.568871796131134,
"learning_rate": 0.0004923930610925547,
"loss": 3.6573,
"step": 16750
},
{
"epoch": 1.8082014853083628,
"grad_norm": 0.5564674139022827,
"learning_rate": 0.0004920698200624932,
"loss": 3.6515,
"step": 16800
},
{
"epoch": 1.813583037347971,
"grad_norm": 0.5341878533363342,
"learning_rate": 0.0004917465790324317,
"loss": 3.6652,
"step": 16850
},
{
"epoch": 1.8189645893875794,
"grad_norm": 0.58695387840271,
"learning_rate": 0.0004914233380023704,
"loss": 3.6546,
"step": 16900
},
{
"epoch": 1.8243461414271875,
"grad_norm": 0.5584468245506287,
"learning_rate": 0.0004911000969723089,
"loss": 3.631,
"step": 16950
},
{
"epoch": 1.8297276934667959,
"grad_norm": 0.5749080777168274,
"learning_rate": 0.0004907768559422476,
"loss": 3.6484,
"step": 17000
},
{
"epoch": 1.8297276934667959,
"eval_accuracy": 0.35881909956961505,
"eval_loss": 3.601506233215332,
"eval_runtime": 182.9699,
"eval_samples_per_second": 98.437,
"eval_steps_per_second": 6.154,
"step": 17000
},
{
"epoch": 1.8351092455064042,
"grad_norm": 0.5957604050636292,
"learning_rate": 0.0004904536149121861,
"loss": 3.6388,
"step": 17050
},
{
"epoch": 1.8404907975460123,
"grad_norm": 0.5906072854995728,
"learning_rate": 0.0004901303738821248,
"loss": 3.6523,
"step": 17100
},
{
"epoch": 1.8458723495856204,
"grad_norm": 0.6116198301315308,
"learning_rate": 0.0004898071328520633,
"loss": 3.6424,
"step": 17150
},
{
"epoch": 1.8512539016252287,
"grad_norm": 0.5727099776268005,
"learning_rate": 0.0004894838918220019,
"loss": 3.6625,
"step": 17200
},
{
"epoch": 1.856635453664837,
"grad_norm": 0.5920504331588745,
"learning_rate": 0.0004891606507919405,
"loss": 3.6366,
"step": 17250
},
{
"epoch": 1.8620170057044452,
"grad_norm": 0.6015959978103638,
"learning_rate": 0.000488837409761879,
"loss": 3.6506,
"step": 17300
},
{
"epoch": 1.8673985577440533,
"grad_norm": 0.5476948022842407,
"learning_rate": 0.0004885141687318177,
"loss": 3.654,
"step": 17350
},
{
"epoch": 1.8727801097836616,
"grad_norm": 0.5550304651260376,
"learning_rate": 0.00048819092770175623,
"loss": 3.6382,
"step": 17400
},
{
"epoch": 1.87816166182327,
"grad_norm": 0.5779985189437866,
"learning_rate": 0.0004878676866716948,
"loss": 3.6562,
"step": 17450
},
{
"epoch": 1.883543213862878,
"grad_norm": 0.5878141522407532,
"learning_rate": 0.00048754444564163337,
"loss": 3.6424,
"step": 17500
},
{
"epoch": 1.8889247659024861,
"grad_norm": 0.5703890323638916,
"learning_rate": 0.000487221204611572,
"loss": 3.6491,
"step": 17550
},
{
"epoch": 1.8943063179420945,
"grad_norm": 0.5712037086486816,
"learning_rate": 0.00048689796358151056,
"loss": 3.6276,
"step": 17600
},
{
"epoch": 1.8996878699817028,
"grad_norm": 0.5311342477798462,
"learning_rate": 0.00048657472255144915,
"loss": 3.6438,
"step": 17650
},
{
"epoch": 1.905069422021311,
"grad_norm": 0.5727416276931763,
"learning_rate": 0.00048625148152138775,
"loss": 3.6264,
"step": 17700
},
{
"epoch": 1.910450974060919,
"grad_norm": 0.6044231653213501,
"learning_rate": 0.0004859282404913263,
"loss": 3.6242,
"step": 17750
},
{
"epoch": 1.9158325261005273,
"grad_norm": 0.5727190971374512,
"learning_rate": 0.0004856049994612649,
"loss": 3.6462,
"step": 17800
},
{
"epoch": 1.9212140781401357,
"grad_norm": 0.5734631419181824,
"learning_rate": 0.00048528175843120353,
"loss": 3.6472,
"step": 17850
},
{
"epoch": 1.9265956301797438,
"grad_norm": 0.5388041734695435,
"learning_rate": 0.0004849585174011421,
"loss": 3.6489,
"step": 17900
},
{
"epoch": 1.931977182219352,
"grad_norm": 0.5606278777122498,
"learning_rate": 0.00048463527637108067,
"loss": 3.6294,
"step": 17950
},
{
"epoch": 1.9373587342589604,
"grad_norm": 0.5878045558929443,
"learning_rate": 0.0004843120353410192,
"loss": 3.6472,
"step": 18000
},
{
"epoch": 1.9373587342589604,
"eval_accuracy": 0.36079995036736084,
"eval_loss": 3.5877466201782227,
"eval_runtime": 182.9553,
"eval_samples_per_second": 98.445,
"eval_steps_per_second": 6.155,
"step": 18000
},
{
"epoch": 1.9427402862985685,
"grad_norm": 0.5944440960884094,
"learning_rate": 0.00048399525913155907,
"loss": 3.6349,
"step": 18050
},
{
"epoch": 1.9481218383381766,
"grad_norm": 0.6114388704299927,
"learning_rate": 0.0004836720181014976,
"loss": 3.6319,
"step": 18100
},
{
"epoch": 1.953503390377785,
"grad_norm": 0.5699650645256042,
"learning_rate": 0.0004833487770714362,
"loss": 3.6309,
"step": 18150
},
{
"epoch": 1.9588849424173933,
"grad_norm": 0.5740841031074524,
"learning_rate": 0.00048302553604137485,
"loss": 3.6278,
"step": 18200
},
{
"epoch": 1.9642664944570014,
"grad_norm": 0.6211947798728943,
"learning_rate": 0.0004827022950113134,
"loss": 3.6346,
"step": 18250
},
{
"epoch": 1.9696480464966095,
"grad_norm": 0.5401277542114258,
"learning_rate": 0.000482379053981252,
"loss": 3.6603,
"step": 18300
},
{
"epoch": 1.9750295985362178,
"grad_norm": 0.5557017922401428,
"learning_rate": 0.0004820558129511906,
"loss": 3.629,
"step": 18350
},
{
"epoch": 1.9804111505758262,
"grad_norm": 0.592074453830719,
"learning_rate": 0.0004817325719211291,
"loss": 3.6235,
"step": 18400
},
{
"epoch": 1.9857927026154343,
"grad_norm": 0.600532591342926,
"learning_rate": 0.0004814093308910677,
"loss": 3.6253,
"step": 18450
},
{
"epoch": 1.9911742546550424,
"grad_norm": 0.4943503141403198,
"learning_rate": 0.00048108608986100637,
"loss": 3.6225,
"step": 18500
},
{
"epoch": 1.9965558066946507,
"grad_norm": 0.5403441190719604,
"learning_rate": 0.0004807628488309449,
"loss": 3.6333,
"step": 18550
},
{
"epoch": 2.001937358734259,
"grad_norm": 0.5593751668930054,
"learning_rate": 0.0004804396078008835,
"loss": 3.5902,
"step": 18600
},
{
"epoch": 2.007318910773867,
"grad_norm": 0.5596261024475098,
"learning_rate": 0.00048011636677082204,
"loss": 3.5438,
"step": 18650
},
{
"epoch": 2.0127004628134753,
"grad_norm": 0.5620816946029663,
"learning_rate": 0.00047979312574076064,
"loss": 3.5367,
"step": 18700
},
{
"epoch": 2.018082014853084,
"grad_norm": 0.6553834080696106,
"learning_rate": 0.0004794698847106992,
"loss": 3.5336,
"step": 18750
},
{
"epoch": 2.023463566892692,
"grad_norm": 0.5942727327346802,
"learning_rate": 0.0004791466436806378,
"loss": 3.5532,
"step": 18800
},
{
"epoch": 2.0288451189323,
"grad_norm": 0.563507616519928,
"learning_rate": 0.0004788234026505764,
"loss": 3.565,
"step": 18850
},
{
"epoch": 2.034226670971908,
"grad_norm": 0.5451712608337402,
"learning_rate": 0.00047850016162051496,
"loss": 3.55,
"step": 18900
},
{
"epoch": 2.0396082230115167,
"grad_norm": 0.6260777711868286,
"learning_rate": 0.00047817692059045356,
"loss": 3.5395,
"step": 18950
},
{
"epoch": 2.044989775051125,
"grad_norm": 0.6237614750862122,
"learning_rate": 0.00047785367956039215,
"loss": 3.5601,
"step": 19000
},
{
"epoch": 2.044989775051125,
"eval_accuracy": 0.36222699739591624,
"eval_loss": 3.5780997276306152,
"eval_runtime": 182.9631,
"eval_samples_per_second": 98.441,
"eval_steps_per_second": 6.154,
"step": 19000
},
{
"epoch": 2.050371327090733,
"grad_norm": 0.5645211935043335,
"learning_rate": 0.00047753043853033075,
"loss": 3.5712,
"step": 19050
},
{
"epoch": 2.055752879130341,
"grad_norm": 0.550080418586731,
"learning_rate": 0.00047720719750026934,
"loss": 3.5519,
"step": 19100
},
{
"epoch": 2.0611344311699495,
"grad_norm": 0.5775645971298218,
"learning_rate": 0.00047688395647020793,
"loss": 3.5345,
"step": 19150
},
{
"epoch": 2.0665159832095576,
"grad_norm": 0.6265087127685547,
"learning_rate": 0.0004765607154401465,
"loss": 3.5604,
"step": 19200
},
{
"epoch": 2.0718975352491658,
"grad_norm": 0.645406186580658,
"learning_rate": 0.00047623747441008507,
"loss": 3.5486,
"step": 19250
},
{
"epoch": 2.0772790872887743,
"grad_norm": 0.5847898721694946,
"learning_rate": 0.0004759142333800236,
"loss": 3.5457,
"step": 19300
},
{
"epoch": 2.0826606393283824,
"grad_norm": 0.5650346875190735,
"learning_rate": 0.00047559099234996226,
"loss": 3.5555,
"step": 19350
},
{
"epoch": 2.0880421913679905,
"grad_norm": 0.599783182144165,
"learning_rate": 0.00047526775131990085,
"loss": 3.5459,
"step": 19400
},
{
"epoch": 2.0934237434075986,
"grad_norm": 0.5775169730186462,
"learning_rate": 0.0004749445102898394,
"loss": 3.5612,
"step": 19450
},
{
"epoch": 2.098805295447207,
"grad_norm": 0.6146634817123413,
"learning_rate": 0.000474621269259778,
"loss": 3.5666,
"step": 19500
},
{
"epoch": 2.1041868474868153,
"grad_norm": 0.6270310878753662,
"learning_rate": 0.0004742980282297166,
"loss": 3.5408,
"step": 19550
},
{
"epoch": 2.1095683995264234,
"grad_norm": 0.517265796661377,
"learning_rate": 0.0004739747871996551,
"loss": 3.5576,
"step": 19600
},
{
"epoch": 2.1149499515660315,
"grad_norm": 0.6480297446250916,
"learning_rate": 0.00047365154616959377,
"loss": 3.542,
"step": 19650
},
{
"epoch": 2.12033150360564,
"grad_norm": 0.5682874321937561,
"learning_rate": 0.00047332830513953237,
"loss": 3.5669,
"step": 19700
},
{
"epoch": 2.125713055645248,
"grad_norm": 0.6224113702774048,
"learning_rate": 0.0004730050641094709,
"loss": 3.5485,
"step": 19750
},
{
"epoch": 2.1310946076848563,
"grad_norm": 0.554499089717865,
"learning_rate": 0.0004726818230794095,
"loss": 3.5338,
"step": 19800
},
{
"epoch": 2.1364761597244644,
"grad_norm": 0.6027452349662781,
"learning_rate": 0.0004723650468699493,
"loss": 3.5481,
"step": 19850
},
{
"epoch": 2.141857711764073,
"grad_norm": 0.6047434210777283,
"learning_rate": 0.0004720418058398879,
"loss": 3.5465,
"step": 19900
},
{
"epoch": 2.147239263803681,
"grad_norm": 0.5787104964256287,
"learning_rate": 0.00047171856480982644,
"loss": 3.5562,
"step": 19950
},
{
"epoch": 2.152620815843289,
"grad_norm": 0.5642232298851013,
"learning_rate": 0.0004713953237797651,
"loss": 3.5694,
"step": 20000
},
{
"epoch": 2.152620815843289,
"eval_accuracy": 0.36314848253747084,
"eval_loss": 3.5687355995178223,
"eval_runtime": 179.7649,
"eval_samples_per_second": 100.192,
"eval_steps_per_second": 6.264,
"step": 20000
},
{
"epoch": 2.1580023678828972,
"grad_norm": 0.6349261999130249,
"learning_rate": 0.0004710720827497037,
"loss": 3.5747,
"step": 20050
},
{
"epoch": 2.163383919922506,
"grad_norm": 0.5958331823348999,
"learning_rate": 0.00047074884171964223,
"loss": 3.5547,
"step": 20100
},
{
"epoch": 2.168765471962114,
"grad_norm": 0.6347177028656006,
"learning_rate": 0.0004704256006895808,
"loss": 3.561,
"step": 20150
},
{
"epoch": 2.174147024001722,
"grad_norm": 0.6787843108177185,
"learning_rate": 0.00047010235965951936,
"loss": 3.5584,
"step": 20200
},
{
"epoch": 2.1795285760413305,
"grad_norm": 0.5568081140518188,
"learning_rate": 0.00046977911862945796,
"loss": 3.5667,
"step": 20250
},
{
"epoch": 2.1849101280809387,
"grad_norm": 0.6003414988517761,
"learning_rate": 0.0004694558775993966,
"loss": 3.5515,
"step": 20300
},
{
"epoch": 2.1902916801205468,
"grad_norm": 0.5988909602165222,
"learning_rate": 0.00046913263656933515,
"loss": 3.5563,
"step": 20350
},
{
"epoch": 2.195673232160155,
"grad_norm": 0.6274018287658691,
"learning_rate": 0.00046880939553927374,
"loss": 3.55,
"step": 20400
},
{
"epoch": 2.2010547841997634,
"grad_norm": 0.5922993421554565,
"learning_rate": 0.00046848615450921234,
"loss": 3.5385,
"step": 20450
},
{
"epoch": 2.2064363362393715,
"grad_norm": 0.6406957507133484,
"learning_rate": 0.0004681629134791509,
"loss": 3.5292,
"step": 20500
},
{
"epoch": 2.2118178882789796,
"grad_norm": 0.5569520592689514,
"learning_rate": 0.00046783967244908947,
"loss": 3.5393,
"step": 20550
},
{
"epoch": 2.2171994403185877,
"grad_norm": 0.5793364644050598,
"learning_rate": 0.0004675164314190281,
"loss": 3.5537,
"step": 20600
},
{
"epoch": 2.2225809923581963,
"grad_norm": 0.5888959765434265,
"learning_rate": 0.00046719319038896666,
"loss": 3.5403,
"step": 20650
},
{
"epoch": 2.2279625443978044,
"grad_norm": 0.5922018885612488,
"learning_rate": 0.00046686994935890526,
"loss": 3.547,
"step": 20700
},
{
"epoch": 2.2333440964374125,
"grad_norm": 0.6159331798553467,
"learning_rate": 0.0004665467083288438,
"loss": 3.5509,
"step": 20750
},
{
"epoch": 2.2387256484770206,
"grad_norm": 0.6433561444282532,
"learning_rate": 0.0004662234672987824,
"loss": 3.5546,
"step": 20800
},
{
"epoch": 2.244107200516629,
"grad_norm": 0.5780196189880371,
"learning_rate": 0.00046590022626872104,
"loss": 3.5505,
"step": 20850
},
{
"epoch": 2.2494887525562373,
"grad_norm": 0.6302474141120911,
"learning_rate": 0.0004655769852386596,
"loss": 3.5528,
"step": 20900
},
{
"epoch": 2.2548703045958454,
"grad_norm": 0.6163661479949951,
"learning_rate": 0.0004652537442085982,
"loss": 3.5506,
"step": 20950
},
{
"epoch": 2.2602518566354535,
"grad_norm": 0.5856783390045166,
"learning_rate": 0.00046493050317853677,
"loss": 3.5485,
"step": 21000
},
{
"epoch": 2.2602518566354535,
"eval_accuracy": 0.364305092528256,
"eval_loss": 3.5584065914154053,
"eval_runtime": 179.7572,
"eval_samples_per_second": 100.196,
"eval_steps_per_second": 6.264,
"step": 21000
},
{
"epoch": 2.265633408675062,
"grad_norm": 0.5490982532501221,
"learning_rate": 0.0004646072621484753,
"loss": 3.5765,
"step": 21050
},
{
"epoch": 2.27101496071467,
"grad_norm": 0.6259151697158813,
"learning_rate": 0.0004642840211184139,
"loss": 3.5557,
"step": 21100
},
{
"epoch": 2.2763965127542782,
"grad_norm": 0.6125593185424805,
"learning_rate": 0.00046396078008835255,
"loss": 3.562,
"step": 21150
},
{
"epoch": 2.281778064793887,
"grad_norm": 0.596523642539978,
"learning_rate": 0.0004636375390582911,
"loss": 3.544,
"step": 21200
},
{
"epoch": 2.287159616833495,
"grad_norm": 0.6259929537773132,
"learning_rate": 0.0004633142980282297,
"loss": 3.5504,
"step": 21250
},
{
"epoch": 2.292541168873103,
"grad_norm": 0.5609055161476135,
"learning_rate": 0.00046299105699816823,
"loss": 3.5574,
"step": 21300
},
{
"epoch": 2.297922720912711,
"grad_norm": 0.6183776259422302,
"learning_rate": 0.0004626678159681068,
"loss": 3.5536,
"step": 21350
},
{
"epoch": 2.303304272952319,
"grad_norm": 0.531791090965271,
"learning_rate": 0.0004623445749380454,
"loss": 3.5414,
"step": 21400
},
{
"epoch": 2.3086858249919278,
"grad_norm": 0.558052122592926,
"learning_rate": 0.000462021333907984,
"loss": 3.538,
"step": 21450
},
{
"epoch": 2.314067377031536,
"grad_norm": 0.5366657972335815,
"learning_rate": 0.0004616980928779226,
"loss": 3.5508,
"step": 21500
},
{
"epoch": 2.319448929071144,
"grad_norm": 0.5605040192604065,
"learning_rate": 0.0004613748518478612,
"loss": 3.5527,
"step": 21550
},
{
"epoch": 2.3248304811107525,
"grad_norm": 0.5950835347175598,
"learning_rate": 0.00046105161081779974,
"loss": 3.553,
"step": 21600
},
{
"epoch": 2.3302120331503606,
"grad_norm": 0.5673235058784485,
"learning_rate": 0.00046072836978773834,
"loss": 3.5484,
"step": 21650
},
{
"epoch": 2.3355935851899687,
"grad_norm": 0.5962451100349426,
"learning_rate": 0.000460405128757677,
"loss": 3.5665,
"step": 21700
},
{
"epoch": 2.340975137229577,
"grad_norm": 0.5817105770111084,
"learning_rate": 0.0004600818877276155,
"loss": 3.5456,
"step": 21750
},
{
"epoch": 2.3463566892691854,
"grad_norm": 0.5566921234130859,
"learning_rate": 0.0004597586466975541,
"loss": 3.5419,
"step": 21800
},
{
"epoch": 2.3517382413087935,
"grad_norm": 0.5889357924461365,
"learning_rate": 0.00045943540566749266,
"loss": 3.553,
"step": 21850
},
{
"epoch": 2.3571197933484016,
"grad_norm": 0.5869264006614685,
"learning_rate": 0.00045911216463743126,
"loss": 3.549,
"step": 21900
},
{
"epoch": 2.3625013453880097,
"grad_norm": 0.5697734951972961,
"learning_rate": 0.00045878892360736985,
"loss": 3.5583,
"step": 21950
},
{
"epoch": 2.3678828974276183,
"grad_norm": 0.6061686873435974,
"learning_rate": 0.00045846568257730845,
"loss": 3.5613,
"step": 22000
},
{
"epoch": 2.3678828974276183,
"eval_accuracy": 0.36498678074640406,
"eval_loss": 3.547973871231079,
"eval_runtime": 180.0761,
"eval_samples_per_second": 100.019,
"eval_steps_per_second": 6.253,
"step": 22000
},
{
"epoch": 2.3732644494672264,
"grad_norm": 0.5746018290519714,
"learning_rate": 0.00045814244154724704,
"loss": 3.5496,
"step": 22050
},
{
"epoch": 2.3786460015068345,
"grad_norm": 0.6272563338279724,
"learning_rate": 0.00045781920051718563,
"loss": 3.5636,
"step": 22100
},
{
"epoch": 2.384027553546443,
"grad_norm": 0.5796659588813782,
"learning_rate": 0.0004574959594871242,
"loss": 3.5412,
"step": 22150
},
{
"epoch": 2.389409105586051,
"grad_norm": 0.5960429906845093,
"learning_rate": 0.00045717271845706277,
"loss": 3.5478,
"step": 22200
},
{
"epoch": 2.3947906576256592,
"grad_norm": 0.6166843175888062,
"learning_rate": 0.0004568494774270013,
"loss": 3.5584,
"step": 22250
},
{
"epoch": 2.4001722096652673,
"grad_norm": 0.5762814879417419,
"learning_rate": 0.00045652623639693996,
"loss": 3.537,
"step": 22300
},
{
"epoch": 2.4055537617048754,
"grad_norm": 0.5935600399971008,
"learning_rate": 0.00045620299536687855,
"loss": 3.5506,
"step": 22350
},
{
"epoch": 2.410935313744484,
"grad_norm": 0.5715270042419434,
"learning_rate": 0.0004558797543368171,
"loss": 3.5349,
"step": 22400
},
{
"epoch": 2.416316865784092,
"grad_norm": 0.5746564269065857,
"learning_rate": 0.0004555565133067557,
"loss": 3.5393,
"step": 22450
},
{
"epoch": 2.4216984178237,
"grad_norm": 0.6002961993217468,
"learning_rate": 0.0004552332722766943,
"loss": 3.5703,
"step": 22500
},
{
"epoch": 2.4270799698633088,
"grad_norm": 0.6364290118217468,
"learning_rate": 0.0004549100312466328,
"loss": 3.535,
"step": 22550
},
{
"epoch": 2.432461521902917,
"grad_norm": 0.5654012560844421,
"learning_rate": 0.0004545867902165715,
"loss": 3.5494,
"step": 22600
},
{
"epoch": 2.437843073942525,
"grad_norm": 0.6237375736236572,
"learning_rate": 0.00045426354918651007,
"loss": 3.5464,
"step": 22650
},
{
"epoch": 2.443224625982133,
"grad_norm": 0.5933444499969482,
"learning_rate": 0.0004539467729770499,
"loss": 3.5417,
"step": 22700
},
{
"epoch": 2.4486061780217416,
"grad_norm": 0.5668746829032898,
"learning_rate": 0.0004536235319469884,
"loss": 3.5352,
"step": 22750
},
{
"epoch": 2.4539877300613497,
"grad_norm": 0.6203579902648926,
"learning_rate": 0.000453300290916927,
"loss": 3.574,
"step": 22800
},
{
"epoch": 2.459369282100958,
"grad_norm": 0.5573844909667969,
"learning_rate": 0.0004529770498868656,
"loss": 3.5529,
"step": 22850
},
{
"epoch": 2.464750834140566,
"grad_norm": 0.6508901715278625,
"learning_rate": 0.00045265380885680414,
"loss": 3.531,
"step": 22900
},
{
"epoch": 2.4701323861801745,
"grad_norm": 0.5782315135002136,
"learning_rate": 0.0004523305678267428,
"loss": 3.5689,
"step": 22950
},
{
"epoch": 2.4755139382197826,
"grad_norm": 0.6106559038162231,
"learning_rate": 0.0004520073267966814,
"loss": 3.5521,
"step": 23000
},
{
"epoch": 2.4755139382197826,
"eval_accuracy": 0.3663403784227969,
"eval_loss": 3.5380020141601562,
"eval_runtime": 179.612,
"eval_samples_per_second": 100.277,
"eval_steps_per_second": 6.269,
"step": 23000
},
{
"epoch": 2.4808954902593907,
"grad_norm": 0.5985417366027832,
"learning_rate": 0.00045168408576661993,
"loss": 3.5309,
"step": 23050
},
{
"epoch": 2.4862770422989993,
"grad_norm": 0.5568486452102661,
"learning_rate": 0.0004513608447365585,
"loss": 3.5504,
"step": 23100
},
{
"epoch": 2.4916585943386074,
"grad_norm": 0.607467770576477,
"learning_rate": 0.00045103760370649706,
"loss": 3.5585,
"step": 23150
},
{
"epoch": 2.4970401463782155,
"grad_norm": 0.6122623085975647,
"learning_rate": 0.00045071436267643566,
"loss": 3.55,
"step": 23200
},
{
"epoch": 2.5024216984178236,
"grad_norm": 0.6299822330474854,
"learning_rate": 0.0004503911216463743,
"loss": 3.5361,
"step": 23250
},
{
"epoch": 2.5078032504574317,
"grad_norm": 0.5975040793418884,
"learning_rate": 0.00045006788061631285,
"loss": 3.5452,
"step": 23300
},
{
"epoch": 2.5131848024970402,
"grad_norm": 0.6091236472129822,
"learning_rate": 0.00044974463958625144,
"loss": 3.5612,
"step": 23350
},
{
"epoch": 2.5185663545366483,
"grad_norm": 0.5954560041427612,
"learning_rate": 0.00044942139855619004,
"loss": 3.5283,
"step": 23400
},
{
"epoch": 2.5239479065762565,
"grad_norm": 0.5859009623527527,
"learning_rate": 0.0004490981575261286,
"loss": 3.537,
"step": 23450
},
{
"epoch": 2.529329458615865,
"grad_norm": 0.5438385009765625,
"learning_rate": 0.0004487749164960672,
"loss": 3.5416,
"step": 23500
},
{
"epoch": 2.534711010655473,
"grad_norm": 0.5849915146827698,
"learning_rate": 0.0004484516754660058,
"loss": 3.544,
"step": 23550
},
{
"epoch": 2.540092562695081,
"grad_norm": 0.6110696792602539,
"learning_rate": 0.00044812843443594436,
"loss": 3.5456,
"step": 23600
},
{
"epoch": 2.5454741147346893,
"grad_norm": 0.6262888312339783,
"learning_rate": 0.00044780519340588296,
"loss": 3.5357,
"step": 23650
},
{
"epoch": 2.550855666774298,
"grad_norm": 0.6227126717567444,
"learning_rate": 0.0004474819523758215,
"loss": 3.5471,
"step": 23700
},
{
"epoch": 2.556237218813906,
"grad_norm": 0.607038140296936,
"learning_rate": 0.0004471587113457601,
"loss": 3.5478,
"step": 23750
},
{
"epoch": 2.561618770853514,
"grad_norm": 0.5638023614883423,
"learning_rate": 0.00044683547031569874,
"loss": 3.5658,
"step": 23800
},
{
"epoch": 2.567000322893122,
"grad_norm": 0.6163343787193298,
"learning_rate": 0.0004465122292856373,
"loss": 3.5602,
"step": 23850
},
{
"epoch": 2.5723818749327307,
"grad_norm": 0.5720522999763489,
"learning_rate": 0.0004461889882555759,
"loss": 3.5562,
"step": 23900
},
{
"epoch": 2.577763426972339,
"grad_norm": 0.636634111404419,
"learning_rate": 0.00044586574722551447,
"loss": 3.5685,
"step": 23950
},
{
"epoch": 2.583144979011947,
"grad_norm": 0.5891180634498596,
"learning_rate": 0.000445542506195453,
"loss": 3.5455,
"step": 24000
},
{
"epoch": 2.583144979011947,
"eval_accuracy": 0.36704879525134726,
"eval_loss": 3.526322364807129,
"eval_runtime": 179.7285,
"eval_samples_per_second": 100.212,
"eval_steps_per_second": 6.265,
"step": 24000
},
{
"epoch": 2.5885265310515555,
"grad_norm": 0.6043258905410767,
"learning_rate": 0.0004452192651653916,
"loss": 3.5164,
"step": 24050
},
{
"epoch": 2.5939080830911636,
"grad_norm": 0.6400256752967834,
"learning_rate": 0.00044489602413533025,
"loss": 3.5409,
"step": 24100
},
{
"epoch": 2.5992896351307717,
"grad_norm": 0.6273722648620605,
"learning_rate": 0.0004445727831052688,
"loss": 3.5391,
"step": 24150
},
{
"epoch": 2.60467118717038,
"grad_norm": 0.6210452318191528,
"learning_rate": 0.0004442495420752074,
"loss": 3.5436,
"step": 24200
},
{
"epoch": 2.610052739209988,
"grad_norm": 0.5660796761512756,
"learning_rate": 0.00044392630104514593,
"loss": 3.5256,
"step": 24250
},
{
"epoch": 2.6154342912495965,
"grad_norm": 0.5474342107772827,
"learning_rate": 0.0004436030600150845,
"loss": 3.526,
"step": 24300
},
{
"epoch": 2.6208158432892046,
"grad_norm": 0.5688294768333435,
"learning_rate": 0.0004432798189850231,
"loss": 3.5341,
"step": 24350
},
{
"epoch": 2.6261973953288127,
"grad_norm": 0.5909520387649536,
"learning_rate": 0.0004429565779549617,
"loss": 3.5394,
"step": 24400
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.5881052613258362,
"learning_rate": 0.0004426333369249003,
"loss": 3.544,
"step": 24450
},
{
"epoch": 2.6369604994080293,
"grad_norm": 0.5401953458786011,
"learning_rate": 0.0004423100958948389,
"loss": 3.5518,
"step": 24500
},
{
"epoch": 2.6423420514476375,
"grad_norm": 0.6648808121681213,
"learning_rate": 0.00044198685486477744,
"loss": 3.5483,
"step": 24550
},
{
"epoch": 2.6477236034872456,
"grad_norm": 0.5619612336158752,
"learning_rate": 0.00044166361383471604,
"loss": 3.5388,
"step": 24600
},
{
"epoch": 2.653105155526854,
"grad_norm": 0.5457707643508911,
"learning_rate": 0.0004413403728046547,
"loss": 3.5491,
"step": 24650
},
{
"epoch": 2.658486707566462,
"grad_norm": 0.6164374947547913,
"learning_rate": 0.0004410171317745932,
"loss": 3.5383,
"step": 24700
},
{
"epoch": 2.6638682596060703,
"grad_norm": 0.5915449261665344,
"learning_rate": 0.0004406938907445318,
"loss": 3.5412,
"step": 24750
},
{
"epoch": 2.6692498116456784,
"grad_norm": 0.6016857028007507,
"learning_rate": 0.00044037064971447036,
"loss": 3.5451,
"step": 24800
},
{
"epoch": 2.674631363685287,
"grad_norm": 0.5480591058731079,
"learning_rate": 0.00044004740868440896,
"loss": 3.538,
"step": 24850
},
{
"epoch": 2.680012915724895,
"grad_norm": 0.5656848549842834,
"learning_rate": 0.00043972416765434755,
"loss": 3.5566,
"step": 24900
},
{
"epoch": 2.685394467764503,
"grad_norm": 0.6277399063110352,
"learning_rate": 0.00043940092662428615,
"loss": 3.5456,
"step": 24950
},
{
"epoch": 2.6907760198041117,
"grad_norm": 0.6056678295135498,
"learning_rate": 0.00043907768559422474,
"loss": 3.5473,
"step": 25000
},
{
"epoch": 2.6907760198041117,
"eval_accuracy": 0.36839630836602244,
"eval_loss": 3.5200185775756836,
"eval_runtime": 179.6896,
"eval_samples_per_second": 100.234,
"eval_steps_per_second": 6.266,
"step": 25000
},
{
"epoch": 2.69615757184372,
"grad_norm": 0.635338544845581,
"learning_rate": 0.00043875444456416334,
"loss": 3.5616,
"step": 25050
},
{
"epoch": 2.701539123883328,
"grad_norm": 0.6054742932319641,
"learning_rate": 0.0004384312035341019,
"loss": 3.5651,
"step": 25100
},
{
"epoch": 2.706920675922936,
"grad_norm": 0.6213764548301697,
"learning_rate": 0.00043810796250404047,
"loss": 3.5406,
"step": 25150
},
{
"epoch": 2.712302227962544,
"grad_norm": 0.5552936792373657,
"learning_rate": 0.000437784721473979,
"loss": 3.5186,
"step": 25200
},
{
"epoch": 2.7176837800021527,
"grad_norm": 0.588412344455719,
"learning_rate": 0.00043746148044391766,
"loss": 3.5416,
"step": 25250
},
{
"epoch": 2.723065332041761,
"grad_norm": 0.5414818525314331,
"learning_rate": 0.00043713823941385625,
"loss": 3.5289,
"step": 25300
},
{
"epoch": 2.728446884081369,
"grad_norm": 0.6171042919158936,
"learning_rate": 0.0004368149983837948,
"loss": 3.548,
"step": 25350
},
{
"epoch": 2.7338284361209775,
"grad_norm": 0.6250415444374084,
"learning_rate": 0.0004364917573537334,
"loss": 3.5393,
"step": 25400
},
{
"epoch": 2.7392099881605856,
"grad_norm": 0.6534850597381592,
"learning_rate": 0.00043616851632367193,
"loss": 3.5351,
"step": 25450
},
{
"epoch": 2.7445915402001937,
"grad_norm": 0.6056131720542908,
"learning_rate": 0.0004358452752936106,
"loss": 3.5331,
"step": 25500
},
{
"epoch": 2.749973092239802,
"grad_norm": 0.6097458004951477,
"learning_rate": 0.0004355220342635492,
"loss": 3.5229,
"step": 25550
},
{
"epoch": 2.7553546442794103,
"grad_norm": 0.5821205377578735,
"learning_rate": 0.00043519879323348777,
"loss": 3.5547,
"step": 25600
},
{
"epoch": 2.7607361963190185,
"grad_norm": 0.572923481464386,
"learning_rate": 0.0004348755522034263,
"loss": 3.5459,
"step": 25650
},
{
"epoch": 2.7661177483586266,
"grad_norm": 0.5959768295288086,
"learning_rate": 0.0004345523111733649,
"loss": 3.53,
"step": 25700
},
{
"epoch": 2.7714993003982347,
"grad_norm": 0.6123167872428894,
"learning_rate": 0.00043422907014330344,
"loss": 3.5332,
"step": 25750
},
{
"epoch": 2.776880852437843,
"grad_norm": 0.5567452311515808,
"learning_rate": 0.0004339058291132421,
"loss": 3.5329,
"step": 25800
},
{
"epoch": 2.7822624044774513,
"grad_norm": 0.6061723232269287,
"learning_rate": 0.0004335825880831807,
"loss": 3.5429,
"step": 25850
},
{
"epoch": 2.7876439565170594,
"grad_norm": 0.5593557357788086,
"learning_rate": 0.00043325934705311923,
"loss": 3.5072,
"step": 25900
},
{
"epoch": 2.793025508556668,
"grad_norm": 0.5608957409858704,
"learning_rate": 0.0004329361060230578,
"loss": 3.5159,
"step": 25950
},
{
"epoch": 2.798407060596276,
"grad_norm": 0.5900284051895142,
"learning_rate": 0.00043261286499299636,
"loss": 3.5301,
"step": 26000
},
{
"epoch": 2.798407060596276,
"eval_accuracy": 0.36886427635383945,
"eval_loss": 3.5095598697662354,
"eval_runtime": 180.5366,
"eval_samples_per_second": 99.764,
"eval_steps_per_second": 6.237,
"step": 26000
},
{
"epoch": 2.803788612635884,
"grad_norm": 0.5952501893043518,
"learning_rate": 0.00043228962396293496,
"loss": 3.5283,
"step": 26050
},
{
"epoch": 2.8091701646754923,
"grad_norm": 0.5934816598892212,
"learning_rate": 0.00043197284775347476,
"loss": 3.5287,
"step": 26100
},
{
"epoch": 2.8145517167151004,
"grad_norm": 0.6665016412734985,
"learning_rate": 0.00043164960672341336,
"loss": 3.5178,
"step": 26150
},
{
"epoch": 2.819933268754709,
"grad_norm": 0.5784537196159363,
"learning_rate": 0.000431326365693352,
"loss": 3.5368,
"step": 26200
},
{
"epoch": 2.825314820794317,
"grad_norm": 0.5916054248809814,
"learning_rate": 0.00043100312466329055,
"loss": 3.5361,
"step": 26250
},
{
"epoch": 2.830696372833925,
"grad_norm": 0.5684788227081299,
"learning_rate": 0.00043067988363322914,
"loss": 3.5204,
"step": 26300
},
{
"epoch": 2.8360779248735337,
"grad_norm": 0.5924267172813416,
"learning_rate": 0.00043035664260316774,
"loss": 3.5382,
"step": 26350
},
{
"epoch": 2.841459476913142,
"grad_norm": 0.6063621640205383,
"learning_rate": 0.0004300334015731063,
"loss": 3.5343,
"step": 26400
},
{
"epoch": 2.84684102895275,
"grad_norm": 0.6259608268737793,
"learning_rate": 0.0004297101605430449,
"loss": 3.5212,
"step": 26450
},
{
"epoch": 2.852222580992358,
"grad_norm": 0.5912953019142151,
"learning_rate": 0.0004293869195129835,
"loss": 3.5333,
"step": 26500
},
{
"epoch": 2.857604133031966,
"grad_norm": 0.6470860838890076,
"learning_rate": 0.00042906367848292206,
"loss": 3.5356,
"step": 26550
},
{
"epoch": 2.8629856850715747,
"grad_norm": 0.6130536198616028,
"learning_rate": 0.00042874043745286066,
"loss": 3.5259,
"step": 26600
},
{
"epoch": 2.868367237111183,
"grad_norm": 0.5800825357437134,
"learning_rate": 0.0004284171964227992,
"loss": 3.5425,
"step": 26650
},
{
"epoch": 2.873748789150791,
"grad_norm": 0.5847500562667847,
"learning_rate": 0.0004280939553927378,
"loss": 3.5086,
"step": 26700
},
{
"epoch": 2.8791303411903995,
"grad_norm": 0.6748270988464355,
"learning_rate": 0.00042777071436267644,
"loss": 3.5213,
"step": 26750
},
{
"epoch": 2.8845118932300076,
"grad_norm": 0.576877772808075,
"learning_rate": 0.000427447473332615,
"loss": 3.5173,
"step": 26800
},
{
"epoch": 2.8898934452696157,
"grad_norm": 0.5994119644165039,
"learning_rate": 0.0004271242323025536,
"loss": 3.5184,
"step": 26850
},
{
"epoch": 2.895274997309224,
"grad_norm": 0.5870147943496704,
"learning_rate": 0.00042680099127249217,
"loss": 3.5404,
"step": 26900
},
{
"epoch": 2.9006565493488323,
"grad_norm": 0.5605146884918213,
"learning_rate": 0.0004264777502424307,
"loss": 3.5338,
"step": 26950
},
{
"epoch": 2.9060381013884404,
"grad_norm": 0.6184528470039368,
"learning_rate": 0.0004261545092123693,
"loss": 3.5226,
"step": 27000
},
{
"epoch": 2.9060381013884404,
"eval_accuracy": 0.3700053489816671,
"eval_loss": 3.502581834793091,
"eval_runtime": 180.637,
"eval_samples_per_second": 99.708,
"eval_steps_per_second": 6.233,
"step": 27000
},
{
"epoch": 2.9114196534280485,
"grad_norm": 0.6496872305870056,
"learning_rate": 0.00042583126818230795,
"loss": 3.5242,
"step": 27050
},
{
"epoch": 2.9168012054676566,
"grad_norm": 0.6035163402557373,
"learning_rate": 0.0004255080271522465,
"loss": 3.5265,
"step": 27100
},
{
"epoch": 2.922182757507265,
"grad_norm": 0.5869918465614319,
"learning_rate": 0.0004251847861221851,
"loss": 3.5208,
"step": 27150
},
{
"epoch": 2.9275643095468733,
"grad_norm": 0.6070534586906433,
"learning_rate": 0.00042486154509212363,
"loss": 3.4932,
"step": 27200
},
{
"epoch": 2.9329458615864814,
"grad_norm": 0.6247556805610657,
"learning_rate": 0.0004245383040620622,
"loss": 3.5168,
"step": 27250
},
{
"epoch": 2.93832741362609,
"grad_norm": 0.606638491153717,
"learning_rate": 0.0004242150630320009,
"loss": 3.5379,
"step": 27300
},
{
"epoch": 2.943708965665698,
"grad_norm": 0.5896838307380676,
"learning_rate": 0.0004238918220019394,
"loss": 3.5439,
"step": 27350
},
{
"epoch": 2.949090517705306,
"grad_norm": 0.7161862850189209,
"learning_rate": 0.000423568580971878,
"loss": 3.524,
"step": 27400
},
{
"epoch": 2.9544720697449143,
"grad_norm": 0.5472960472106934,
"learning_rate": 0.00042324533994181655,
"loss": 3.5082,
"step": 27450
},
{
"epoch": 2.9598536217845224,
"grad_norm": 0.6270456910133362,
"learning_rate": 0.00042292209891175514,
"loss": 3.5133,
"step": 27500
},
{
"epoch": 2.965235173824131,
"grad_norm": 0.6163716912269592,
"learning_rate": 0.00042259885788169374,
"loss": 3.5185,
"step": 27550
},
{
"epoch": 2.970616725863739,
"grad_norm": 0.5813859105110168,
"learning_rate": 0.00042227561685163233,
"loss": 3.5219,
"step": 27600
},
{
"epoch": 2.975998277903347,
"grad_norm": 0.6137823462486267,
"learning_rate": 0.00042195237582157093,
"loss": 3.5238,
"step": 27650
},
{
"epoch": 2.9813798299429557,
"grad_norm": 0.573917031288147,
"learning_rate": 0.0004216291347915095,
"loss": 3.531,
"step": 27700
},
{
"epoch": 2.986761381982564,
"grad_norm": 0.6336318254470825,
"learning_rate": 0.00042130589376144806,
"loss": 3.5142,
"step": 27750
},
{
"epoch": 2.992142934022172,
"grad_norm": 0.5717305541038513,
"learning_rate": 0.00042098265273138666,
"loss": 3.5073,
"step": 27800
},
{
"epoch": 2.9975244860617805,
"grad_norm": 0.6153519749641418,
"learning_rate": 0.0004206594117013252,
"loss": 3.5313,
"step": 27850
},
{
"epoch": 3.0029060381013886,
"grad_norm": 0.5966550707817078,
"learning_rate": 0.00042033617067126385,
"loss": 3.4862,
"step": 27900
},
{
"epoch": 3.0082875901409967,
"grad_norm": 0.5811104774475098,
"learning_rate": 0.00042001292964120244,
"loss": 3.4131,
"step": 27950
},
{
"epoch": 3.0136691421806048,
"grad_norm": 0.5833897590637207,
"learning_rate": 0.000419689688611141,
"loss": 3.4297,
"step": 28000
},
{
"epoch": 3.0136691421806048,
"eval_accuracy": 0.3707112667937978,
"eval_loss": 3.4965574741363525,
"eval_runtime": 180.5598,
"eval_samples_per_second": 99.751,
"eval_steps_per_second": 6.236,
"step": 28000
},
{
"epoch": 3.0190506942202133,
"grad_norm": 0.6142577528953552,
"learning_rate": 0.0004193664475810796,
"loss": 3.4263,
"step": 28050
},
{
"epoch": 3.0244322462598214,
"grad_norm": 0.60226970911026,
"learning_rate": 0.00041904320655101817,
"loss": 3.4323,
"step": 28100
},
{
"epoch": 3.0298137982994295,
"grad_norm": 0.6177240014076233,
"learning_rate": 0.0004187199655209567,
"loss": 3.4357,
"step": 28150
},
{
"epoch": 3.0351953503390376,
"grad_norm": 0.5931535363197327,
"learning_rate": 0.00041839672449089536,
"loss": 3.4589,
"step": 28200
},
{
"epoch": 3.040576902378646,
"grad_norm": 0.608650803565979,
"learning_rate": 0.00041807348346083395,
"loss": 3.4432,
"step": 28250
},
{
"epoch": 3.0459584544182543,
"grad_norm": 0.6467320322990417,
"learning_rate": 0.0004177502424307725,
"loss": 3.4356,
"step": 28300
},
{
"epoch": 3.0513400064578624,
"grad_norm": 0.6208910346031189,
"learning_rate": 0.0004174270014007111,
"loss": 3.4335,
"step": 28350
},
{
"epoch": 3.0567215584974705,
"grad_norm": 0.6012397408485413,
"learning_rate": 0.00041710376037064963,
"loss": 3.4368,
"step": 28400
},
{
"epoch": 3.062103110537079,
"grad_norm": 0.5883954763412476,
"learning_rate": 0.0004167805193405883,
"loss": 3.4407,
"step": 28450
},
{
"epoch": 3.067484662576687,
"grad_norm": 0.5667783617973328,
"learning_rate": 0.0004164572783105269,
"loss": 3.4358,
"step": 28500
},
{
"epoch": 3.0728662146162953,
"grad_norm": 0.6078146696090698,
"learning_rate": 0.0004161340372804654,
"loss": 3.4422,
"step": 28550
},
{
"epoch": 3.0782477666559034,
"grad_norm": 0.6011075377464294,
"learning_rate": 0.000415810796250404,
"loss": 3.4444,
"step": 28600
},
{
"epoch": 3.083629318695512,
"grad_norm": 0.6752050518989563,
"learning_rate": 0.0004154875552203426,
"loss": 3.4557,
"step": 28650
},
{
"epoch": 3.08901087073512,
"grad_norm": 0.6102807521820068,
"learning_rate": 0.00041516431419028114,
"loss": 3.4272,
"step": 28700
},
{
"epoch": 3.094392422774728,
"grad_norm": 0.6542812585830688,
"learning_rate": 0.0004148410731602198,
"loss": 3.4495,
"step": 28750
},
{
"epoch": 3.0997739748143363,
"grad_norm": 0.6202298998832703,
"learning_rate": 0.0004145178321301584,
"loss": 3.4493,
"step": 28800
},
{
"epoch": 3.105155526853945,
"grad_norm": 0.6084877848625183,
"learning_rate": 0.00041419459110009693,
"loss": 3.4437,
"step": 28850
},
{
"epoch": 3.110537078893553,
"grad_norm": 0.8142428398132324,
"learning_rate": 0.0004138713500700355,
"loss": 3.4456,
"step": 28900
},
{
"epoch": 3.115918630933161,
"grad_norm": 0.6456471681594849,
"learning_rate": 0.00041354810903997406,
"loss": 3.4468,
"step": 28950
},
{
"epoch": 3.121300182972769,
"grad_norm": 0.6340752840042114,
"learning_rate": 0.00041322486800991266,
"loss": 3.4224,
"step": 29000
},
{
"epoch": 3.121300182972769,
"eval_accuracy": 0.37119194716948906,
"eval_loss": 3.4933841228485107,
"eval_runtime": 180.305,
"eval_samples_per_second": 99.892,
"eval_steps_per_second": 6.245,
"step": 29000
},
{
"epoch": 3.1266817350123777,
"grad_norm": 0.6109960675239563,
"learning_rate": 0.0004129016269798513,
"loss": 3.4461,
"step": 29050
},
{
"epoch": 3.132063287051986,
"grad_norm": 0.5882677435874939,
"learning_rate": 0.00041257838594978985,
"loss": 3.4344,
"step": 29100
},
{
"epoch": 3.137444839091594,
"grad_norm": 0.5873978734016418,
"learning_rate": 0.00041225514491972844,
"loss": 3.4492,
"step": 29150
},
{
"epoch": 3.1428263911312024,
"grad_norm": 0.6426523923873901,
"learning_rate": 0.00041193190388966704,
"loss": 3.4381,
"step": 29200
},
{
"epoch": 3.1482079431708105,
"grad_norm": 0.5868753790855408,
"learning_rate": 0.0004116086628596056,
"loss": 3.4539,
"step": 29250
},
{
"epoch": 3.1535894952104186,
"grad_norm": 0.6552289128303528,
"learning_rate": 0.0004112854218295442,
"loss": 3.4576,
"step": 29300
},
{
"epoch": 3.1589710472500268,
"grad_norm": 0.6329394578933716,
"learning_rate": 0.0004109621807994828,
"loss": 3.4514,
"step": 29350
},
{
"epoch": 3.1643525992896353,
"grad_norm": 0.6486338376998901,
"learning_rate": 0.00041063893976942136,
"loss": 3.4588,
"step": 29400
},
{
"epoch": 3.1697341513292434,
"grad_norm": 0.5754224061965942,
"learning_rate": 0.00041031569873935996,
"loss": 3.4392,
"step": 29450
},
{
"epoch": 3.1751157033688515,
"grad_norm": 0.6380184888839722,
"learning_rate": 0.0004099924577092985,
"loss": 3.4547,
"step": 29500
},
{
"epoch": 3.1804972554084596,
"grad_norm": 0.6305249333381653,
"learning_rate": 0.0004096692166792371,
"loss": 3.4568,
"step": 29550
},
{
"epoch": 3.185878807448068,
"grad_norm": 0.6137855648994446,
"learning_rate": 0.00040934597564917574,
"loss": 3.4612,
"step": 29600
},
{
"epoch": 3.1912603594876763,
"grad_norm": 0.5795332789421082,
"learning_rate": 0.0004090227346191143,
"loss": 3.4387,
"step": 29650
},
{
"epoch": 3.1966419115272844,
"grad_norm": 0.6044164299964905,
"learning_rate": 0.0004086994935890529,
"loss": 3.4696,
"step": 29700
},
{
"epoch": 3.2020234635668925,
"grad_norm": 0.60833340883255,
"learning_rate": 0.00040837625255899147,
"loss": 3.4597,
"step": 29750
},
{
"epoch": 3.207405015606501,
"grad_norm": 0.6233122944831848,
"learning_rate": 0.00040805301152893,
"loss": 3.4374,
"step": 29800
},
{
"epoch": 3.212786567646109,
"grad_norm": 0.6239629983901978,
"learning_rate": 0.0004077297704988686,
"loss": 3.4452,
"step": 29850
},
{
"epoch": 3.2181681196857173,
"grad_norm": 0.6396247148513794,
"learning_rate": 0.00040740652946880725,
"loss": 3.4325,
"step": 29900
},
{
"epoch": 3.2235496717253254,
"grad_norm": 0.58219975233078,
"learning_rate": 0.0004070832884387458,
"loss": 3.466,
"step": 29950
},
{
"epoch": 3.228931223764934,
"grad_norm": 0.6302553415298462,
"learning_rate": 0.0004067600474086844,
"loss": 3.456,
"step": 30000
},
{
"epoch": 3.228931223764934,
"eval_accuracy": 0.3716267760265226,
"eval_loss": 3.489248275756836,
"eval_runtime": 180.7912,
"eval_samples_per_second": 99.623,
"eval_steps_per_second": 6.228,
"step": 30000
},
{
"epoch": 3.234312775804542,
"grad_norm": 0.6658568382263184,
"learning_rate": 0.00040643680637862293,
"loss": 3.4516,
"step": 30050
},
{
"epoch": 3.23969432784415,
"grad_norm": 0.6323238611221313,
"learning_rate": 0.0004061135653485615,
"loss": 3.457,
"step": 30100
},
{
"epoch": 3.2450758798837587,
"grad_norm": 0.5995866060256958,
"learning_rate": 0.00040579678913910133,
"loss": 3.4326,
"step": 30150
},
{
"epoch": 3.250457431923367,
"grad_norm": 0.6197038888931274,
"learning_rate": 0.0004054735481090399,
"loss": 3.4274,
"step": 30200
},
{
"epoch": 3.255838983962975,
"grad_norm": 0.5652536749839783,
"learning_rate": 0.0004051503070789786,
"loss": 3.4548,
"step": 30250
},
{
"epoch": 3.261220536002583,
"grad_norm": 0.6363932490348816,
"learning_rate": 0.0004048270660489171,
"loss": 3.4593,
"step": 30300
},
{
"epoch": 3.2666020880421915,
"grad_norm": 0.6436594128608704,
"learning_rate": 0.0004045038250188557,
"loss": 3.4508,
"step": 30350
},
{
"epoch": 3.2719836400817996,
"grad_norm": 0.627250075340271,
"learning_rate": 0.00040418058398879425,
"loss": 3.4444,
"step": 30400
},
{
"epoch": 3.2773651921214078,
"grad_norm": 0.659887433052063,
"learning_rate": 0.00040385734295873284,
"loss": 3.4687,
"step": 30450
},
{
"epoch": 3.282746744161016,
"grad_norm": 0.5804165005683899,
"learning_rate": 0.00040353410192867144,
"loss": 3.4656,
"step": 30500
},
{
"epoch": 3.2881282962006244,
"grad_norm": 0.6306347846984863,
"learning_rate": 0.00040321086089861003,
"loss": 3.4417,
"step": 30550
},
{
"epoch": 3.2935098482402325,
"grad_norm": 0.6350857019424438,
"learning_rate": 0.00040288761986854863,
"loss": 3.4682,
"step": 30600
},
{
"epoch": 3.2988914002798406,
"grad_norm": 0.7076759338378906,
"learning_rate": 0.0004025643788384872,
"loss": 3.4531,
"step": 30650
},
{
"epoch": 3.304272952319449,
"grad_norm": 0.6139525175094604,
"learning_rate": 0.00040224113780842576,
"loss": 3.4659,
"step": 30700
},
{
"epoch": 3.3096545043590573,
"grad_norm": 0.6305450201034546,
"learning_rate": 0.00040191789677836436,
"loss": 3.4443,
"step": 30750
},
{
"epoch": 3.3150360563986654,
"grad_norm": 0.5848320722579956,
"learning_rate": 0.0004015946557483029,
"loss": 3.453,
"step": 30800
},
{
"epoch": 3.3204176084382735,
"grad_norm": 0.6427562236785889,
"learning_rate": 0.00040127141471824155,
"loss": 3.4731,
"step": 30850
},
{
"epoch": 3.3257991604778816,
"grad_norm": 0.6468438506126404,
"learning_rate": 0.00040094817368818014,
"loss": 3.4487,
"step": 30900
},
{
"epoch": 3.33118071251749,
"grad_norm": 0.6019175052642822,
"learning_rate": 0.0004006249326581187,
"loss": 3.4629,
"step": 30950
},
{
"epoch": 3.3365622645570983,
"grad_norm": 0.6581705212593079,
"learning_rate": 0.0004003016916280573,
"loss": 3.4661,
"step": 31000
},
{
"epoch": 3.3365622645570983,
"eval_accuracy": 0.37244656206508286,
"eval_loss": 3.4832379817962646,
"eval_runtime": 180.6125,
"eval_samples_per_second": 99.722,
"eval_steps_per_second": 6.234,
"step": 31000
},
{
"epoch": 3.3419438165967064,
"grad_norm": 0.65335613489151,
"learning_rate": 0.00039997845059799587,
"loss": 3.4359,
"step": 31050
},
{
"epoch": 3.347325368636315,
"grad_norm": 0.6507302522659302,
"learning_rate": 0.00039965520956793447,
"loss": 3.4525,
"step": 31100
},
{
"epoch": 3.352706920675923,
"grad_norm": 0.5894781351089478,
"learning_rate": 0.00039933196853787306,
"loss": 3.4516,
"step": 31150
},
{
"epoch": 3.358088472715531,
"grad_norm": 0.6025640964508057,
"learning_rate": 0.00039900872750781166,
"loss": 3.4648,
"step": 31200
},
{
"epoch": 3.3634700247551392,
"grad_norm": 0.6366141438484192,
"learning_rate": 0.0003986854864777502,
"loss": 3.4504,
"step": 31250
},
{
"epoch": 3.368851576794748,
"grad_norm": 0.6077277064323425,
"learning_rate": 0.0003983622454476888,
"loss": 3.4439,
"step": 31300
},
{
"epoch": 3.374233128834356,
"grad_norm": 0.6532594561576843,
"learning_rate": 0.00039803900441762733,
"loss": 3.4624,
"step": 31350
},
{
"epoch": 3.379614680873964,
"grad_norm": 0.6048536896705627,
"learning_rate": 0.000397715763387566,
"loss": 3.4589,
"step": 31400
},
{
"epoch": 3.384996232913572,
"grad_norm": 0.6351385712623596,
"learning_rate": 0.0003973925223575046,
"loss": 3.4395,
"step": 31450
},
{
"epoch": 3.3903777849531807,
"grad_norm": 0.6504836082458496,
"learning_rate": 0.0003970692813274431,
"loss": 3.4588,
"step": 31500
},
{
"epoch": 3.3957593369927888,
"grad_norm": 0.6237932443618774,
"learning_rate": 0.0003967460402973817,
"loss": 3.4532,
"step": 31550
},
{
"epoch": 3.401140889032397,
"grad_norm": 0.5831241011619568,
"learning_rate": 0.0003964227992673203,
"loss": 3.447,
"step": 31600
},
{
"epoch": 3.4065224410720054,
"grad_norm": 0.6177220344543457,
"learning_rate": 0.00039609955823725884,
"loss": 3.4438,
"step": 31650
},
{
"epoch": 3.4119039931116135,
"grad_norm": 0.6046972870826721,
"learning_rate": 0.0003957763172071975,
"loss": 3.4752,
"step": 31700
},
{
"epoch": 3.4172855451512216,
"grad_norm": 0.6035352349281311,
"learning_rate": 0.0003954530761771361,
"loss": 3.4588,
"step": 31750
},
{
"epoch": 3.4226670971908297,
"grad_norm": 0.5911862850189209,
"learning_rate": 0.00039512983514707463,
"loss": 3.4505,
"step": 31800
},
{
"epoch": 3.428048649230438,
"grad_norm": 0.5918540358543396,
"learning_rate": 0.0003948065941170132,
"loss": 3.4555,
"step": 31850
},
{
"epoch": 3.4334302012700464,
"grad_norm": 0.6266008019447327,
"learning_rate": 0.00039448335308695176,
"loss": 3.4768,
"step": 31900
},
{
"epoch": 3.4388117533096545,
"grad_norm": 0.6207110285758972,
"learning_rate": 0.00039416011205689036,
"loss": 3.4568,
"step": 31950
},
{
"epoch": 3.4441933053492626,
"grad_norm": 0.6404582858085632,
"learning_rate": 0.000393836871026829,
"loss": 3.4577,
"step": 32000
},
{
"epoch": 3.4441933053492626,
"eval_accuracy": 0.37327199805380945,
"eval_loss": 3.4748642444610596,
"eval_runtime": 180.6631,
"eval_samples_per_second": 99.694,
"eval_steps_per_second": 6.233,
"step": 32000
},
{
"epoch": 3.449574857388871,
"grad_norm": 0.6518675088882446,
"learning_rate": 0.00039351362999676755,
"loss": 3.4445,
"step": 32050
},
{
"epoch": 3.4549564094284793,
"grad_norm": 0.6538469195365906,
"learning_rate": 0.00039319038896670614,
"loss": 3.4755,
"step": 32100
},
{
"epoch": 3.4603379614680874,
"grad_norm": 0.5812484622001648,
"learning_rate": 0.00039287361275724595,
"loss": 3.4689,
"step": 32150
},
{
"epoch": 3.4657195135076955,
"grad_norm": 0.6309418678283691,
"learning_rate": 0.00039255037172718454,
"loss": 3.4447,
"step": 32200
},
{
"epoch": 3.471101065547304,
"grad_norm": 0.6192950010299683,
"learning_rate": 0.0003922271306971231,
"loss": 3.4599,
"step": 32250
},
{
"epoch": 3.476482617586912,
"grad_norm": 0.6055722832679749,
"learning_rate": 0.0003919038896670617,
"loss": 3.462,
"step": 32300
},
{
"epoch": 3.4818641696265202,
"grad_norm": 0.6457564234733582,
"learning_rate": 0.00039158064863700033,
"loss": 3.4393,
"step": 32350
},
{
"epoch": 3.4872457216661283,
"grad_norm": 0.6618093848228455,
"learning_rate": 0.00039125740760693887,
"loss": 3.4606,
"step": 32400
},
{
"epoch": 3.492627273705737,
"grad_norm": 0.5980201363563538,
"learning_rate": 0.00039093416657687746,
"loss": 3.4392,
"step": 32450
},
{
"epoch": 3.498008825745345,
"grad_norm": 0.6752046346664429,
"learning_rate": 0.00039061092554681606,
"loss": 3.4676,
"step": 32500
},
{
"epoch": 3.503390377784953,
"grad_norm": 0.6271814703941345,
"learning_rate": 0.0003902876845167546,
"loss": 3.4844,
"step": 32550
},
{
"epoch": 3.5087719298245617,
"grad_norm": 0.6477372050285339,
"learning_rate": 0.0003899644434866932,
"loss": 3.457,
"step": 32600
},
{
"epoch": 3.5141534818641698,
"grad_norm": 0.613669753074646,
"learning_rate": 0.00038964120245663184,
"loss": 3.4533,
"step": 32650
},
{
"epoch": 3.519535033903778,
"grad_norm": 0.5857597589492798,
"learning_rate": 0.0003893179614265704,
"loss": 3.4571,
"step": 32700
},
{
"epoch": 3.524916585943386,
"grad_norm": 0.6374267339706421,
"learning_rate": 0.000388994720396509,
"loss": 3.455,
"step": 32750
},
{
"epoch": 3.530298137982994,
"grad_norm": 0.6730107665061951,
"learning_rate": 0.0003886714793664475,
"loss": 3.4661,
"step": 32800
},
{
"epoch": 3.5356796900226026,
"grad_norm": 0.6586828827857971,
"learning_rate": 0.0003883482383363861,
"loss": 3.4606,
"step": 32850
},
{
"epoch": 3.5410612420622107,
"grad_norm": 0.6340796947479248,
"learning_rate": 0.00038802499730632476,
"loss": 3.4729,
"step": 32900
},
{
"epoch": 3.546442794101819,
"grad_norm": 0.6165903806686401,
"learning_rate": 0.0003877017562762633,
"loss": 3.4466,
"step": 32950
},
{
"epoch": 3.5518243461414274,
"grad_norm": 0.6035377383232117,
"learning_rate": 0.0003873785152462019,
"loss": 3.4654,
"step": 33000
},
{
"epoch": 3.5518243461414274,
"eval_accuracy": 0.3737836445025279,
"eval_loss": 3.466870069503784,
"eval_runtime": 180.8094,
"eval_samples_per_second": 99.613,
"eval_steps_per_second": 6.228,
"step": 33000
},
{
"epoch": 3.5572058981810355,
"grad_norm": 0.6327840089797974,
"learning_rate": 0.0003870552742161405,
"loss": 3.4539,
"step": 33050
},
{
"epoch": 3.5625874502206436,
"grad_norm": 0.6125268936157227,
"learning_rate": 0.00038673203318607903,
"loss": 3.4392,
"step": 33100
},
{
"epoch": 3.5679690022602517,
"grad_norm": 0.6121278405189514,
"learning_rate": 0.0003864087921560176,
"loss": 3.452,
"step": 33150
},
{
"epoch": 3.57335055429986,
"grad_norm": 0.5984029769897461,
"learning_rate": 0.0003860855511259563,
"loss": 3.4638,
"step": 33200
},
{
"epoch": 3.5787321063394684,
"grad_norm": 0.6518922448158264,
"learning_rate": 0.0003857623100958948,
"loss": 3.4674,
"step": 33250
},
{
"epoch": 3.5841136583790765,
"grad_norm": 0.6262392401695251,
"learning_rate": 0.0003854390690658334,
"loss": 3.4391,
"step": 33300
},
{
"epoch": 3.5894952104186846,
"grad_norm": 0.5916134715080261,
"learning_rate": 0.00038511582803577195,
"loss": 3.4595,
"step": 33350
},
{
"epoch": 3.594876762458293,
"grad_norm": 0.6136584877967834,
"learning_rate": 0.00038479258700571054,
"loss": 3.4431,
"step": 33400
},
{
"epoch": 3.6002583144979012,
"grad_norm": 0.6699656248092651,
"learning_rate": 0.00038446934597564914,
"loss": 3.4583,
"step": 33450
},
{
"epoch": 3.6056398665375093,
"grad_norm": 0.5867238640785217,
"learning_rate": 0.00038414610494558773,
"loss": 3.4409,
"step": 33500
},
{
"epoch": 3.611021418577118,
"grad_norm": 0.6047042012214661,
"learning_rate": 0.00038382286391552633,
"loss": 3.4538,
"step": 33550
},
{
"epoch": 3.616402970616726,
"grad_norm": 0.6440869569778442,
"learning_rate": 0.0003834996228854649,
"loss": 3.4487,
"step": 33600
},
{
"epoch": 3.621784522656334,
"grad_norm": 0.5783991813659668,
"learning_rate": 0.00038317638185540346,
"loss": 3.4664,
"step": 33650
},
{
"epoch": 3.627166074695942,
"grad_norm": 0.611685574054718,
"learning_rate": 0.00038285314082534206,
"loss": 3.4513,
"step": 33700
},
{
"epoch": 3.6325476267355503,
"grad_norm": 0.6536784172058105,
"learning_rate": 0.0003825298997952806,
"loss": 3.4523,
"step": 33750
},
{
"epoch": 3.637929178775159,
"grad_norm": 0.557302713394165,
"learning_rate": 0.00038220665876521925,
"loss": 3.4496,
"step": 33800
},
{
"epoch": 3.643310730814767,
"grad_norm": 0.6502556204795837,
"learning_rate": 0.00038188341773515784,
"loss": 3.4551,
"step": 33850
},
{
"epoch": 3.648692282854375,
"grad_norm": 0.6656308770179749,
"learning_rate": 0.0003815601767050964,
"loss": 3.4679,
"step": 33900
},
{
"epoch": 3.6540738348939836,
"grad_norm": 0.6328909993171692,
"learning_rate": 0.000381236935675035,
"loss": 3.4338,
"step": 33950
},
{
"epoch": 3.6594553869335917,
"grad_norm": 0.5742416977882385,
"learning_rate": 0.0003809136946449735,
"loss": 3.4522,
"step": 34000
},
{
"epoch": 3.6594553869335917,
"eval_accuracy": 0.3745204197347979,
"eval_loss": 3.4621152877807617,
"eval_runtime": 180.7271,
"eval_samples_per_second": 99.659,
"eval_steps_per_second": 6.23,
"step": 34000
},
{
"epoch": 3.6648369389732,
"grad_norm": 0.6359716653823853,
"learning_rate": 0.00038059045361491217,
"loss": 3.4588,
"step": 34050
},
{
"epoch": 3.670218491012808,
"grad_norm": 0.5943437814712524,
"learning_rate": 0.00038026721258485076,
"loss": 3.4557,
"step": 34100
},
{
"epoch": 3.675600043052416,
"grad_norm": 0.6035296320915222,
"learning_rate": 0.0003799439715547893,
"loss": 3.4482,
"step": 34150
},
{
"epoch": 3.6809815950920246,
"grad_norm": 0.6305634379386902,
"learning_rate": 0.00037962719534532916,
"loss": 3.4752,
"step": 34200
},
{
"epoch": 3.6863631471316327,
"grad_norm": 0.5965917706489563,
"learning_rate": 0.0003793039543152677,
"loss": 3.4266,
"step": 34250
},
{
"epoch": 3.691744699171241,
"grad_norm": 0.6232327818870544,
"learning_rate": 0.0003789807132852063,
"loss": 3.4366,
"step": 34300
},
{
"epoch": 3.6971262512108494,
"grad_norm": 0.6077519655227661,
"learning_rate": 0.0003786574722551449,
"loss": 3.4553,
"step": 34350
},
{
"epoch": 3.7025078032504575,
"grad_norm": 0.6324222087860107,
"learning_rate": 0.00037833423122508343,
"loss": 3.4602,
"step": 34400
},
{
"epoch": 3.7078893552900656,
"grad_norm": 0.6138796210289001,
"learning_rate": 0.0003780109901950221,
"loss": 3.4462,
"step": 34450
},
{
"epoch": 3.713270907329674,
"grad_norm": 0.6130177974700928,
"learning_rate": 0.0003776877491649607,
"loss": 3.4534,
"step": 34500
},
{
"epoch": 3.7186524593692822,
"grad_norm": 0.585455596446991,
"learning_rate": 0.0003773645081348992,
"loss": 3.4594,
"step": 34550
},
{
"epoch": 3.7240340114088903,
"grad_norm": 0.6451506614685059,
"learning_rate": 0.0003770412671048378,
"loss": 3.4623,
"step": 34600
},
{
"epoch": 3.7294155634484984,
"grad_norm": 0.6334204077720642,
"learning_rate": 0.00037671802607477635,
"loss": 3.4552,
"step": 34650
},
{
"epoch": 3.7347971154881066,
"grad_norm": 0.6382574439048767,
"learning_rate": 0.000376394785044715,
"loss": 3.4504,
"step": 34700
},
{
"epoch": 3.740178667527715,
"grad_norm": 0.6397031545639038,
"learning_rate": 0.0003760715440146536,
"loss": 3.4533,
"step": 34750
},
{
"epoch": 3.745560219567323,
"grad_norm": 0.644877016544342,
"learning_rate": 0.00037574830298459214,
"loss": 3.4596,
"step": 34800
},
{
"epoch": 3.7509417716069313,
"grad_norm": 0.6290867924690247,
"learning_rate": 0.00037542506195453073,
"loss": 3.4531,
"step": 34850
},
{
"epoch": 3.75632332364654,
"grad_norm": 0.6182779669761658,
"learning_rate": 0.0003751018209244693,
"loss": 3.4488,
"step": 34900
},
{
"epoch": 3.761704875686148,
"grad_norm": 0.6078924536705017,
"learning_rate": 0.00037477857989440787,
"loss": 3.4599,
"step": 34950
},
{
"epoch": 3.767086427725756,
"grad_norm": 0.6303339004516602,
"learning_rate": 0.0003744553388643465,
"loss": 3.4511,
"step": 35000
},
{
"epoch": 3.767086427725756,
"eval_accuracy": 0.37501337788681216,
"eval_loss": 3.45632266998291,
"eval_runtime": 180.682,
"eval_samples_per_second": 99.683,
"eval_steps_per_second": 6.232,
"step": 35000
},
{
"epoch": 3.772467979765364,
"grad_norm": 0.6259037852287292,
"learning_rate": 0.0003741320978342851,
"loss": 3.4522,
"step": 35050
},
{
"epoch": 3.7778495318049723,
"grad_norm": 0.6218352913856506,
"learning_rate": 0.00037380885680422365,
"loss": 3.452,
"step": 35100
},
{
"epoch": 3.783231083844581,
"grad_norm": 0.6394650936126709,
"learning_rate": 0.00037348561577416224,
"loss": 3.4425,
"step": 35150
},
{
"epoch": 3.788612635884189,
"grad_norm": 0.6433684825897217,
"learning_rate": 0.0003731623747441008,
"loss": 3.4432,
"step": 35200
},
{
"epoch": 3.793994187923797,
"grad_norm": 0.5973502397537231,
"learning_rate": 0.0003728391337140394,
"loss": 3.4626,
"step": 35250
},
{
"epoch": 3.7993757399634056,
"grad_norm": 0.6366912126541138,
"learning_rate": 0.00037251589268397803,
"loss": 3.4434,
"step": 35300
},
{
"epoch": 3.8047572920030137,
"grad_norm": 0.6271637082099915,
"learning_rate": 0.00037219265165391657,
"loss": 3.4478,
"step": 35350
},
{
"epoch": 3.810138844042622,
"grad_norm": 0.6246579885482788,
"learning_rate": 0.00037186941062385516,
"loss": 3.4664,
"step": 35400
},
{
"epoch": 3.8155203960822304,
"grad_norm": 0.6329902410507202,
"learning_rate": 0.0003715461695937937,
"loss": 3.4509,
"step": 35450
},
{
"epoch": 3.8209019481218385,
"grad_norm": 0.6158545613288879,
"learning_rate": 0.0003712229285637323,
"loss": 3.4538,
"step": 35500
},
{
"epoch": 3.8262835001614466,
"grad_norm": 0.6100180149078369,
"learning_rate": 0.0003708996875336709,
"loss": 3.4539,
"step": 35550
},
{
"epoch": 3.8316650522010547,
"grad_norm": 0.6152170896530151,
"learning_rate": 0.0003705764465036095,
"loss": 3.4498,
"step": 35600
},
{
"epoch": 3.837046604240663,
"grad_norm": 0.6338374018669128,
"learning_rate": 0.0003702532054735481,
"loss": 3.4563,
"step": 35650
},
{
"epoch": 3.8424281562802713,
"grad_norm": 0.6253648996353149,
"learning_rate": 0.0003699299644434867,
"loss": 3.452,
"step": 35700
},
{
"epoch": 3.8478097083198795,
"grad_norm": 0.6302428245544434,
"learning_rate": 0.0003696067234134252,
"loss": 3.456,
"step": 35750
},
{
"epoch": 3.8531912603594876,
"grad_norm": 0.6266033053398132,
"learning_rate": 0.0003692834823833638,
"loss": 3.4385,
"step": 35800
},
{
"epoch": 3.858572812399096,
"grad_norm": 0.613212525844574,
"learning_rate": 0.00036896024135330246,
"loss": 3.4627,
"step": 35850
},
{
"epoch": 3.863954364438704,
"grad_norm": 0.6388803720474243,
"learning_rate": 0.000368637000323241,
"loss": 3.4467,
"step": 35900
},
{
"epoch": 3.8693359164783123,
"grad_norm": 0.6624432802200317,
"learning_rate": 0.0003683137592931796,
"loss": 3.4508,
"step": 35950
},
{
"epoch": 3.8747174685179204,
"grad_norm": 0.6262862086296082,
"learning_rate": 0.00036799051826311814,
"loss": 3.4455,
"step": 36000
},
{
"epoch": 3.8747174685179204,
"eval_accuracy": 0.37557652580435463,
"eval_loss": 3.453572988510132,
"eval_runtime": 180.586,
"eval_samples_per_second": 99.736,
"eval_steps_per_second": 6.235,
"step": 36000
},
{
"epoch": 3.8800990205575285,
"grad_norm": 0.6518728137016296,
"learning_rate": 0.00036766727723305673,
"loss": 3.4398,
"step": 36050
},
{
"epoch": 3.885480572597137,
"grad_norm": 0.6276541352272034,
"learning_rate": 0.0003673440362029953,
"loss": 3.4572,
"step": 36100
},
{
"epoch": 3.890862124636745,
"grad_norm": 0.6711607575416565,
"learning_rate": 0.0003670207951729339,
"loss": 3.4699,
"step": 36150
},
{
"epoch": 3.8962436766763533,
"grad_norm": 0.610508382320404,
"learning_rate": 0.0003667040189634737,
"loss": 3.4665,
"step": 36200
},
{
"epoch": 3.901625228715962,
"grad_norm": 0.6061813235282898,
"learning_rate": 0.0003663807779334123,
"loss": 3.4368,
"step": 36250
},
{
"epoch": 3.90700678075557,
"grad_norm": 0.6153860092163086,
"learning_rate": 0.0003660575369033509,
"loss": 3.4408,
"step": 36300
},
{
"epoch": 3.912388332795178,
"grad_norm": 0.6401193141937256,
"learning_rate": 0.0003657342958732895,
"loss": 3.4316,
"step": 36350
},
{
"epoch": 3.9177698848347866,
"grad_norm": 0.6334563493728638,
"learning_rate": 0.00036541105484322805,
"loss": 3.4479,
"step": 36400
},
{
"epoch": 3.9231514368743947,
"grad_norm": 0.6796964406967163,
"learning_rate": 0.00036508781381316665,
"loss": 3.4225,
"step": 36450
},
{
"epoch": 3.928532988914003,
"grad_norm": 0.5866906046867371,
"learning_rate": 0.0003647645727831053,
"loss": 3.4297,
"step": 36500
},
{
"epoch": 3.933914540953611,
"grad_norm": 0.6415225267410278,
"learning_rate": 0.00036444133175304384,
"loss": 3.4563,
"step": 36550
},
{
"epoch": 3.939296092993219,
"grad_norm": 0.6429531574249268,
"learning_rate": 0.00036411809072298243,
"loss": 3.4365,
"step": 36600
},
{
"epoch": 3.9446776450328276,
"grad_norm": 0.590459942817688,
"learning_rate": 0.00036379484969292097,
"loss": 3.4546,
"step": 36650
},
{
"epoch": 3.9500591970724357,
"grad_norm": 0.5942658185958862,
"learning_rate": 0.00036347160866285956,
"loss": 3.438,
"step": 36700
},
{
"epoch": 3.955440749112044,
"grad_norm": 0.6288406848907471,
"learning_rate": 0.0003631483676327981,
"loss": 3.4517,
"step": 36750
},
{
"epoch": 3.9608223011516523,
"grad_norm": 0.6701431274414062,
"learning_rate": 0.00036282512660273675,
"loss": 3.4485,
"step": 36800
},
{
"epoch": 3.9662038531912605,
"grad_norm": 0.7019712328910828,
"learning_rate": 0.00036250188557267535,
"loss": 3.4408,
"step": 36850
},
{
"epoch": 3.9715854052308686,
"grad_norm": 0.6082743406295776,
"learning_rate": 0.0003621786445426139,
"loss": 3.4446,
"step": 36900
},
{
"epoch": 3.9769669572704767,
"grad_norm": 0.6317737102508545,
"learning_rate": 0.0003618554035125525,
"loss": 3.4349,
"step": 36950
},
{
"epoch": 3.9823485093100848,
"grad_norm": 0.619764506816864,
"learning_rate": 0.0003615321624824911,
"loss": 3.4402,
"step": 37000
},
{
"epoch": 3.9823485093100848,
"eval_accuracy": 0.3760329765860633,
"eval_loss": 3.4432144165039062,
"eval_runtime": 180.6742,
"eval_samples_per_second": 99.688,
"eval_steps_per_second": 6.232,
"step": 37000
},
{
"epoch": 3.9877300613496933,
"grad_norm": 0.649409830570221,
"learning_rate": 0.0003612089214524296,
"loss": 3.4344,
"step": 37050
},
{
"epoch": 3.9931116133893014,
"grad_norm": 0.6841443777084351,
"learning_rate": 0.00036088568042236827,
"loss": 3.4393,
"step": 37100
},
{
"epoch": 3.9984931654289095,
"grad_norm": 0.6146060824394226,
"learning_rate": 0.00036056243939230686,
"loss": 3.4337,
"step": 37150
},
{
"epoch": 4.003874717468518,
"grad_norm": 0.6463866829872131,
"learning_rate": 0.0003602391983622454,
"loss": 3.3701,
"step": 37200
},
{
"epoch": 4.009256269508126,
"grad_norm": 0.6479421257972717,
"learning_rate": 0.000359915957332184,
"loss": 3.3524,
"step": 37250
},
{
"epoch": 4.014637821547734,
"grad_norm": 0.7297543883323669,
"learning_rate": 0.00035959271630212254,
"loss": 3.3483,
"step": 37300
},
{
"epoch": 4.020019373587343,
"grad_norm": 0.6528753638267517,
"learning_rate": 0.00035926947527206113,
"loss": 3.3525,
"step": 37350
},
{
"epoch": 4.0254009256269505,
"grad_norm": 0.6073748469352722,
"learning_rate": 0.0003589462342419998,
"loss": 3.3627,
"step": 37400
},
{
"epoch": 4.030782477666559,
"grad_norm": 0.6415776610374451,
"learning_rate": 0.0003586229932119383,
"loss": 3.3607,
"step": 37450
},
{
"epoch": 4.036164029706168,
"grad_norm": 0.629884660243988,
"learning_rate": 0.0003582997521818769,
"loss": 3.3572,
"step": 37500
},
{
"epoch": 4.041545581745775,
"grad_norm": 0.6344863772392273,
"learning_rate": 0.0003579765111518155,
"loss": 3.3406,
"step": 37550
},
{
"epoch": 4.046927133785384,
"grad_norm": 0.5791500806808472,
"learning_rate": 0.00035765327012175405,
"loss": 3.3334,
"step": 37600
},
{
"epoch": 4.0523086858249915,
"grad_norm": 0.6291230320930481,
"learning_rate": 0.0003573300290916927,
"loss": 3.364,
"step": 37650
},
{
"epoch": 4.0576902378646,
"grad_norm": 0.6606059670448303,
"learning_rate": 0.0003570067880616313,
"loss": 3.3702,
"step": 37700
},
{
"epoch": 4.063071789904209,
"grad_norm": 0.66637122631073,
"learning_rate": 0.00035668354703156984,
"loss": 3.375,
"step": 37750
},
{
"epoch": 4.068453341943816,
"grad_norm": 0.6079049110412598,
"learning_rate": 0.00035636030600150843,
"loss": 3.3491,
"step": 37800
},
{
"epoch": 4.073834893983425,
"grad_norm": 0.6581602692604065,
"learning_rate": 0.00035603706497144697,
"loss": 3.3601,
"step": 37850
},
{
"epoch": 4.079216446023033,
"grad_norm": 0.5903530716896057,
"learning_rate": 0.00035571382394138557,
"loss": 3.3685,
"step": 37900
},
{
"epoch": 4.084597998062641,
"grad_norm": 0.6376979947090149,
"learning_rate": 0.0003553905829113242,
"loss": 3.3583,
"step": 37950
},
{
"epoch": 4.08997955010225,
"grad_norm": 0.6378331184387207,
"learning_rate": 0.00035506734188126275,
"loss": 3.3602,
"step": 38000
},
{
"epoch": 4.08997955010225,
"eval_accuracy": 0.37666750945089983,
"eval_loss": 3.4465579986572266,
"eval_runtime": 180.5241,
"eval_samples_per_second": 99.771,
"eval_steps_per_second": 6.237,
"step": 38000
},
{
"epoch": 4.095361102141858,
"grad_norm": 0.6897552013397217,
"learning_rate": 0.00035474410085120135,
"loss": 3.3881,
"step": 38050
},
{
"epoch": 4.100742654181466,
"grad_norm": 0.624983549118042,
"learning_rate": 0.00035442085982113994,
"loss": 3.384,
"step": 38100
},
{
"epoch": 4.106124206221074,
"grad_norm": 0.6599211692810059,
"learning_rate": 0.0003540976187910785,
"loss": 3.3793,
"step": 38150
},
{
"epoch": 4.111505758260682,
"grad_norm": null,
"learning_rate": 0.0003537808425816183,
"loss": 3.3838,
"step": 38200
},
{
"epoch": 4.1168873103002905,
"grad_norm": 0.6250761151313782,
"learning_rate": 0.0003534576015515569,
"loss": 3.372,
"step": 38250
},
{
"epoch": 4.122268862339899,
"grad_norm": 0.6939108967781067,
"learning_rate": 0.00035313436052149553,
"loss": 3.3702,
"step": 38300
},
{
"epoch": 4.127650414379507,
"grad_norm": 0.6383840441703796,
"learning_rate": 0.0003528111194914341,
"loss": 3.3634,
"step": 38350
},
{
"epoch": 4.133031966419115,
"grad_norm": 0.631736159324646,
"learning_rate": 0.00035248787846137267,
"loss": 3.3815,
"step": 38400
},
{
"epoch": 4.138413518458724,
"grad_norm": 0.9963732957839966,
"learning_rate": 0.00035216463743131126,
"loss": 3.3576,
"step": 38450
},
{
"epoch": 4.1437950704983315,
"grad_norm": 0.6733307838439941,
"learning_rate": 0.0003518413964012498,
"loss": 3.3926,
"step": 38500
},
{
"epoch": 4.14917662253794,
"grad_norm": 0.5962156653404236,
"learning_rate": 0.0003515181553711884,
"loss": 3.3779,
"step": 38550
},
{
"epoch": 4.154558174577549,
"grad_norm": 0.6685757637023926,
"learning_rate": 0.00035119491434112705,
"loss": 3.3795,
"step": 38600
},
{
"epoch": 4.159939726617156,
"grad_norm": 0.6577488780021667,
"learning_rate": 0.0003508716733110656,
"loss": 3.3678,
"step": 38650
},
{
"epoch": 4.165321278656765,
"grad_norm": 0.7001060247421265,
"learning_rate": 0.0003505484322810042,
"loss": 3.3803,
"step": 38700
},
{
"epoch": 4.1707028306963725,
"grad_norm": 0.6563752889633179,
"learning_rate": 0.0003502251912509427,
"loss": 3.3882,
"step": 38750
},
{
"epoch": 4.176084382735981,
"grad_norm": 0.6249417066574097,
"learning_rate": 0.0003499019502208813,
"loss": 3.3791,
"step": 38800
},
{
"epoch": 4.18146593477559,
"grad_norm": 0.6289679408073425,
"learning_rate": 0.0003495787091908199,
"loss": 3.3752,
"step": 38850
},
{
"epoch": 4.186847486815197,
"grad_norm": 0.6969509720802307,
"learning_rate": 0.0003492554681607585,
"loss": 3.3764,
"step": 38900
},
{
"epoch": 4.192229038854806,
"grad_norm": 0.663133442401886,
"learning_rate": 0.0003489322271306971,
"loss": 3.3928,
"step": 38950
},
{
"epoch": 4.197610590894414,
"grad_norm": 0.7347210645675659,
"learning_rate": 0.0003486089861006357,
"loss": 3.3807,
"step": 39000
},
{
"epoch": 4.197610590894414,
"eval_accuracy": 0.37702878030288295,
"eval_loss": 3.4417600631713867,
"eval_runtime": 180.8436,
"eval_samples_per_second": 99.594,
"eval_steps_per_second": 6.226,
"step": 39000
},
{
"epoch": 4.202992142934022,
"grad_norm": 0.72121262550354,
"learning_rate": 0.00034828574507057424,
"loss": 3.3713,
"step": 39050
},
{
"epoch": 4.208373694973631,
"grad_norm": 0.6440616250038147,
"learning_rate": 0.00034796250404051283,
"loss": 3.3781,
"step": 39100
},
{
"epoch": 4.213755247013238,
"grad_norm": 0.6587533950805664,
"learning_rate": 0.00034763926301045137,
"loss": 3.3838,
"step": 39150
},
{
"epoch": 4.219136799052847,
"grad_norm": 0.6770883798599243,
"learning_rate": 0.00034731602198039,
"loss": 3.3788,
"step": 39200
},
{
"epoch": 4.224518351092455,
"grad_norm": 0.6280528903007507,
"learning_rate": 0.0003469927809503286,
"loss": 3.3621,
"step": 39250
},
{
"epoch": 4.229899903132063,
"grad_norm": 0.7106229662895203,
"learning_rate": 0.00034666953992026716,
"loss": 3.3894,
"step": 39300
},
{
"epoch": 4.2352814551716715,
"grad_norm": 0.6384456157684326,
"learning_rate": 0.00034634629889020575,
"loss": 3.3686,
"step": 39350
},
{
"epoch": 4.24066300721128,
"grad_norm": 0.6792171001434326,
"learning_rate": 0.00034602305786014435,
"loss": 3.3783,
"step": 39400
},
{
"epoch": 4.246044559250888,
"grad_norm": 0.6978849768638611,
"learning_rate": 0.00034569981683008294,
"loss": 3.3816,
"step": 39450
},
{
"epoch": 4.251426111290496,
"grad_norm": 0.6810808181762695,
"learning_rate": 0.00034537657580002154,
"loss": 3.3828,
"step": 39500
},
{
"epoch": 4.256807663330104,
"grad_norm": 0.6624694466590881,
"learning_rate": 0.00034505333476996013,
"loss": 3.3848,
"step": 39550
},
{
"epoch": 4.2621892153697125,
"grad_norm": 0.6291442513465881,
"learning_rate": 0.00034473009373989867,
"loss": 3.3908,
"step": 39600
},
{
"epoch": 4.267570767409321,
"grad_norm": 0.6254659295082092,
"learning_rate": 0.00034440685270983727,
"loss": 3.3732,
"step": 39650
},
{
"epoch": 4.272952319448929,
"grad_norm": 0.6378159523010254,
"learning_rate": 0.0003440836116797758,
"loss": 3.3713,
"step": 39700
},
{
"epoch": 4.278333871488537,
"grad_norm": 0.6168226003646851,
"learning_rate": 0.00034376037064971445,
"loss": 3.3812,
"step": 39750
},
{
"epoch": 4.283715423528146,
"grad_norm": 0.6415433287620544,
"learning_rate": 0.00034343712961965305,
"loss": 3.3846,
"step": 39800
},
{
"epoch": 4.2890969755677535,
"grad_norm": 0.6528810262680054,
"learning_rate": 0.0003431138885895916,
"loss": 3.377,
"step": 39850
},
{
"epoch": 4.294478527607362,
"grad_norm": 0.6433749794960022,
"learning_rate": 0.0003427906475595302,
"loss": 3.3652,
"step": 39900
},
{
"epoch": 4.299860079646971,
"grad_norm": 0.6395750641822815,
"learning_rate": 0.0003424674065294688,
"loss": 3.381,
"step": 39950
},
{
"epoch": 4.305241631686578,
"grad_norm": 0.6627628207206726,
"learning_rate": 0.0003421441654994073,
"loss": 3.3776,
"step": 40000
},
{
"epoch": 4.305241631686578,
"eval_accuracy": 0.37774751915577576,
"eval_loss": 3.4375598430633545,
"eval_runtime": 180.6158,
"eval_samples_per_second": 99.72,
"eval_steps_per_second": 6.234,
"step": 40000
},
{
"epoch": 4.310623183726187,
"grad_norm": 0.6349745392799377,
"learning_rate": 0.00034182092446934597,
"loss": 3.3783,
"step": 40050
},
{
"epoch": 4.3160047357657945,
"grad_norm": 0.6404194831848145,
"learning_rate": 0.00034149768343928456,
"loss": 3.3909,
"step": 40100
},
{
"epoch": 4.321386287805403,
"grad_norm": 0.6430397033691406,
"learning_rate": 0.0003411744424092231,
"loss": 3.3771,
"step": 40150
},
{
"epoch": 4.326767839845012,
"grad_norm": 0.660280704498291,
"learning_rate": 0.0003408512013791617,
"loss": 3.3799,
"step": 40200
},
{
"epoch": 4.332149391884619,
"grad_norm": 0.6253730654716492,
"learning_rate": 0.0003405344251697015,
"loss": 3.3712,
"step": 40250
},
{
"epoch": 4.337530943924228,
"grad_norm": 0.6870384216308594,
"learning_rate": 0.0003402111841396401,
"loss": 3.381,
"step": 40300
},
{
"epoch": 4.342912495963836,
"grad_norm": 0.665998637676239,
"learning_rate": 0.00033988794310957864,
"loss": 3.3787,
"step": 40350
},
{
"epoch": 4.348294048003444,
"grad_norm": 0.6889680027961731,
"learning_rate": 0.0003395647020795173,
"loss": 3.3795,
"step": 40400
},
{
"epoch": 4.3536756000430525,
"grad_norm": 0.7430781722068787,
"learning_rate": 0.0003392414610494559,
"loss": 3.3779,
"step": 40450
},
{
"epoch": 4.359057152082661,
"grad_norm": 0.629690945148468,
"learning_rate": 0.0003389182200193944,
"loss": 3.3751,
"step": 40500
},
{
"epoch": 4.364438704122269,
"grad_norm": 0.6800459027290344,
"learning_rate": 0.000338594978989333,
"loss": 3.3752,
"step": 40550
},
{
"epoch": 4.369820256161877,
"grad_norm": 0.6480296850204468,
"learning_rate": 0.00033827173795927156,
"loss": 3.3933,
"step": 40600
},
{
"epoch": 4.375201808201485,
"grad_norm": 0.6850379705429077,
"learning_rate": 0.00033794849692921015,
"loss": 3.3732,
"step": 40650
},
{
"epoch": 4.3805833602410935,
"grad_norm": 0.6677266359329224,
"learning_rate": 0.0003376252558991488,
"loss": 3.3851,
"step": 40700
},
{
"epoch": 4.385964912280702,
"grad_norm": 0.6740007996559143,
"learning_rate": 0.00033730201486908734,
"loss": 3.3816,
"step": 40750
},
{
"epoch": 4.39134646432031,
"grad_norm": 0.6995536684989929,
"learning_rate": 0.00033697877383902594,
"loss": 3.3898,
"step": 40800
},
{
"epoch": 4.396728016359918,
"grad_norm": 0.6835097074508667,
"learning_rate": 0.00033665553280896453,
"loss": 3.3867,
"step": 40850
},
{
"epoch": 4.402109568399527,
"grad_norm": 0.6530295014381409,
"learning_rate": 0.00033633229177890307,
"loss": 3.3737,
"step": 40900
},
{
"epoch": 4.4074911204391345,
"grad_norm": 0.6370556354522705,
"learning_rate": 0.00033600905074884167,
"loss": 3.3841,
"step": 40950
},
{
"epoch": 4.412872672478743,
"grad_norm": 0.6709283590316772,
"learning_rate": 0.0003356858097187803,
"loss": 3.3871,
"step": 41000
},
{
"epoch": 4.412872672478743,
"eval_accuracy": 0.37784802307700416,
"eval_loss": 3.4330832958221436,
"eval_runtime": 180.4989,
"eval_samples_per_second": 99.785,
"eval_steps_per_second": 6.238,
"step": 41000
},
{
"epoch": 4.418254224518351,
"grad_norm": 0.6717859506607056,
"learning_rate": 0.00033536256868871886,
"loss": 3.3859,
"step": 41050
},
{
"epoch": 4.423635776557959,
"grad_norm": 0.6793646216392517,
"learning_rate": 0.00033503932765865745,
"loss": 3.3893,
"step": 41100
},
{
"epoch": 4.429017328597568,
"grad_norm": 0.6531360149383545,
"learning_rate": 0.000334716086628596,
"loss": 3.4012,
"step": 41150
},
{
"epoch": 4.4343988806371755,
"grad_norm": 0.655354917049408,
"learning_rate": 0.0003343928455985346,
"loss": 3.3705,
"step": 41200
},
{
"epoch": 4.439780432676784,
"grad_norm": 0.6870447993278503,
"learning_rate": 0.00033406960456847324,
"loss": 3.3836,
"step": 41250
},
{
"epoch": 4.445161984716393,
"grad_norm": 0.6646298170089722,
"learning_rate": 0.0003337463635384118,
"loss": 3.3703,
"step": 41300
},
{
"epoch": 4.450543536756,
"grad_norm": 0.7067282795906067,
"learning_rate": 0.00033342312250835037,
"loss": 3.3779,
"step": 41350
},
{
"epoch": 4.455925088795609,
"grad_norm": 0.6601843237876892,
"learning_rate": 0.00033309988147828896,
"loss": 3.3896,
"step": 41400
},
{
"epoch": 4.461306640835216,
"grad_norm": 0.6824533343315125,
"learning_rate": 0.0003327766404482275,
"loss": 3.3798,
"step": 41450
},
{
"epoch": 4.466688192874825,
"grad_norm": 0.6678142547607422,
"learning_rate": 0.0003324533994181661,
"loss": 3.385,
"step": 41500
},
{
"epoch": 4.4720697449144335,
"grad_norm": 0.6664413213729858,
"learning_rate": 0.00033213015838810475,
"loss": 3.4065,
"step": 41550
},
{
"epoch": 4.477451296954041,
"grad_norm": 0.6379727721214294,
"learning_rate": 0.0003318069173580433,
"loss": 3.3982,
"step": 41600
},
{
"epoch": 4.48283284899365,
"grad_norm": 0.7035315036773682,
"learning_rate": 0.0003314836763279819,
"loss": 3.3971,
"step": 41650
},
{
"epoch": 4.488214401033258,
"grad_norm": 0.6721235513687134,
"learning_rate": 0.0003311604352979204,
"loss": 3.3991,
"step": 41700
},
{
"epoch": 4.493595953072866,
"grad_norm": 0.6236030459403992,
"learning_rate": 0.000330837194267859,
"loss": 3.3777,
"step": 41750
},
{
"epoch": 4.4989775051124745,
"grad_norm": 0.6594476103782654,
"learning_rate": 0.0003305139532377976,
"loss": 3.392,
"step": 41800
},
{
"epoch": 4.504359057152083,
"grad_norm": 0.6631010174751282,
"learning_rate": 0.0003301907122077362,
"loss": 3.3953,
"step": 41850
},
{
"epoch": 4.509740609191691,
"grad_norm": 0.6775970458984375,
"learning_rate": 0.0003298674711776748,
"loss": 3.3798,
"step": 41900
},
{
"epoch": 4.515122161231299,
"grad_norm": 0.6755871772766113,
"learning_rate": 0.0003295442301476134,
"loss": 3.376,
"step": 41950
},
{
"epoch": 4.520503713270907,
"grad_norm": 0.660190761089325,
"learning_rate": 0.00032922098911755194,
"loss": 3.3824,
"step": 42000
},
{
"epoch": 4.520503713270907,
"eval_accuracy": 0.37855622259977895,
"eval_loss": 3.42791485786438,
"eval_runtime": 180.7657,
"eval_samples_per_second": 99.637,
"eval_steps_per_second": 6.229,
"step": 42000
},
{
"epoch": 4.5258852653105155,
"grad_norm": 0.6533682942390442,
"learning_rate": 0.00032889774808749053,
"loss": 3.3913,
"step": 42050
},
{
"epoch": 4.531266817350124,
"grad_norm": 0.6714156270027161,
"learning_rate": 0.0003285745070574292,
"loss": 3.3873,
"step": 42100
},
{
"epoch": 4.536648369389732,
"grad_norm": 0.6406194567680359,
"learning_rate": 0.0003282512660273677,
"loss": 3.3876,
"step": 42150
},
{
"epoch": 4.54202992142934,
"grad_norm": 0.672727108001709,
"learning_rate": 0.0003279280249973063,
"loss": 3.3871,
"step": 42200
},
{
"epoch": 4.547411473468949,
"grad_norm": 0.6561397314071655,
"learning_rate": 0.00032760478396724486,
"loss": 3.3847,
"step": 42250
},
{
"epoch": 4.5527930255085565,
"grad_norm": 0.7014278173446655,
"learning_rate": 0.0003272880077577847,
"loss": 3.4011,
"step": 42300
},
{
"epoch": 4.558174577548165,
"grad_norm": 0.6329679489135742,
"learning_rate": 0.00032696476672772326,
"loss": 3.3918,
"step": 42350
},
{
"epoch": 4.563556129587774,
"grad_norm": 0.6573483943939209,
"learning_rate": 0.00032664152569766185,
"loss": 3.4036,
"step": 42400
},
{
"epoch": 4.568937681627381,
"grad_norm": 0.6461696624755859,
"learning_rate": 0.0003263182846676004,
"loss": 3.3831,
"step": 42450
},
{
"epoch": 4.57431923366699,
"grad_norm": 0.6436484456062317,
"learning_rate": 0.00032599504363753904,
"loss": 3.4091,
"step": 42500
},
{
"epoch": 4.579700785706597,
"grad_norm": 0.683167040348053,
"learning_rate": 0.00032567180260747764,
"loss": 3.3936,
"step": 42550
},
{
"epoch": 4.585082337746206,
"grad_norm": 0.6897136569023132,
"learning_rate": 0.0003253485615774162,
"loss": 3.3839,
"step": 42600
},
{
"epoch": 4.5904638897858145,
"grad_norm": 0.7163823246955872,
"learning_rate": 0.00032502532054735477,
"loss": 3.3933,
"step": 42650
},
{
"epoch": 4.595845441825422,
"grad_norm": 0.6382591724395752,
"learning_rate": 0.00032470207951729337,
"loss": 3.3879,
"step": 42700
},
{
"epoch": 4.601226993865031,
"grad_norm": 0.6253649592399597,
"learning_rate": 0.0003243788384872319,
"loss": 3.3937,
"step": 42750
},
{
"epoch": 4.606608545904638,
"grad_norm": 0.6715789437294006,
"learning_rate": 0.00032405559745717056,
"loss": 3.3917,
"step": 42800
},
{
"epoch": 4.611990097944247,
"grad_norm": 0.7270500659942627,
"learning_rate": 0.00032373235642710915,
"loss": 3.3983,
"step": 42850
},
{
"epoch": 4.6173716499838555,
"grad_norm": 0.6829226613044739,
"learning_rate": 0.0003234091153970477,
"loss": 3.4023,
"step": 42900
},
{
"epoch": 4.622753202023463,
"grad_norm": 0.7018654942512512,
"learning_rate": 0.0003230858743669863,
"loss": 3.3912,
"step": 42950
},
{
"epoch": 4.628134754063072,
"grad_norm": 0.6380162239074707,
"learning_rate": 0.0003227626333369248,
"loss": 3.3943,
"step": 43000
},
{
"epoch": 4.628134754063072,
"eval_accuracy": 0.37904103178520715,
"eval_loss": 3.421799421310425,
"eval_runtime": 180.2956,
"eval_samples_per_second": 99.897,
"eval_steps_per_second": 6.245,
"step": 43000
},
{
"epoch": 4.63351630610268,
"grad_norm": 0.6637802124023438,
"learning_rate": 0.0003224393923068635,
"loss": 3.3806,
"step": 43050
},
{
"epoch": 4.638897858142288,
"grad_norm": 0.6968106031417847,
"learning_rate": 0.00032211615127680207,
"loss": 3.3677,
"step": 43100
},
{
"epoch": 4.6442794101818965,
"grad_norm": 0.6918413043022156,
"learning_rate": 0.0003217929102467406,
"loss": 3.3913,
"step": 43150
},
{
"epoch": 4.649660962221505,
"grad_norm": 0.637776255607605,
"learning_rate": 0.0003214696692166792,
"loss": 3.3909,
"step": 43200
},
{
"epoch": 4.655042514261113,
"grad_norm": 0.666828453540802,
"learning_rate": 0.0003211464281866178,
"loss": 3.3732,
"step": 43250
},
{
"epoch": 4.660424066300721,
"grad_norm": 0.6439411640167236,
"learning_rate": 0.00032082318715655634,
"loss": 3.3776,
"step": 43300
},
{
"epoch": 4.665805618340329,
"grad_norm": 0.6902055740356445,
"learning_rate": 0.000320499946126495,
"loss": 3.3948,
"step": 43350
},
{
"epoch": 4.6711871703799375,
"grad_norm": 0.7745203375816345,
"learning_rate": 0.0003201767050964336,
"loss": 3.3795,
"step": 43400
},
{
"epoch": 4.676568722419546,
"grad_norm": 0.671711266040802,
"learning_rate": 0.0003198534640663721,
"loss": 3.3798,
"step": 43450
},
{
"epoch": 4.681950274459154,
"grad_norm": 0.6714461445808411,
"learning_rate": 0.00031953668785691193,
"loss": 3.3852,
"step": 43500
},
{
"epoch": 4.687331826498762,
"grad_norm": 0.6817046999931335,
"learning_rate": 0.0003192134468268505,
"loss": 3.3894,
"step": 43550
},
{
"epoch": 4.692713378538371,
"grad_norm": 0.6941414475440979,
"learning_rate": 0.0003188902057967891,
"loss": 3.4095,
"step": 43600
},
{
"epoch": 4.6980949305779784,
"grad_norm": 0.7007185220718384,
"learning_rate": 0.00031856696476672766,
"loss": 3.3747,
"step": 43650
},
{
"epoch": 4.703476482617587,
"grad_norm": 0.7390422821044922,
"learning_rate": 0.0003182437237366663,
"loss": 3.3933,
"step": 43700
},
{
"epoch": 4.7088580346571955,
"grad_norm": 0.6899697780609131,
"learning_rate": 0.0003179204827066049,
"loss": 3.3846,
"step": 43750
},
{
"epoch": 4.714239586696803,
"grad_norm": 0.6393377184867859,
"learning_rate": 0.00031759724167654344,
"loss": 3.3877,
"step": 43800
},
{
"epoch": 4.719621138736412,
"grad_norm": 0.6704504489898682,
"learning_rate": 0.00031727400064648204,
"loss": 3.3905,
"step": 43850
},
{
"epoch": 4.725002690776019,
"grad_norm": 0.6651193499565125,
"learning_rate": 0.0003169507596164206,
"loss": 3.3773,
"step": 43900
},
{
"epoch": 4.730384242815628,
"grad_norm": 0.6523035168647766,
"learning_rate": 0.0003166275185863592,
"loss": 3.4,
"step": 43950
},
{
"epoch": 4.7357657948552365,
"grad_norm": 0.6419060230255127,
"learning_rate": 0.0003163042775562978,
"loss": 3.3728,
"step": 44000
},
{
"epoch": 4.7357657948552365,
"eval_accuracy": 0.37933276478898903,
"eval_loss": 3.417551040649414,
"eval_runtime": 180.8101,
"eval_samples_per_second": 99.613,
"eval_steps_per_second": 6.228,
"step": 44000
},
{
"epoch": 4.741147346894844,
"grad_norm": 0.6831957697868347,
"learning_rate": 0.00031598103652623636,
"loss": 3.3938,
"step": 44050
},
{
"epoch": 4.746528898934453,
"grad_norm": 0.6325632929801941,
"learning_rate": 0.00031565779549617496,
"loss": 3.4015,
"step": 44100
},
{
"epoch": 4.751910450974061,
"grad_norm": 0.687362790107727,
"learning_rate": 0.00031533455446611355,
"loss": 3.3946,
"step": 44150
},
{
"epoch": 4.757292003013669,
"grad_norm": 0.6923133134841919,
"learning_rate": 0.0003150113134360521,
"loss": 3.3823,
"step": 44200
},
{
"epoch": 4.7626735550532775,
"grad_norm": 0.709951639175415,
"learning_rate": 0.0003146880724059907,
"loss": 3.3947,
"step": 44250
},
{
"epoch": 4.768055107092886,
"grad_norm": 0.6787405610084534,
"learning_rate": 0.00031436483137592934,
"loss": 3.3734,
"step": 44300
},
{
"epoch": 4.773436659132494,
"grad_norm": 0.7011154890060425,
"learning_rate": 0.0003140415903458679,
"loss": 3.3932,
"step": 44350
},
{
"epoch": 4.778818211172102,
"grad_norm": 0.7104355096817017,
"learning_rate": 0.00031371834931580647,
"loss": 3.3876,
"step": 44400
},
{
"epoch": 4.78419976321171,
"grad_norm": 0.65087890625,
"learning_rate": 0.000313395108285745,
"loss": 3.3826,
"step": 44450
},
{
"epoch": 4.7895813152513185,
"grad_norm": 0.7193938493728638,
"learning_rate": 0.0003130718672556836,
"loss": 3.3899,
"step": 44500
},
{
"epoch": 4.794962867290927,
"grad_norm": 0.6938375234603882,
"learning_rate": 0.0003127486262256222,
"loss": 3.367,
"step": 44550
},
{
"epoch": 4.800344419330535,
"grad_norm": 0.6607935428619385,
"learning_rate": 0.0003124253851955608,
"loss": 3.3828,
"step": 44600
},
{
"epoch": 4.805725971370143,
"grad_norm": 0.7184892296791077,
"learning_rate": 0.0003121021441654994,
"loss": 3.4025,
"step": 44650
},
{
"epoch": 4.811107523409751,
"grad_norm": 0.6583475470542908,
"learning_rate": 0.000311778903135438,
"loss": 3.4065,
"step": 44700
},
{
"epoch": 4.8164890754493594,
"grad_norm": 0.746714174747467,
"learning_rate": 0.0003114556621053765,
"loss": 3.4003,
"step": 44750
},
{
"epoch": 4.821870627488968,
"grad_norm": 0.667801558971405,
"learning_rate": 0.0003111324210753151,
"loss": 3.3777,
"step": 44800
},
{
"epoch": 4.827252179528576,
"grad_norm": 0.6681658029556274,
"learning_rate": 0.00031080918004525377,
"loss": 3.3846,
"step": 44850
},
{
"epoch": 4.832633731568184,
"grad_norm": 0.6859903931617737,
"learning_rate": 0.0003104859390151923,
"loss": 3.3843,
"step": 44900
},
{
"epoch": 4.838015283607793,
"grad_norm": 0.6311453580856323,
"learning_rate": 0.0003101626979851309,
"loss": 3.3754,
"step": 44950
},
{
"epoch": 4.8433968356474,
"grad_norm": 0.6764549016952515,
"learning_rate": 0.00030983945695506945,
"loss": 3.3897,
"step": 45000
},
{
"epoch": 4.8433968356474,
"eval_accuracy": 0.3799441545887211,
"eval_loss": 3.4145073890686035,
"eval_runtime": 180.8088,
"eval_samples_per_second": 99.614,
"eval_steps_per_second": 6.228,
"step": 45000
},
{
"epoch": 4.848778387687009,
"grad_norm": 0.7071652412414551,
"learning_rate": 0.00030951621592500804,
"loss": 3.3874,
"step": 45050
},
{
"epoch": 4.8541599397266175,
"grad_norm": 0.6975886821746826,
"learning_rate": 0.00030919297489494663,
"loss": 3.381,
"step": 45100
},
{
"epoch": 4.859541491766225,
"grad_norm": 0.6888333559036255,
"learning_rate": 0.00030886973386488523,
"loss": 3.3775,
"step": 45150
},
{
"epoch": 4.864923043805834,
"grad_norm": 0.6672408580780029,
"learning_rate": 0.0003085464928348238,
"loss": 3.4037,
"step": 45200
},
{
"epoch": 4.870304595845441,
"grad_norm": 0.710537850856781,
"learning_rate": 0.0003082232518047624,
"loss": 3.391,
"step": 45250
},
{
"epoch": 4.87568614788505,
"grad_norm": 0.6636947393417358,
"learning_rate": 0.00030790001077470096,
"loss": 3.3911,
"step": 45300
},
{
"epoch": 4.8810676999246585,
"grad_norm": 0.6961594223976135,
"learning_rate": 0.00030757676974463955,
"loss": 3.3829,
"step": 45350
},
{
"epoch": 4.886449251964266,
"grad_norm": 0.8008875846862793,
"learning_rate": 0.0003072535287145781,
"loss": 3.3904,
"step": 45400
},
{
"epoch": 4.891830804003875,
"grad_norm": 0.6139016151428223,
"learning_rate": 0.00030693028768451674,
"loss": 3.3811,
"step": 45450
},
{
"epoch": 4.897212356043483,
"grad_norm": 0.6992993354797363,
"learning_rate": 0.00030660704665445534,
"loss": 3.3865,
"step": 45500
},
{
"epoch": 4.902593908083091,
"grad_norm": 0.6530194282531738,
"learning_rate": 0.0003062838056243939,
"loss": 3.3902,
"step": 45550
},
{
"epoch": 4.9079754601226995,
"grad_norm": 0.6725596189498901,
"learning_rate": 0.00030596056459433247,
"loss": 3.3907,
"step": 45600
},
{
"epoch": 4.913357012162308,
"grad_norm": 0.670534610748291,
"learning_rate": 0.00030563732356427107,
"loss": 3.3845,
"step": 45650
},
{
"epoch": 4.918738564201916,
"grad_norm": 0.6930390000343323,
"learning_rate": 0.00030531408253420966,
"loss": 3.3919,
"step": 45700
},
{
"epoch": 4.924120116241524,
"grad_norm": 0.7007985711097717,
"learning_rate": 0.00030499084150414826,
"loss": 3.375,
"step": 45750
},
{
"epoch": 4.929501668281132,
"grad_norm": 0.6509608030319214,
"learning_rate": 0.00030466760047408685,
"loss": 3.3936,
"step": 45800
},
{
"epoch": 4.9348832203207404,
"grad_norm": 0.7178775668144226,
"learning_rate": 0.0003043443594440254,
"loss": 3.3867,
"step": 45850
},
{
"epoch": 4.940264772360349,
"grad_norm": 0.7314358949661255,
"learning_rate": 0.000304021118413964,
"loss": 3.3853,
"step": 45900
},
{
"epoch": 4.945646324399957,
"grad_norm": 0.7417964339256287,
"learning_rate": 0.0003036978773839025,
"loss": 3.3955,
"step": 45950
},
{
"epoch": 4.951027876439565,
"grad_norm": 0.6962642669677734,
"learning_rate": 0.0003033746363538412,
"loss": 3.3879,
"step": 46000
},
{
"epoch": 4.951027876439565,
"eval_accuracy": 0.3806199755509272,
"eval_loss": 3.4076614379882812,
"eval_runtime": 180.463,
"eval_samples_per_second": 99.804,
"eval_steps_per_second": 6.24,
"step": 46000
},
{
"epoch": 4.956409428479174,
"grad_norm": 0.6814647912979126,
"learning_rate": 0.00030305139532377977,
"loss": 3.3941,
"step": 46050
},
{
"epoch": 4.961790980518781,
"grad_norm": 0.6750015020370483,
"learning_rate": 0.0003027281542937183,
"loss": 3.3918,
"step": 46100
},
{
"epoch": 4.96717253255839,
"grad_norm": 0.6892281174659729,
"learning_rate": 0.0003024049132636569,
"loss": 3.3945,
"step": 46150
},
{
"epoch": 4.9725540845979985,
"grad_norm": 0.6996626257896423,
"learning_rate": 0.0003020816722335955,
"loss": 3.3739,
"step": 46200
},
{
"epoch": 4.977935636637606,
"grad_norm": 0.6532090902328491,
"learning_rate": 0.00030175843120353404,
"loss": 3.4021,
"step": 46250
},
{
"epoch": 4.983317188677215,
"grad_norm": 0.6986241936683655,
"learning_rate": 0.0003014351901734727,
"loss": 3.3769,
"step": 46300
},
{
"epoch": 4.988698740716822,
"grad_norm": 0.6778721213340759,
"learning_rate": 0.0003011119491434113,
"loss": 3.3806,
"step": 46350
},
{
"epoch": 4.994080292756431,
"grad_norm": 0.7227446436882019,
"learning_rate": 0.0003007887081133498,
"loss": 3.386,
"step": 46400
},
{
"epoch": 4.9994618447960395,
"grad_norm": 0.6771180629730225,
"learning_rate": 0.0003004654670832884,
"loss": 3.3777,
"step": 46450
},
{
"epoch": 5.004843396835647,
"grad_norm": 0.6935259699821472,
"learning_rate": 0.00030014222605322696,
"loss": 3.3098,
"step": 46500
},
{
"epoch": 5.010224948875256,
"grad_norm": 0.7119801640510559,
"learning_rate": 0.00029981898502316555,
"loss": 3.3013,
"step": 46550
},
{
"epoch": 5.015606500914864,
"grad_norm": 0.7470939755439758,
"learning_rate": 0.00029949574399310415,
"loss": 3.2987,
"step": 46600
},
{
"epoch": 5.020988052954472,
"grad_norm": 0.7172041535377502,
"learning_rate": 0.00029917250296304274,
"loss": 3.2962,
"step": 46650
},
{
"epoch": 5.0263696049940805,
"grad_norm": 0.708893895149231,
"learning_rate": 0.00029884926193298134,
"loss": 3.2944,
"step": 46700
},
{
"epoch": 5.031751157033688,
"grad_norm": 0.6856112480163574,
"learning_rate": 0.0002985260209029199,
"loss": 3.2975,
"step": 46750
},
{
"epoch": 5.037132709073297,
"grad_norm": 0.6789200305938721,
"learning_rate": 0.00029820277987285853,
"loss": 3.2961,
"step": 46800
},
{
"epoch": 5.042514261112905,
"grad_norm": 0.6745328307151794,
"learning_rate": 0.00029787953884279707,
"loss": 3.294,
"step": 46850
},
{
"epoch": 5.047895813152513,
"grad_norm": 0.695334792137146,
"learning_rate": 0.00029756276263333693,
"loss": 3.3119,
"step": 46900
},
{
"epoch": 5.0532773651921215,
"grad_norm": 0.6939895153045654,
"learning_rate": 0.00029723952160327547,
"loss": 3.2977,
"step": 46950
},
{
"epoch": 5.05865891723173,
"grad_norm": 0.7092863917350769,
"learning_rate": 0.00029691628057321406,
"loss": 3.3093,
"step": 47000
},
{
"epoch": 5.05865891723173,
"eval_accuracy": 0.3807579647184516,
"eval_loss": 3.4131321907043457,
"eval_runtime": 180.5823,
"eval_samples_per_second": 99.738,
"eval_steps_per_second": 6.235,
"step": 47000
},
{
"epoch": 5.064040469271338,
"grad_norm": 0.7017537355422974,
"learning_rate": 0.00029659303954315266,
"loss": 3.3113,
"step": 47050
},
{
"epoch": 5.069422021310946,
"grad_norm": 0.7066820859909058,
"learning_rate": 0.00029626979851309125,
"loss": 3.3025,
"step": 47100
},
{
"epoch": 5.074803573350554,
"grad_norm": 0.7007570862770081,
"learning_rate": 0.00029594655748302985,
"loss": 3.3077,
"step": 47150
},
{
"epoch": 5.080185125390162,
"grad_norm": 0.7130218744277954,
"learning_rate": 0.0002956233164529684,
"loss": 3.2919,
"step": 47200
},
{
"epoch": 5.085566677429771,
"grad_norm": 0.7107332944869995,
"learning_rate": 0.000295300075422907,
"loss": 3.3093,
"step": 47250
},
{
"epoch": 5.090948229469379,
"grad_norm": 0.6778996586799622,
"learning_rate": 0.0002949768343928456,
"loss": 3.3089,
"step": 47300
},
{
"epoch": 5.096329781508987,
"grad_norm": 0.7302454113960266,
"learning_rate": 0.00029465359336278417,
"loss": 3.321,
"step": 47350
},
{
"epoch": 5.101711333548596,
"grad_norm": 0.7293109893798828,
"learning_rate": 0.0002943303523327227,
"loss": 3.3307,
"step": 47400
},
{
"epoch": 5.107092885588203,
"grad_norm": 0.7125155329704285,
"learning_rate": 0.00029400711130266136,
"loss": 3.3088,
"step": 47450
},
{
"epoch": 5.112474437627812,
"grad_norm": 0.7028940916061401,
"learning_rate": 0.0002936838702725999,
"loss": 3.3038,
"step": 47500
},
{
"epoch": 5.1178559896674205,
"grad_norm": 0.6821138858795166,
"learning_rate": 0.0002933606292425385,
"loss": 3.3197,
"step": 47550
},
{
"epoch": 5.123237541707028,
"grad_norm": 0.6831609010696411,
"learning_rate": 0.0002930373882124771,
"loss": 3.3003,
"step": 47600
},
{
"epoch": 5.128619093746637,
"grad_norm": 0.6890237331390381,
"learning_rate": 0.0002927141471824157,
"loss": 3.3091,
"step": 47650
},
{
"epoch": 5.134000645786244,
"grad_norm": 0.7032915949821472,
"learning_rate": 0.0002923909061523542,
"loss": 3.3137,
"step": 47700
},
{
"epoch": 5.139382197825853,
"grad_norm": 0.6753126382827759,
"learning_rate": 0.0002920676651222928,
"loss": 3.3034,
"step": 47750
},
{
"epoch": 5.1447637498654615,
"grad_norm": 0.7659389972686768,
"learning_rate": 0.0002917444240922314,
"loss": 3.3154,
"step": 47800
},
{
"epoch": 5.150145301905069,
"grad_norm": 0.688945472240448,
"learning_rate": 0.00029142118306216996,
"loss": 3.3108,
"step": 47850
},
{
"epoch": 5.155526853944678,
"grad_norm": 0.6884469389915466,
"learning_rate": 0.0002910979420321086,
"loss": 3.3065,
"step": 47900
},
{
"epoch": 5.160908405984286,
"grad_norm": 0.6957643032073975,
"learning_rate": 0.00029077470100204715,
"loss": 3.31,
"step": 47950
},
{
"epoch": 5.166289958023894,
"grad_norm": 0.7388163805007935,
"learning_rate": 0.00029045145997198574,
"loss": 3.2902,
"step": 48000
},
{
"epoch": 5.166289958023894,
"eval_accuracy": 0.3809007346130398,
"eval_loss": 3.4100382328033447,
"eval_runtime": 180.4717,
"eval_samples_per_second": 99.8,
"eval_steps_per_second": 6.239,
"step": 48000
},
{
"epoch": 5.1716715100635025,
"grad_norm": 0.7176282405853271,
"learning_rate": 0.00029012821894192433,
"loss": 3.315,
"step": 48050
},
{
"epoch": 5.17705306210311,
"grad_norm": 0.6990656852722168,
"learning_rate": 0.00028980497791186293,
"loss": 3.3085,
"step": 48100
},
{
"epoch": 5.182434614142719,
"grad_norm": 0.7440829277038574,
"learning_rate": 0.0002894817368818015,
"loss": 3.3026,
"step": 48150
},
{
"epoch": 5.187816166182327,
"grad_norm": 0.7810512781143188,
"learning_rate": 0.00028915849585174006,
"loss": 3.3067,
"step": 48200
},
{
"epoch": 5.193197718221935,
"grad_norm": 0.7514436841011047,
"learning_rate": 0.00028883525482167866,
"loss": 3.3155,
"step": 48250
},
{
"epoch": 5.198579270261543,
"grad_norm": 0.7587404251098633,
"learning_rate": 0.00028851201379161725,
"loss": 3.326,
"step": 48300
},
{
"epoch": 5.203960822301152,
"grad_norm": 0.728809654712677,
"learning_rate": 0.00028818877276155585,
"loss": 3.3046,
"step": 48350
},
{
"epoch": 5.20934237434076,
"grad_norm": 0.6925315260887146,
"learning_rate": 0.0002878655317314944,
"loss": 3.3156,
"step": 48400
},
{
"epoch": 5.214723926380368,
"grad_norm": 0.761146605014801,
"learning_rate": 0.00028754229070143304,
"loss": 3.3213,
"step": 48450
},
{
"epoch": 5.220105478419977,
"grad_norm": 0.7084577679634094,
"learning_rate": 0.0002872190496713716,
"loss": 3.3169,
"step": 48500
},
{
"epoch": 5.225487030459584,
"grad_norm": 0.6968151926994324,
"learning_rate": 0.0002868958086413102,
"loss": 3.307,
"step": 48550
},
{
"epoch": 5.230868582499193,
"grad_norm": 0.6933405995368958,
"learning_rate": 0.00028657256761124877,
"loss": 3.3176,
"step": 48600
},
{
"epoch": 5.236250134538801,
"grad_norm": 0.7592040300369263,
"learning_rate": 0.00028624932658118736,
"loss": 3.3159,
"step": 48650
},
{
"epoch": 5.241631686578409,
"grad_norm": 0.6854032874107361,
"learning_rate": 0.0002859260855511259,
"loss": 3.3186,
"step": 48700
},
{
"epoch": 5.247013238618018,
"grad_norm": 0.7083324193954468,
"learning_rate": 0.0002856028445210645,
"loss": 3.316,
"step": 48750
},
{
"epoch": 5.252394790657625,
"grad_norm": 0.7232556939125061,
"learning_rate": 0.0002852796034910031,
"loss": 3.3182,
"step": 48800
},
{
"epoch": 5.257776342697234,
"grad_norm": 0.6980643272399902,
"learning_rate": 0.0002849563624609417,
"loss": 3.318,
"step": 48850
},
{
"epoch": 5.2631578947368425,
"grad_norm": 0.6944864988327026,
"learning_rate": 0.0002846331214308803,
"loss": 3.3329,
"step": 48900
},
{
"epoch": 5.26853944677645,
"grad_norm": 0.749697744846344,
"learning_rate": 0.0002843098804008188,
"loss": 3.34,
"step": 48950
},
{
"epoch": 5.273920998816059,
"grad_norm": 0.706950843334198,
"learning_rate": 0.00028398663937075747,
"loss": 3.3097,
"step": 49000
},
{
"epoch": 5.273920998816059,
"eval_accuracy": 0.3812864523647812,
"eval_loss": 3.405521869659424,
"eval_runtime": 181.1668,
"eval_samples_per_second": 99.417,
"eval_steps_per_second": 6.215,
"step": 49000
},
{
"epoch": 5.279302550855666,
"grad_norm": 0.7286232709884644,
"learning_rate": 0.000283663398340696,
"loss": 3.3282,
"step": 49050
},
{
"epoch": 5.284684102895275,
"grad_norm": 0.7539182305335999,
"learning_rate": 0.0002833401573106346,
"loss": 3.3236,
"step": 49100
},
{
"epoch": 5.2900656549348835,
"grad_norm": 0.6944497227668762,
"learning_rate": 0.0002830169162805732,
"loss": 3.333,
"step": 49150
},
{
"epoch": 5.295447206974491,
"grad_norm": 0.684943675994873,
"learning_rate": 0.0002826936752505118,
"loss": 3.3295,
"step": 49200
},
{
"epoch": 5.3008287590141,
"grad_norm": 0.762440025806427,
"learning_rate": 0.00028237043422045034,
"loss": 3.3233,
"step": 49250
},
{
"epoch": 5.306210311053708,
"grad_norm": 0.7036476135253906,
"learning_rate": 0.00028205365801099014,
"loss": 3.3298,
"step": 49300
},
{
"epoch": 5.311591863093316,
"grad_norm": 0.7563344836235046,
"learning_rate": 0.00028173041698092874,
"loss": 3.3374,
"step": 49350
},
{
"epoch": 5.316973415132924,
"grad_norm": 0.7168316841125488,
"learning_rate": 0.00028140717595086733,
"loss": 3.3358,
"step": 49400
},
{
"epoch": 5.322354967172533,
"grad_norm": 0.7628238201141357,
"learning_rate": 0.0002810839349208059,
"loss": 3.3199,
"step": 49450
},
{
"epoch": 5.327736519212141,
"grad_norm": 0.7322455644607544,
"learning_rate": 0.00028076069389074447,
"loss": 3.327,
"step": 49500
},
{
"epoch": 5.333118071251749,
"grad_norm": 0.6967852115631104,
"learning_rate": 0.0002804374528606831,
"loss": 3.3419,
"step": 49550
},
{
"epoch": 5.338499623291357,
"grad_norm": 0.7441530227661133,
"learning_rate": 0.00028011421183062166,
"loss": 3.3185,
"step": 49600
},
{
"epoch": 5.343881175330965,
"grad_norm": 0.7443904280662537,
"learning_rate": 0.00027979097080056025,
"loss": 3.3199,
"step": 49650
},
{
"epoch": 5.349262727370574,
"grad_norm": 0.7386976480484009,
"learning_rate": 0.00027946772977049885,
"loss": 3.318,
"step": 49700
},
{
"epoch": 5.354644279410182,
"grad_norm": 0.7359290719032288,
"learning_rate": 0.00027914448874043744,
"loss": 3.3406,
"step": 49750
},
{
"epoch": 5.36002583144979,
"grad_norm": 0.730819046497345,
"learning_rate": 0.00027882124771037603,
"loss": 3.3092,
"step": 49800
},
{
"epoch": 5.365407383489399,
"grad_norm": 0.7585179209709167,
"learning_rate": 0.0002784980066803146,
"loss": 3.326,
"step": 49850
},
{
"epoch": 5.370788935529006,
"grad_norm": 0.7162631750106812,
"learning_rate": 0.00027817476565025317,
"loss": 3.3382,
"step": 49900
},
{
"epoch": 5.376170487568615,
"grad_norm": 0.7122279405593872,
"learning_rate": 0.00027785152462019176,
"loss": 3.3342,
"step": 49950
},
{
"epoch": 5.3815520396082235,
"grad_norm": 0.7170236706733704,
"learning_rate": 0.00027752828359013036,
"loss": 3.3242,
"step": 50000
},
{
"epoch": 5.3815520396082235,
"eval_accuracy": 0.38197922317748634,
"eval_loss": 3.400938034057617,
"eval_runtime": 180.5748,
"eval_samples_per_second": 99.743,
"eval_steps_per_second": 6.236,
"step": 50000
},
{
"epoch": 5.386933591647831,
"grad_norm": 0.6691011786460876,
"learning_rate": 0.0002772050425600689,
"loss": 3.3201,
"step": 50050
},
{
"epoch": 5.39231514368744,
"grad_norm": 0.7036200761795044,
"learning_rate": 0.00027688180153000755,
"loss": 3.3223,
"step": 50100
},
{
"epoch": 5.397696695727047,
"grad_norm": 0.6968643069267273,
"learning_rate": 0.0002765585604999461,
"loss": 3.34,
"step": 50150
},
{
"epoch": 5.403078247766656,
"grad_norm": 0.7583166360855103,
"learning_rate": 0.0002762353194698847,
"loss": 3.3211,
"step": 50200
},
{
"epoch": 5.4084597998062645,
"grad_norm": 0.7078987956047058,
"learning_rate": 0.0002759120784398233,
"loss": 3.3302,
"step": 50250
},
{
"epoch": 5.413841351845872,
"grad_norm": 0.764525294303894,
"learning_rate": 0.00027558883740976187,
"loss": 3.344,
"step": 50300
},
{
"epoch": 5.419222903885481,
"grad_norm": 0.7484478950500488,
"learning_rate": 0.0002752655963797004,
"loss": 3.3345,
"step": 50350
},
{
"epoch": 5.424604455925088,
"grad_norm": 0.7260310053825378,
"learning_rate": 0.000274942355349639,
"loss": 3.3208,
"step": 50400
},
{
"epoch": 5.429986007964697,
"grad_norm": 0.6879717707633972,
"learning_rate": 0.0002746191143195776,
"loss": 3.3432,
"step": 50450
},
{
"epoch": 5.435367560004305,
"grad_norm": 0.7341870665550232,
"learning_rate": 0.0002742958732895162,
"loss": 3.3261,
"step": 50500
},
{
"epoch": 5.440749112043913,
"grad_norm": 0.70168536901474,
"learning_rate": 0.0002739726322594548,
"loss": 3.3284,
"step": 50550
},
{
"epoch": 5.446130664083522,
"grad_norm": 0.745499312877655,
"learning_rate": 0.00027364939122939333,
"loss": 3.3374,
"step": 50600
},
{
"epoch": 5.45151221612313,
"grad_norm": 0.7081572413444519,
"learning_rate": 0.0002733261501993319,
"loss": 3.3199,
"step": 50650
},
{
"epoch": 5.456893768162738,
"grad_norm": 0.7260119915008545,
"learning_rate": 0.0002730029091692705,
"loss": 3.3515,
"step": 50700
},
{
"epoch": 5.462275320202346,
"grad_norm": 0.7278617024421692,
"learning_rate": 0.0002726796681392091,
"loss": 3.331,
"step": 50750
},
{
"epoch": 5.467656872241955,
"grad_norm": 0.7048456072807312,
"learning_rate": 0.0002723564271091477,
"loss": 3.3106,
"step": 50800
},
{
"epoch": 5.473038424281563,
"grad_norm": 0.7255328893661499,
"learning_rate": 0.0002720331860790863,
"loss": 3.3247,
"step": 50850
},
{
"epoch": 5.478419976321171,
"grad_norm": 0.7401384115219116,
"learning_rate": 0.00027170994504902485,
"loss": 3.3402,
"step": 50900
},
{
"epoch": 5.483801528360779,
"grad_norm": 0.786148190498352,
"learning_rate": 0.00027138670401896344,
"loss": 3.3437,
"step": 50950
},
{
"epoch": 5.489183080400387,
"grad_norm": 0.7473777532577515,
"learning_rate": 0.00027106346298890204,
"loss": 3.3597,
"step": 51000
},
{
"epoch": 5.489183080400387,
"eval_accuracy": 0.38263559527277363,
"eval_loss": 3.3964664936065674,
"eval_runtime": 180.3564,
"eval_samples_per_second": 99.863,
"eval_steps_per_second": 6.243,
"step": 51000
},
{
"epoch": 5.494564632439996,
"grad_norm": 0.7068842649459839,
"learning_rate": 0.00027074022195884063,
"loss": 3.3147,
"step": 51050
},
{
"epoch": 5.499946184479604,
"grad_norm": 0.7448179125785828,
"learning_rate": 0.0002704169809287792,
"loss": 3.3439,
"step": 51100
},
{
"epoch": 5.505327736519212,
"grad_norm": 0.7172561287879944,
"learning_rate": 0.00027009373989871776,
"loss": 3.3419,
"step": 51150
},
{
"epoch": 5.510709288558821,
"grad_norm": 0.7486968040466309,
"learning_rate": 0.00026977049886865636,
"loss": 3.3319,
"step": 51200
},
{
"epoch": 5.516090840598428,
"grad_norm": 0.7076429128646851,
"learning_rate": 0.00026944725783859495,
"loss": 3.3312,
"step": 51250
},
{
"epoch": 5.521472392638037,
"grad_norm": 0.7071561813354492,
"learning_rate": 0.00026912401680853355,
"loss": 3.3152,
"step": 51300
},
{
"epoch": 5.5268539446776455,
"grad_norm": 0.7227540612220764,
"learning_rate": 0.0002688007757784721,
"loss": 3.3266,
"step": 51350
},
{
"epoch": 5.532235496717253,
"grad_norm": 0.7778589129447937,
"learning_rate": 0.00026847753474841074,
"loss": 3.345,
"step": 51400
},
{
"epoch": 5.537617048756862,
"grad_norm": 0.7239121794700623,
"learning_rate": 0.0002681542937183493,
"loss": 3.3364,
"step": 51450
},
{
"epoch": 5.542998600796469,
"grad_norm": 0.7021813988685608,
"learning_rate": 0.0002678310526882879,
"loss": 3.3266,
"step": 51500
},
{
"epoch": 5.548380152836078,
"grad_norm": 0.7057538628578186,
"learning_rate": 0.00026750781165822647,
"loss": 3.315,
"step": 51550
},
{
"epoch": 5.553761704875686,
"grad_norm": 0.7174096703529358,
"learning_rate": 0.00026718457062816506,
"loss": 3.3249,
"step": 51600
},
{
"epoch": 5.559143256915294,
"grad_norm": 0.7189500331878662,
"learning_rate": 0.00026686132959810366,
"loss": 3.3414,
"step": 51650
},
{
"epoch": 5.564524808954903,
"grad_norm": 0.7119573354721069,
"learning_rate": 0.0002665380885680422,
"loss": 3.3625,
"step": 51700
},
{
"epoch": 5.569906360994511,
"grad_norm": 0.7115148901939392,
"learning_rate": 0.0002662148475379808,
"loss": 3.3203,
"step": 51750
},
{
"epoch": 5.575287913034119,
"grad_norm": 0.7708024382591248,
"learning_rate": 0.0002658916065079194,
"loss": 3.3362,
"step": 51800
},
{
"epoch": 5.580669465073727,
"grad_norm": 0.7303184866905212,
"learning_rate": 0.000265568365477858,
"loss": 3.334,
"step": 51850
},
{
"epoch": 5.586051017113336,
"grad_norm": 0.7622580528259277,
"learning_rate": 0.0002652451244477965,
"loss": 3.32,
"step": 51900
},
{
"epoch": 5.591432569152944,
"grad_norm": 0.7121456265449524,
"learning_rate": 0.00026492188341773517,
"loss": 3.316,
"step": 51950
},
{
"epoch": 5.596814121192552,
"grad_norm": 0.7205502390861511,
"learning_rate": 0.0002645986423876737,
"loss": 3.3013,
"step": 52000
},
{
"epoch": 5.596814121192552,
"eval_accuracy": 0.38279140351389956,
"eval_loss": 3.3938612937927246,
"eval_runtime": 180.5356,
"eval_samples_per_second": 99.764,
"eval_steps_per_second": 6.237,
"step": 52000
},
{
"epoch": 5.60219567323216,
"grad_norm": 0.7592937350273132,
"learning_rate": 0.0002642754013576123,
"loss": 3.3312,
"step": 52050
},
{
"epoch": 5.607577225271768,
"grad_norm": 0.7347540855407715,
"learning_rate": 0.0002639521603275509,
"loss": 3.3274,
"step": 52100
},
{
"epoch": 5.612958777311377,
"grad_norm": 0.717491626739502,
"learning_rate": 0.0002636289192974895,
"loss": 3.3496,
"step": 52150
},
{
"epoch": 5.618340329350985,
"grad_norm": 0.7288353443145752,
"learning_rate": 0.00026330567826742804,
"loss": 3.3103,
"step": 52200
},
{
"epoch": 5.623721881390593,
"grad_norm": 0.6995804309844971,
"learning_rate": 0.00026298243723736663,
"loss": 3.3326,
"step": 52250
},
{
"epoch": 5.629103433430201,
"grad_norm": 0.7089316844940186,
"learning_rate": 0.0002626591962073052,
"loss": 3.3313,
"step": 52300
},
{
"epoch": 5.634484985469809,
"grad_norm": 0.7350379824638367,
"learning_rate": 0.0002623359551772438,
"loss": 3.3358,
"step": 52350
},
{
"epoch": 5.639866537509418,
"grad_norm": 0.7242736220359802,
"learning_rate": 0.0002620127141471824,
"loss": 3.3154,
"step": 52400
},
{
"epoch": 5.645248089549026,
"grad_norm": 0.7510601282119751,
"learning_rate": 0.00026168947311712095,
"loss": 3.3391,
"step": 52450
},
{
"epoch": 5.650629641588634,
"grad_norm": 0.7219720482826233,
"learning_rate": 0.00026136623208705955,
"loss": 3.3203,
"step": 52500
},
{
"epoch": 5.656011193628243,
"grad_norm": 0.7292102575302124,
"learning_rate": 0.00026104299105699814,
"loss": 3.3382,
"step": 52550
},
{
"epoch": 5.66139274566785,
"grad_norm": 0.7652955651283264,
"learning_rate": 0.00026071975002693674,
"loss": 3.3266,
"step": 52600
},
{
"epoch": 5.666774297707459,
"grad_norm": 0.7515937685966492,
"learning_rate": 0.00026039650899687533,
"loss": 3.3376,
"step": 52650
},
{
"epoch": 5.672155849747067,
"grad_norm": 0.7084229588508606,
"learning_rate": 0.00026007326796681393,
"loss": 3.3164,
"step": 52700
},
{
"epoch": 5.677537401786675,
"grad_norm": 0.717211127281189,
"learning_rate": 0.00025975002693675247,
"loss": 3.3411,
"step": 52750
},
{
"epoch": 5.682918953826284,
"grad_norm": 0.7143423557281494,
"learning_rate": 0.00025942678590669106,
"loss": 3.3249,
"step": 52800
},
{
"epoch": 5.688300505865891,
"grad_norm": 0.7390002012252808,
"learning_rate": 0.00025910354487662966,
"loss": 3.3317,
"step": 52850
},
{
"epoch": 5.6936820579055,
"grad_norm": 0.7596791386604309,
"learning_rate": 0.00025878676866716946,
"loss": 3.3377,
"step": 52900
},
{
"epoch": 5.699063609945108,
"grad_norm": 0.7708361744880676,
"learning_rate": 0.00025846352763710806,
"loss": 3.3309,
"step": 52950
},
{
"epoch": 5.704445161984716,
"grad_norm": 0.7448739409446716,
"learning_rate": 0.0002581402866070466,
"loss": 3.3461,
"step": 53000
},
{
"epoch": 5.704445161984716,
"eval_accuracy": 0.382959815490012,
"eval_loss": 3.3894808292388916,
"eval_runtime": 180.4953,
"eval_samples_per_second": 99.787,
"eval_steps_per_second": 6.238,
"step": 53000
},
{
"epoch": 5.709826714024325,
"grad_norm": 0.7527272701263428,
"learning_rate": 0.00025781704557698525,
"loss": 3.3118,
"step": 53050
},
{
"epoch": 5.715208266063933,
"grad_norm": 0.7586866021156311,
"learning_rate": 0.0002574938045469238,
"loss": 3.323,
"step": 53100
},
{
"epoch": 5.720589818103541,
"grad_norm": 0.7504985928535461,
"learning_rate": 0.0002571705635168624,
"loss": 3.3293,
"step": 53150
},
{
"epoch": 5.725971370143149,
"grad_norm": 0.7618194222450256,
"learning_rate": 0.000256847322486801,
"loss": 3.344,
"step": 53200
},
{
"epoch": 5.731352922182758,
"grad_norm": 0.7982940673828125,
"learning_rate": 0.0002565240814567396,
"loss": 3.3382,
"step": 53250
},
{
"epoch": 5.736734474222366,
"grad_norm": 0.7086600661277771,
"learning_rate": 0.0002562008404266781,
"loss": 3.3341,
"step": 53300
},
{
"epoch": 5.742116026261974,
"grad_norm": 0.71695476770401,
"learning_rate": 0.0002558775993966167,
"loss": 3.3213,
"step": 53350
},
{
"epoch": 5.747497578301582,
"grad_norm": 0.7411425709724426,
"learning_rate": 0.0002555543583665553,
"loss": 3.3474,
"step": 53400
},
{
"epoch": 5.75287913034119,
"grad_norm": 0.7171765565872192,
"learning_rate": 0.0002552311173364939,
"loss": 3.3428,
"step": 53450
},
{
"epoch": 5.758260682380799,
"grad_norm": 0.7351555228233337,
"learning_rate": 0.0002549078763064325,
"loss": 3.3225,
"step": 53500
},
{
"epoch": 5.763642234420407,
"grad_norm": 0.7267298102378845,
"learning_rate": 0.00025458463527637103,
"loss": 3.3269,
"step": 53550
},
{
"epoch": 5.769023786460015,
"grad_norm": 0.6977094411849976,
"learning_rate": 0.0002542613942463097,
"loss": 3.3547,
"step": 53600
},
{
"epoch": 5.774405338499624,
"grad_norm": 0.7931075096130371,
"learning_rate": 0.0002539381532162482,
"loss": 3.3261,
"step": 53650
},
{
"epoch": 5.779786890539231,
"grad_norm": 0.7646996378898621,
"learning_rate": 0.0002536149121861868,
"loss": 3.3383,
"step": 53700
},
{
"epoch": 5.78516844257884,
"grad_norm": 0.7829656004905701,
"learning_rate": 0.0002532916711561254,
"loss": 3.3332,
"step": 53750
},
{
"epoch": 5.790549994618448,
"grad_norm": 0.7650371789932251,
"learning_rate": 0.000252968430126064,
"loss": 3.3414,
"step": 53800
},
{
"epoch": 5.795931546658056,
"grad_norm": 0.7446619272232056,
"learning_rate": 0.00025264518909600255,
"loss": 3.3277,
"step": 53850
},
{
"epoch": 5.801313098697665,
"grad_norm": 0.7219211459159851,
"learning_rate": 0.00025232194806594114,
"loss": 3.3408,
"step": 53900
},
{
"epoch": 5.806694650737272,
"grad_norm": 0.7577422857284546,
"learning_rate": 0.00025199870703587974,
"loss": 3.345,
"step": 53950
},
{
"epoch": 5.812076202776881,
"grad_norm": 0.7506514191627502,
"learning_rate": 0.00025167546600581833,
"loss": 3.3376,
"step": 54000
},
{
"epoch": 5.812076202776881,
"eval_accuracy": 0.3836133626102161,
"eval_loss": 3.3827064037323,
"eval_runtime": 180.6271,
"eval_samples_per_second": 99.714,
"eval_steps_per_second": 6.234,
"step": 54000
},
{
"epoch": 5.817457754816489,
"grad_norm": 0.7082645297050476,
"learning_rate": 0.0002513522249757569,
"loss": 3.3294,
"step": 54050
},
{
"epoch": 5.822839306856097,
"grad_norm": 0.8100736141204834,
"learning_rate": 0.00025102898394569547,
"loss": 3.3329,
"step": 54100
},
{
"epoch": 5.828220858895706,
"grad_norm": 0.7404306530952454,
"learning_rate": 0.00025070574291563406,
"loss": 3.3335,
"step": 54150
},
{
"epoch": 5.833602410935313,
"grad_norm": 0.7143645286560059,
"learning_rate": 0.00025038250188557265,
"loss": 3.3362,
"step": 54200
},
{
"epoch": 5.838983962974922,
"grad_norm": 0.7716860175132751,
"learning_rate": 0.00025005926085551125,
"loss": 3.3455,
"step": 54250
},
{
"epoch": 5.84436551501453,
"grad_norm": 0.769177258014679,
"learning_rate": 0.0002497360198254498,
"loss": 3.3358,
"step": 54300
},
{
"epoch": 5.849747067054138,
"grad_norm": 0.7687222957611084,
"learning_rate": 0.00024941277879538844,
"loss": 3.3366,
"step": 54350
},
{
"epoch": 5.855128619093747,
"grad_norm": 0.7368986010551453,
"learning_rate": 0.000249089537765327,
"loss": 3.332,
"step": 54400
},
{
"epoch": 5.860510171133355,
"grad_norm": 0.7428856492042542,
"learning_rate": 0.0002487662967352656,
"loss": 3.3448,
"step": 54450
},
{
"epoch": 5.865891723172963,
"grad_norm": 0.7492393255233765,
"learning_rate": 0.00024844305570520417,
"loss": 3.3396,
"step": 54500
},
{
"epoch": 5.871273275212571,
"grad_norm": 0.8640391826629639,
"learning_rate": 0.00024811981467514276,
"loss": 3.3256,
"step": 54550
},
{
"epoch": 5.87665482725218,
"grad_norm": 0.7381857633590698,
"learning_rate": 0.00024779657364508136,
"loss": 3.3202,
"step": 54600
},
{
"epoch": 5.882036379291788,
"grad_norm": 0.7371327877044678,
"learning_rate": 0.0002474733326150199,
"loss": 3.3164,
"step": 54650
},
{
"epoch": 5.887417931331396,
"grad_norm": 0.7317863702774048,
"learning_rate": 0.0002471500915849585,
"loss": 3.314,
"step": 54700
},
{
"epoch": 5.892799483371004,
"grad_norm": 0.7320008277893066,
"learning_rate": 0.0002468268505548971,
"loss": 3.3218,
"step": 54750
},
{
"epoch": 5.898181035410612,
"grad_norm": 0.7335460186004639,
"learning_rate": 0.0002465036095248357,
"loss": 3.339,
"step": 54800
},
{
"epoch": 5.903562587450221,
"grad_norm": 0.7588884234428406,
"learning_rate": 0.0002461803684947742,
"loss": 3.3418,
"step": 54850
},
{
"epoch": 5.9089441394898286,
"grad_norm": 0.7557958364486694,
"learning_rate": 0.0002458571274647128,
"loss": 3.3497,
"step": 54900
},
{
"epoch": 5.914325691529437,
"grad_norm": 0.7636759281158447,
"learning_rate": 0.0002455338864346514,
"loss": 3.3302,
"step": 54950
},
{
"epoch": 5.919707243569046,
"grad_norm": 0.7415563464164734,
"learning_rate": 0.00024521064540459,
"loss": 3.3454,
"step": 55000
},
{
"epoch": 5.919707243569046,
"eval_accuracy": 0.38400386108902135,
"eval_loss": 3.3798177242279053,
"eval_runtime": 180.4085,
"eval_samples_per_second": 99.835,
"eval_steps_per_second": 6.241,
"step": 55000
},
{
"epoch": 5.925088795608653,
"grad_norm": 0.7452832460403442,
"learning_rate": 0.0002448874043745286,
"loss": 3.3509,
"step": 55050
},
{
"epoch": 5.930470347648262,
"grad_norm": 0.8210157752037048,
"learning_rate": 0.00024456416334446714,
"loss": 3.3223,
"step": 55100
},
{
"epoch": 5.93585189968787,
"grad_norm": 0.734848141670227,
"learning_rate": 0.000244247387135007,
"loss": 3.3249,
"step": 55150
},
{
"epoch": 5.941233451727478,
"grad_norm": 0.8083829283714294,
"learning_rate": 0.00024392414610494557,
"loss": 3.329,
"step": 55200
},
{
"epoch": 5.946615003767087,
"grad_norm": 0.7373672723770142,
"learning_rate": 0.00024360090507488414,
"loss": 3.339,
"step": 55250
},
{
"epoch": 5.951996555806694,
"grad_norm": 0.7348290681838989,
"learning_rate": 0.00024327766404482273,
"loss": 3.3362,
"step": 55300
},
{
"epoch": 5.957378107846303,
"grad_norm": 0.7888813018798828,
"learning_rate": 0.0002429544230147613,
"loss": 3.3275,
"step": 55350
},
{
"epoch": 5.962759659885911,
"grad_norm": 0.7368828058242798,
"learning_rate": 0.00024263118198469992,
"loss": 3.3428,
"step": 55400
},
{
"epoch": 5.968141211925519,
"grad_norm": 0.726843535900116,
"learning_rate": 0.0002423079409546385,
"loss": 3.357,
"step": 55450
},
{
"epoch": 5.973522763965128,
"grad_norm": 0.7297669053077698,
"learning_rate": 0.00024198469992457706,
"loss": 3.3171,
"step": 55500
},
{
"epoch": 5.978904316004736,
"grad_norm": 0.7472332119941711,
"learning_rate": 0.00024166145889451568,
"loss": 3.3452,
"step": 55550
},
{
"epoch": 5.984285868044344,
"grad_norm": 0.7742068767547607,
"learning_rate": 0.00024133821786445425,
"loss": 3.35,
"step": 55600
},
{
"epoch": 5.989667420083952,
"grad_norm": 0.7965028882026672,
"learning_rate": 0.0002410149768343928,
"loss": 3.3454,
"step": 55650
},
{
"epoch": 5.995048972123561,
"grad_norm": 0.7538635730743408,
"learning_rate": 0.0002406917358043314,
"loss": 3.3523,
"step": 55700
},
{
"epoch": 6.000430524163169,
"grad_norm": 0.7462757229804993,
"learning_rate": 0.00024036849477427,
"loss": 3.3303,
"step": 55750
},
{
"epoch": 6.005812076202777,
"grad_norm": 0.7339770793914795,
"learning_rate": 0.00024004525374420857,
"loss": 3.2492,
"step": 55800
},
{
"epoch": 6.011193628242385,
"grad_norm": 0.7494232654571533,
"learning_rate": 0.00023972201271414716,
"loss": 3.2566,
"step": 55850
},
{
"epoch": 6.016575180281993,
"grad_norm": 0.7096470594406128,
"learning_rate": 0.00023939877168408573,
"loss": 3.2373,
"step": 55900
},
{
"epoch": 6.021956732321602,
"grad_norm": 0.7256503701210022,
"learning_rate": 0.00023907553065402433,
"loss": 3.2391,
"step": 55950
},
{
"epoch": 6.0273382843612096,
"grad_norm": 0.7260343432426453,
"learning_rate": 0.00023875228962396292,
"loss": 3.2501,
"step": 56000
},
{
"epoch": 6.0273382843612096,
"eval_accuracy": 0.3840860026722091,
"eval_loss": 3.3830971717834473,
"eval_runtime": 180.8499,
"eval_samples_per_second": 99.591,
"eval_steps_per_second": 6.226,
"step": 56000
},
{
"epoch": 6.032719836400818,
"grad_norm": 0.8156185746192932,
"learning_rate": 0.0002384290485939015,
"loss": 3.2535,
"step": 56050
},
{
"epoch": 6.038101388440427,
"grad_norm": 0.7497657537460327,
"learning_rate": 0.00023810580756384006,
"loss": 3.2479,
"step": 56100
},
{
"epoch": 6.043482940480034,
"grad_norm": 0.7156153321266174,
"learning_rate": 0.00023778256653377868,
"loss": 3.2614,
"step": 56150
},
{
"epoch": 6.048864492519643,
"grad_norm": 0.7490081787109375,
"learning_rate": 0.00023745932550371725,
"loss": 3.2544,
"step": 56200
},
{
"epoch": 6.0542460445592505,
"grad_norm": 0.7716811299324036,
"learning_rate": 0.00023713608447365584,
"loss": 3.2517,
"step": 56250
},
{
"epoch": 6.059627596598859,
"grad_norm": 0.805608868598938,
"learning_rate": 0.00023681284344359444,
"loss": 3.2538,
"step": 56300
},
{
"epoch": 6.065009148638468,
"grad_norm": 0.8110917210578918,
"learning_rate": 0.000236489602413533,
"loss": 3.246,
"step": 56350
},
{
"epoch": 6.070390700678075,
"grad_norm": 0.8059680461883545,
"learning_rate": 0.0002361663613834716,
"loss": 3.2608,
"step": 56400
},
{
"epoch": 6.075772252717684,
"grad_norm": 0.7581197619438171,
"learning_rate": 0.00023584312035341017,
"loss": 3.2681,
"step": 56450
},
{
"epoch": 6.081153804757292,
"grad_norm": 0.7696651816368103,
"learning_rate": 0.00023551987932334876,
"loss": 3.2486,
"step": 56500
},
{
"epoch": 6.0865353567969,
"grad_norm": 0.7502686977386475,
"learning_rate": 0.00023519663829328735,
"loss": 3.2639,
"step": 56550
},
{
"epoch": 6.091916908836509,
"grad_norm": 0.7801194190979004,
"learning_rate": 0.00023487339726322592,
"loss": 3.2652,
"step": 56600
},
{
"epoch": 6.097298460876116,
"grad_norm": 0.7480630874633789,
"learning_rate": 0.0002345501562331645,
"loss": 3.2495,
"step": 56650
},
{
"epoch": 6.102680012915725,
"grad_norm": 0.7701714038848877,
"learning_rate": 0.0002342269152031031,
"loss": 3.27,
"step": 56700
},
{
"epoch": 6.108061564955333,
"grad_norm": 0.7400213479995728,
"learning_rate": 0.00023390367417304168,
"loss": 3.2617,
"step": 56750
},
{
"epoch": 6.113443116994941,
"grad_norm": 0.7020822763442993,
"learning_rate": 0.00023358043314298025,
"loss": 3.2603,
"step": 56800
},
{
"epoch": 6.11882466903455,
"grad_norm": 0.7655128240585327,
"learning_rate": 0.00023325719211291887,
"loss": 3.2421,
"step": 56850
},
{
"epoch": 6.124206221074158,
"grad_norm": 0.7835574150085449,
"learning_rate": 0.00023293395108285744,
"loss": 3.2615,
"step": 56900
},
{
"epoch": 6.129587773113766,
"grad_norm": 0.7708025574684143,
"learning_rate": 0.000232610710052796,
"loss": 3.2681,
"step": 56950
},
{
"epoch": 6.134969325153374,
"grad_norm": 0.7812603116035461,
"learning_rate": 0.0002322874690227346,
"loss": 3.2706,
"step": 57000
},
{
"epoch": 6.134969325153374,
"eval_accuracy": 0.3842589780696098,
"eval_loss": 3.382967472076416,
"eval_runtime": 180.3604,
"eval_samples_per_second": 99.861,
"eval_steps_per_second": 6.243,
"step": 57000
},
{
"epoch": 6.140350877192983,
"grad_norm": 0.7508556842803955,
"learning_rate": 0.0002319642279926732,
"loss": 3.2406,
"step": 57050
},
{
"epoch": 6.1457324292325906,
"grad_norm": 0.7822660207748413,
"learning_rate": 0.00023164098696261176,
"loss": 3.2669,
"step": 57100
},
{
"epoch": 6.151113981272199,
"grad_norm": 0.7589568495750427,
"learning_rate": 0.00023131774593255036,
"loss": 3.2541,
"step": 57150
},
{
"epoch": 6.156495533311807,
"grad_norm": 0.8089141845703125,
"learning_rate": 0.00023099450490248892,
"loss": 3.2655,
"step": 57200
},
{
"epoch": 6.161877085351415,
"grad_norm": 0.7813968658447266,
"learning_rate": 0.00023067126387242754,
"loss": 3.2779,
"step": 57250
},
{
"epoch": 6.167258637391024,
"grad_norm": 0.7517101168632507,
"learning_rate": 0.0002303480228423661,
"loss": 3.261,
"step": 57300
},
{
"epoch": 6.1726401894306315,
"grad_norm": 0.7565500140190125,
"learning_rate": 0.00023002478181230468,
"loss": 3.2513,
"step": 57350
},
{
"epoch": 6.17802174147024,
"grad_norm": 0.722113311290741,
"learning_rate": 0.00022970154078224327,
"loss": 3.2755,
"step": 57400
},
{
"epoch": 6.183403293509849,
"grad_norm": 0.805997371673584,
"learning_rate": 0.00022937829975218187,
"loss": 3.2674,
"step": 57450
},
{
"epoch": 6.188784845549456,
"grad_norm": 0.7368705868721008,
"learning_rate": 0.00022905505872212044,
"loss": 3.2627,
"step": 57500
},
{
"epoch": 6.194166397589065,
"grad_norm": 0.7440856099128723,
"learning_rate": 0.00022873181769205903,
"loss": 3.2576,
"step": 57550
},
{
"epoch": 6.1995479496286725,
"grad_norm": 0.8116312623023987,
"learning_rate": 0.0002284085766619976,
"loss": 3.2743,
"step": 57600
},
{
"epoch": 6.204929501668281,
"grad_norm": 0.7270071506500244,
"learning_rate": 0.0002280853356319362,
"loss": 3.2859,
"step": 57650
},
{
"epoch": 6.21031105370789,
"grad_norm": 0.7267986536026001,
"learning_rate": 0.0002277620946018748,
"loss": 3.2578,
"step": 57700
},
{
"epoch": 6.215692605747497,
"grad_norm": 0.7756433486938477,
"learning_rate": 0.00022743885357181336,
"loss": 3.2718,
"step": 57750
},
{
"epoch": 6.221074157787106,
"grad_norm": 0.7905489206314087,
"learning_rate": 0.00022711561254175192,
"loss": 3.2755,
"step": 57800
},
{
"epoch": 6.226455709826714,
"grad_norm": 0.8063260912895203,
"learning_rate": 0.00022679237151169054,
"loss": 3.2735,
"step": 57850
},
{
"epoch": 6.231837261866322,
"grad_norm": 0.7980824708938599,
"learning_rate": 0.0002264691304816291,
"loss": 3.2635,
"step": 57900
},
{
"epoch": 6.237218813905931,
"grad_norm": 0.7567322254180908,
"learning_rate": 0.00022614588945156768,
"loss": 3.2738,
"step": 57950
},
{
"epoch": 6.242600365945538,
"grad_norm": 0.7856761813163757,
"learning_rate": 0.0002258226484215063,
"loss": 3.28,
"step": 58000
},
{
"epoch": 6.242600365945538,
"eval_accuracy": 0.3846668610104653,
"eval_loss": 3.3798773288726807,
"eval_runtime": 180.5316,
"eval_samples_per_second": 99.766,
"eval_steps_per_second": 6.237,
"step": 58000
},
{
"epoch": 6.247981917985147,
"grad_norm": 0.7729969024658203,
"learning_rate": 0.00022549940739144487,
"loss": 3.2857,
"step": 58050
},
{
"epoch": 6.253363470024755,
"grad_norm": 0.8097330927848816,
"learning_rate": 0.00022517616636138344,
"loss": 3.2717,
"step": 58100
},
{
"epoch": 6.258745022064363,
"grad_norm": 0.8649580478668213,
"learning_rate": 0.00022485292533132203,
"loss": 3.2832,
"step": 58150
},
{
"epoch": 6.264126574103972,
"grad_norm": 0.771740734577179,
"learning_rate": 0.00022452968430126063,
"loss": 3.2734,
"step": 58200
},
{
"epoch": 6.26950812614358,
"grad_norm": 0.7663628458976746,
"learning_rate": 0.00022420644327119922,
"loss": 3.2736,
"step": 58250
},
{
"epoch": 6.274889678183188,
"grad_norm": 0.7669972777366638,
"learning_rate": 0.0002238832022411378,
"loss": 3.2712,
"step": 58300
},
{
"epoch": 6.280271230222796,
"grad_norm": 0.7995150089263916,
"learning_rate": 0.00022355996121107636,
"loss": 3.2764,
"step": 58350
},
{
"epoch": 6.285652782262405,
"grad_norm": 0.7507680654525757,
"learning_rate": 0.00022323672018101498,
"loss": 3.2882,
"step": 58400
},
{
"epoch": 6.2910343343020125,
"grad_norm": 0.8180110454559326,
"learning_rate": 0.00022291347915095355,
"loss": 3.2764,
"step": 58450
},
{
"epoch": 6.296415886341621,
"grad_norm": 0.7749719619750977,
"learning_rate": 0.0002225902381208921,
"loss": 3.272,
"step": 58500
},
{
"epoch": 6.301797438381229,
"grad_norm": 0.7502884864807129,
"learning_rate": 0.00022226699709083073,
"loss": 3.2762,
"step": 58550
},
{
"epoch": 6.307178990420837,
"grad_norm": 0.8392398357391357,
"learning_rate": 0.00022195022088137051,
"loss": 3.2646,
"step": 58600
},
{
"epoch": 6.312560542460446,
"grad_norm": 0.7521066665649414,
"learning_rate": 0.0002216269798513091,
"loss": 3.2673,
"step": 58650
},
{
"epoch": 6.3179420945000535,
"grad_norm": 0.7701011300086975,
"learning_rate": 0.00022130373882124768,
"loss": 3.2645,
"step": 58700
},
{
"epoch": 6.323323646539662,
"grad_norm": 0.8215457797050476,
"learning_rate": 0.00022098049779118627,
"loss": 3.2603,
"step": 58750
},
{
"epoch": 6.328705198579271,
"grad_norm": 0.7722597122192383,
"learning_rate": 0.00022065725676112487,
"loss": 3.2791,
"step": 58800
},
{
"epoch": 6.334086750618878,
"grad_norm": 0.7506366968154907,
"learning_rate": 0.00022033401573106343,
"loss": 3.2869,
"step": 58850
},
{
"epoch": 6.339468302658487,
"grad_norm": 0.7620330452919006,
"learning_rate": 0.000220010774701002,
"loss": 3.274,
"step": 58900
},
{
"epoch": 6.344849854698095,
"grad_norm": 0.7641044855117798,
"learning_rate": 0.00021968753367094062,
"loss": 3.2703,
"step": 58950
},
{
"epoch": 6.350231406737703,
"grad_norm": 0.7357539534568787,
"learning_rate": 0.0002193642926408792,
"loss": 3.277,
"step": 59000
},
{
"epoch": 6.350231406737703,
"eval_accuracy": 0.3850675728607251,
"eval_loss": 3.3782007694244385,
"eval_runtime": 180.5172,
"eval_samples_per_second": 99.774,
"eval_steps_per_second": 6.238,
"step": 59000
},
{
"epoch": 6.355612958777312,
"grad_norm": 0.793239951133728,
"learning_rate": 0.00021904105161081778,
"loss": 3.2845,
"step": 59050
},
{
"epoch": 6.360994510816919,
"grad_norm": 0.7626572251319885,
"learning_rate": 0.00021871781058075638,
"loss": 3.2751,
"step": 59100
},
{
"epoch": 6.366376062856528,
"grad_norm": 0.7418571710586548,
"learning_rate": 0.00021839456955069495,
"loss": 3.285,
"step": 59150
},
{
"epoch": 6.371757614896136,
"grad_norm": 0.7725799679756165,
"learning_rate": 0.00021807132852063354,
"loss": 3.2829,
"step": 59200
},
{
"epoch": 6.377139166935744,
"grad_norm": 0.731188952922821,
"learning_rate": 0.0002177480874905721,
"loss": 3.2764,
"step": 59250
},
{
"epoch": 6.382520718975353,
"grad_norm": 0.7765207290649414,
"learning_rate": 0.0002174248464605107,
"loss": 3.2717,
"step": 59300
},
{
"epoch": 6.387902271014961,
"grad_norm": 0.7506345510482788,
"learning_rate": 0.0002171016054304493,
"loss": 3.2679,
"step": 59350
},
{
"epoch": 6.393283823054569,
"grad_norm": 0.7452055215835571,
"learning_rate": 0.00021677836440038787,
"loss": 3.2577,
"step": 59400
},
{
"epoch": 6.398665375094177,
"grad_norm": 0.8073831796646118,
"learning_rate": 0.00021645512337032643,
"loss": 3.2606,
"step": 59450
},
{
"epoch": 6.404046927133785,
"grad_norm": 0.7789980173110962,
"learning_rate": 0.00021613188234026506,
"loss": 3.2712,
"step": 59500
},
{
"epoch": 6.4094284791733935,
"grad_norm": 0.8396959900856018,
"learning_rate": 0.00021580864131020362,
"loss": 3.2768,
"step": 59550
},
{
"epoch": 6.414810031213002,
"grad_norm": 0.797767162322998,
"learning_rate": 0.0002154854002801422,
"loss": 3.2813,
"step": 59600
},
{
"epoch": 6.42019158325261,
"grad_norm": 0.802899956703186,
"learning_rate": 0.0002151621592500808,
"loss": 3.2889,
"step": 59650
},
{
"epoch": 6.425573135292218,
"grad_norm": 0.8054869771003723,
"learning_rate": 0.00021483891822001938,
"loss": 3.2716,
"step": 59700
},
{
"epoch": 6.430954687331827,
"grad_norm": 0.7798833250999451,
"learning_rate": 0.00021451567718995795,
"loss": 3.2781,
"step": 59750
},
{
"epoch": 6.4363362393714345,
"grad_norm": 0.7423691153526306,
"learning_rate": 0.00021419243615989654,
"loss": 3.2763,
"step": 59800
},
{
"epoch": 6.441717791411043,
"grad_norm": 0.8051731586456299,
"learning_rate": 0.00021386919512983514,
"loss": 3.2666,
"step": 59850
},
{
"epoch": 6.447099343450651,
"grad_norm": 0.8098742365837097,
"learning_rate": 0.0002135459540997737,
"loss": 3.2656,
"step": 59900
},
{
"epoch": 6.452480895490259,
"grad_norm": 0.8203098177909851,
"learning_rate": 0.0002132227130697123,
"loss": 3.2729,
"step": 59950
},
{
"epoch": 6.457862447529868,
"grad_norm": 0.736182689666748,
"learning_rate": 0.00021289947203965087,
"loss": 3.2727,
"step": 60000
},
{
"epoch": 6.457862447529868,
"eval_accuracy": 0.385509246849691,
"eval_loss": 3.372286558151245,
"eval_runtime": 180.8685,
"eval_samples_per_second": 99.581,
"eval_steps_per_second": 6.226,
"step": 60000
},
{
"epoch": 6.4632439995694755,
"grad_norm": 0.7694886326789856,
"learning_rate": 0.0002125762310095895,
"loss": 3.2795,
"step": 60050
},
{
"epoch": 6.468625551609084,
"grad_norm": 0.7625947594642639,
"learning_rate": 0.00021225298997952806,
"loss": 3.2678,
"step": 60100
},
{
"epoch": 6.474007103648693,
"grad_norm": 0.7633724808692932,
"learning_rate": 0.00021192974894946662,
"loss": 3.2773,
"step": 60150
},
{
"epoch": 6.4793886556883,
"grad_norm": 0.7859768271446228,
"learning_rate": 0.00021160650791940524,
"loss": 3.2929,
"step": 60200
},
{
"epoch": 6.484770207727909,
"grad_norm": 0.7822881937026978,
"learning_rate": 0.0002112832668893438,
"loss": 3.2923,
"step": 60250
},
{
"epoch": 6.490151759767517,
"grad_norm": 0.7258760929107666,
"learning_rate": 0.00021096002585928238,
"loss": 3.2784,
"step": 60300
},
{
"epoch": 6.495533311807125,
"grad_norm": 0.7965183854103088,
"learning_rate": 0.00021063678482922097,
"loss": 3.2617,
"step": 60350
},
{
"epoch": 6.500914863846734,
"grad_norm": 0.8430307507514954,
"learning_rate": 0.00021031354379915957,
"loss": 3.2637,
"step": 60400
},
{
"epoch": 6.506296415886341,
"grad_norm": 0.7958040833473206,
"learning_rate": 0.00020999030276909814,
"loss": 3.2915,
"step": 60450
},
{
"epoch": 6.51167796792595,
"grad_norm": 0.7817371487617493,
"learning_rate": 0.00020966706173903673,
"loss": 3.2911,
"step": 60500
},
{
"epoch": 6.517059519965558,
"grad_norm": 0.7723913192749023,
"learning_rate": 0.0002093438207089753,
"loss": 3.2695,
"step": 60550
},
{
"epoch": 6.522441072005166,
"grad_norm": 0.7523823380470276,
"learning_rate": 0.00020902057967891387,
"loss": 3.2835,
"step": 60600
},
{
"epoch": 6.5278226240447745,
"grad_norm": 0.7971324920654297,
"learning_rate": 0.0002086973386488525,
"loss": 3.2922,
"step": 60650
},
{
"epoch": 6.533204176084383,
"grad_norm": 0.8252760767936707,
"learning_rate": 0.00020837409761879106,
"loss": 3.2898,
"step": 60700
},
{
"epoch": 6.538585728123991,
"grad_norm": 0.7614521980285645,
"learning_rate": 0.00020805085658872962,
"loss": 3.2906,
"step": 60750
},
{
"epoch": 6.543967280163599,
"grad_norm": 0.8246181011199951,
"learning_rate": 0.00020772761555866825,
"loss": 3.2725,
"step": 60800
},
{
"epoch": 6.549348832203208,
"grad_norm": 0.7531991004943848,
"learning_rate": 0.0002074043745286068,
"loss": 3.274,
"step": 60850
},
{
"epoch": 6.5547303842428155,
"grad_norm": 0.8107429146766663,
"learning_rate": 0.00020708113349854538,
"loss": 3.2757,
"step": 60900
},
{
"epoch": 6.560111936282424,
"grad_norm": 0.8336337208747864,
"learning_rate": 0.00020675789246848397,
"loss": 3.2756,
"step": 60950
},
{
"epoch": 6.565493488322032,
"grad_norm": 0.8028927445411682,
"learning_rate": 0.00020643465143842257,
"loss": 3.2838,
"step": 61000
},
{
"epoch": 6.565493488322032,
"eval_accuracy": 0.3859593957639064,
"eval_loss": 3.3682973384857178,
"eval_runtime": 180.3806,
"eval_samples_per_second": 99.85,
"eval_steps_per_second": 6.242,
"step": 61000
},
{
"epoch": 6.57087504036164,
"grad_norm": 0.8147567510604858,
"learning_rate": 0.00020611141040836116,
"loss": 3.2949,
"step": 61050
},
{
"epoch": 6.576256592401249,
"grad_norm": 0.8347201347351074,
"learning_rate": 0.00020578816937829973,
"loss": 3.2929,
"step": 61100
},
{
"epoch": 6.5816381444408565,
"grad_norm": 0.8149718046188354,
"learning_rate": 0.0002054649283482383,
"loss": 3.2933,
"step": 61150
},
{
"epoch": 6.587019696480465,
"grad_norm": 0.7641071677207947,
"learning_rate": 0.00020514168731817692,
"loss": 3.2689,
"step": 61200
},
{
"epoch": 6.592401248520073,
"grad_norm": 0.8402169942855835,
"learning_rate": 0.0002048184462881155,
"loss": 3.2796,
"step": 61250
},
{
"epoch": 6.597782800559681,
"grad_norm": 0.7858189940452576,
"learning_rate": 0.00020449520525805406,
"loss": 3.29,
"step": 61300
},
{
"epoch": 6.60316435259929,
"grad_norm": 0.7707937359809875,
"learning_rate": 0.00020417196422799268,
"loss": 3.2823,
"step": 61350
},
{
"epoch": 6.608545904638898,
"grad_norm": 0.7980074882507324,
"learning_rate": 0.00020384872319793125,
"loss": 3.2965,
"step": 61400
},
{
"epoch": 6.613927456678506,
"grad_norm": 0.771574854850769,
"learning_rate": 0.0002035254821678698,
"loss": 3.2852,
"step": 61450
},
{
"epoch": 6.619309008718115,
"grad_norm": 0.7880848050117493,
"learning_rate": 0.0002032022411378084,
"loss": 3.2822,
"step": 61500
},
{
"epoch": 6.624690560757722,
"grad_norm": 0.7877942323684692,
"learning_rate": 0.000202879000107747,
"loss": 3.264,
"step": 61550
},
{
"epoch": 6.630072112797331,
"grad_norm": 0.7295520901679993,
"learning_rate": 0.00020255575907768557,
"loss": 3.2691,
"step": 61600
},
{
"epoch": 6.635453664836939,
"grad_norm": 0.8145275712013245,
"learning_rate": 0.00020223251804762416,
"loss": 3.2715,
"step": 61650
},
{
"epoch": 6.640835216876547,
"grad_norm": 0.8078879714012146,
"learning_rate": 0.00020190927701756273,
"loss": 3.2752,
"step": 61700
},
{
"epoch": 6.6462167689161555,
"grad_norm": 1.0516327619552612,
"learning_rate": 0.00020158603598750133,
"loss": 3.2702,
"step": 61750
},
{
"epoch": 6.651598320955763,
"grad_norm": 0.8313896059989929,
"learning_rate": 0.00020126279495743992,
"loss": 3.2767,
"step": 61800
},
{
"epoch": 6.656979872995372,
"grad_norm": 0.812926709651947,
"learning_rate": 0.0002009395539273785,
"loss": 3.2784,
"step": 61850
},
{
"epoch": 6.66236142503498,
"grad_norm": 0.7894045114517212,
"learning_rate": 0.00020061631289731706,
"loss": 3.2954,
"step": 61900
},
{
"epoch": 6.667742977074588,
"grad_norm": 0.7732223272323608,
"learning_rate": 0.00020029307186725568,
"loss": 3.2946,
"step": 61950
},
{
"epoch": 6.6731245291141965,
"grad_norm": 0.7833398580551147,
"learning_rate": 0.00019996983083719425,
"loss": 3.2802,
"step": 62000
},
{
"epoch": 6.6731245291141965,
"eval_accuracy": 0.38636662678743505,
"eval_loss": 3.3642148971557617,
"eval_runtime": 180.6181,
"eval_samples_per_second": 99.719,
"eval_steps_per_second": 6.234,
"step": 62000
},
{
"epoch": 6.678506081153805,
"grad_norm": 0.7945963740348816,
"learning_rate": 0.00019964658980713284,
"loss": 3.306,
"step": 62050
},
{
"epoch": 6.683887633193413,
"grad_norm": 0.7843768000602722,
"learning_rate": 0.00019932334877707144,
"loss": 3.2765,
"step": 62100
},
{
"epoch": 6.689269185233021,
"grad_norm": 0.8199366927146912,
"learning_rate": 0.00019900657256761124,
"loss": 3.2743,
"step": 62150
},
{
"epoch": 6.69465073727263,
"grad_norm": 0.7921463251113892,
"learning_rate": 0.0001986833315375498,
"loss": 3.284,
"step": 62200
},
{
"epoch": 6.7000322893122375,
"grad_norm": 0.7658042311668396,
"learning_rate": 0.00019836009050748838,
"loss": 3.2779,
"step": 62250
},
{
"epoch": 6.705413841351846,
"grad_norm": 0.7855052947998047,
"learning_rate": 0.000198036849477427,
"loss": 3.2939,
"step": 62300
},
{
"epoch": 6.710795393391454,
"grad_norm": 0.778864860534668,
"learning_rate": 0.00019771360844736557,
"loss": 3.2865,
"step": 62350
},
{
"epoch": 6.716176945431062,
"grad_norm": 0.7535867691040039,
"learning_rate": 0.00019739036741730413,
"loss": 3.281,
"step": 62400
},
{
"epoch": 6.721558497470671,
"grad_norm": 0.8026406168937683,
"learning_rate": 0.00019706712638724276,
"loss": 3.2898,
"step": 62450
},
{
"epoch": 6.7269400495102785,
"grad_norm": 0.826941192150116,
"learning_rate": 0.00019674388535718132,
"loss": 3.2813,
"step": 62500
},
{
"epoch": 6.732321601549887,
"grad_norm": 0.8191684484481812,
"learning_rate": 0.0001964206443271199,
"loss": 3.279,
"step": 62550
},
{
"epoch": 6.737703153589496,
"grad_norm": 0.7996468544006348,
"learning_rate": 0.00019609740329705849,
"loss": 3.2748,
"step": 62600
},
{
"epoch": 6.743084705629103,
"grad_norm": 0.8293940424919128,
"learning_rate": 0.00019577416226699708,
"loss": 3.2804,
"step": 62650
},
{
"epoch": 6.748466257668712,
"grad_norm": 0.7844554781913757,
"learning_rate": 0.00019545092123693565,
"loss": 3.2701,
"step": 62700
},
{
"epoch": 6.75384780970832,
"grad_norm": 0.7604687809944153,
"learning_rate": 0.00019512768020687424,
"loss": 3.2818,
"step": 62750
},
{
"epoch": 6.759229361747928,
"grad_norm": 0.8484272956848145,
"learning_rate": 0.0001948044391768128,
"loss": 3.2771,
"step": 62800
},
{
"epoch": 6.7646109137875365,
"grad_norm": 0.7642599940299988,
"learning_rate": 0.00019448119814675143,
"loss": 3.2808,
"step": 62850
},
{
"epoch": 6.769992465827144,
"grad_norm": 0.7840580344200134,
"learning_rate": 0.00019415795711669,
"loss": 3.2957,
"step": 62900
},
{
"epoch": 6.775374017866753,
"grad_norm": 0.8181833028793335,
"learning_rate": 0.00019383471608662857,
"loss": 3.2691,
"step": 62950
},
{
"epoch": 6.780755569906361,
"grad_norm": 0.8215237259864807,
"learning_rate": 0.0001935114750565672,
"loss": 3.2696,
"step": 63000
},
{
"epoch": 6.780755569906361,
"eval_accuracy": 0.38650168232698845,
"eval_loss": 3.359816312789917,
"eval_runtime": 180.6996,
"eval_samples_per_second": 99.674,
"eval_steps_per_second": 6.231,
"step": 63000
},
{
"epoch": 6.786137121945969,
"grad_norm": 0.7587153911590576,
"learning_rate": 0.00019318823402650576,
"loss": 3.2695,
"step": 63050
},
{
"epoch": 6.7915186739855775,
"grad_norm": 0.7708876729011536,
"learning_rate": 0.00019286499299644432,
"loss": 3.2752,
"step": 63100
},
{
"epoch": 6.796900226025185,
"grad_norm": 0.8366973400115967,
"learning_rate": 0.00019254175196638292,
"loss": 3.293,
"step": 63150
},
{
"epoch": 6.802281778064794,
"grad_norm": 0.8045485019683838,
"learning_rate": 0.0001922185109363215,
"loss": 3.2756,
"step": 63200
},
{
"epoch": 6.807663330104402,
"grad_norm": 0.7623797059059143,
"learning_rate": 0.00019189526990626008,
"loss": 3.2853,
"step": 63250
},
{
"epoch": 6.813044882144011,
"grad_norm": 0.7878138422966003,
"learning_rate": 0.00019157202887619867,
"loss": 3.2875,
"step": 63300
},
{
"epoch": 6.8184264341836185,
"grad_norm": 0.7722912430763245,
"learning_rate": 0.00019124878784613724,
"loss": 3.2808,
"step": 63350
},
{
"epoch": 6.823807986223227,
"grad_norm": 0.7824761271476746,
"learning_rate": 0.00019092554681607584,
"loss": 3.2744,
"step": 63400
},
{
"epoch": 6.829189538262835,
"grad_norm": 0.8032478094100952,
"learning_rate": 0.00019060230578601443,
"loss": 3.2716,
"step": 63450
},
{
"epoch": 6.834571090302443,
"grad_norm": 0.7636817097663879,
"learning_rate": 0.000190279064755953,
"loss": 3.2809,
"step": 63500
},
{
"epoch": 6.839952642342052,
"grad_norm": 0.8288126587867737,
"learning_rate": 0.00018995582372589157,
"loss": 3.2843,
"step": 63550
},
{
"epoch": 6.8453341943816595,
"grad_norm": 0.7764488458633423,
"learning_rate": 0.0001896325826958302,
"loss": 3.2944,
"step": 63600
},
{
"epoch": 6.850715746421268,
"grad_norm": 0.8440558910369873,
"learning_rate": 0.00018930934166576876,
"loss": 3.282,
"step": 63650
},
{
"epoch": 6.856097298460876,
"grad_norm": 0.8410781025886536,
"learning_rate": 0.00018898610063570732,
"loss": 3.2914,
"step": 63700
},
{
"epoch": 6.861478850500484,
"grad_norm": 0.7732300758361816,
"learning_rate": 0.00018866285960564595,
"loss": 3.2623,
"step": 63750
},
{
"epoch": 6.866860402540093,
"grad_norm": 0.7925925850868225,
"learning_rate": 0.0001883396185755845,
"loss": 3.2792,
"step": 63800
},
{
"epoch": 6.8722419545797,
"grad_norm": 0.7949431538581848,
"learning_rate": 0.0001880163775455231,
"loss": 3.2922,
"step": 63850
},
{
"epoch": 6.877623506619309,
"grad_norm": 0.7886269092559814,
"learning_rate": 0.00018769313651546168,
"loss": 3.28,
"step": 63900
},
{
"epoch": 6.8830050586589175,
"grad_norm": 0.8459096550941467,
"learning_rate": 0.00018736989548540027,
"loss": 3.2812,
"step": 63950
},
{
"epoch": 6.888386610698525,
"grad_norm": 0.798220694065094,
"learning_rate": 0.00018704665445533886,
"loss": 3.2895,
"step": 64000
},
{
"epoch": 6.888386610698525,
"eval_accuracy": 0.3871914108588348,
"eval_loss": 3.3550844192504883,
"eval_runtime": 180.8293,
"eval_samples_per_second": 99.602,
"eval_steps_per_second": 6.227,
"step": 64000
},
{
"epoch": 6.893768162738134,
"grad_norm": 0.788962185382843,
"learning_rate": 0.00018672341342527743,
"loss": 3.2834,
"step": 64050
},
{
"epoch": 6.899149714777742,
"grad_norm": 0.7914458513259888,
"learning_rate": 0.000186400172395216,
"loss": 3.2871,
"step": 64100
},
{
"epoch": 6.90453126681735,
"grad_norm": 0.8202913403511047,
"learning_rate": 0.00018607693136515462,
"loss": 3.2844,
"step": 64150
},
{
"epoch": 6.9099128188569585,
"grad_norm": 0.8197557330131531,
"learning_rate": 0.0001857536903350932,
"loss": 3.2812,
"step": 64200
},
{
"epoch": 6.915294370896566,
"grad_norm": 0.8909661769866943,
"learning_rate": 0.00018543044930503176,
"loss": 3.2823,
"step": 64250
},
{
"epoch": 6.920675922936175,
"grad_norm": 0.8012292385101318,
"learning_rate": 0.00018510720827497035,
"loss": 3.2831,
"step": 64300
},
{
"epoch": 6.926057474975783,
"grad_norm": 0.8419049978256226,
"learning_rate": 0.00018478396724490895,
"loss": 3.2819,
"step": 64350
},
{
"epoch": 6.931439027015391,
"grad_norm": 0.8665248155593872,
"learning_rate": 0.0001844607262148475,
"loss": 3.2778,
"step": 64400
},
{
"epoch": 6.9368205790549995,
"grad_norm": 0.8091945052146912,
"learning_rate": 0.0001841374851847861,
"loss": 3.2969,
"step": 64450
},
{
"epoch": 6.942202131094608,
"grad_norm": 0.7917461395263672,
"learning_rate": 0.00018381424415472468,
"loss": 3.2777,
"step": 64500
},
{
"epoch": 6.947583683134216,
"grad_norm": 0.8483121395111084,
"learning_rate": 0.00018349100312466327,
"loss": 3.2774,
"step": 64550
},
{
"epoch": 6.952965235173824,
"grad_norm": 0.8231863379478455,
"learning_rate": 0.00018316776209460186,
"loss": 3.2817,
"step": 64600
},
{
"epoch": 6.958346787213433,
"grad_norm": 0.7838043570518494,
"learning_rate": 0.00018284452106454043,
"loss": 3.2871,
"step": 64650
},
{
"epoch": 6.9637283392530405,
"grad_norm": 0.8054148554801941,
"learning_rate": 0.000182521280034479,
"loss": 3.2853,
"step": 64700
},
{
"epoch": 6.969109891292649,
"grad_norm": 0.8586058020591736,
"learning_rate": 0.00018219803900441762,
"loss": 3.2725,
"step": 64750
},
{
"epoch": 6.974491443332257,
"grad_norm": 0.8905388712882996,
"learning_rate": 0.0001818747979743562,
"loss": 3.2941,
"step": 64800
},
{
"epoch": 6.979872995371865,
"grad_norm": 0.8023561835289001,
"learning_rate": 0.00018155155694429478,
"loss": 3.2772,
"step": 64850
},
{
"epoch": 6.985254547411474,
"grad_norm": 0.8267327547073364,
"learning_rate": 0.00018122831591423338,
"loss": 3.3077,
"step": 64900
},
{
"epoch": 6.990636099451081,
"grad_norm": 0.8111370801925659,
"learning_rate": 0.00018090507488417195,
"loss": 3.2822,
"step": 64950
},
{
"epoch": 6.99601765149069,
"grad_norm": 0.7818148732185364,
"learning_rate": 0.00018058829867471175,
"loss": 3.2834,
"step": 65000
},
{
"epoch": 6.99601765149069,
"eval_accuracy": 0.38743957405460305,
"eval_loss": 3.350639581680298,
"eval_runtime": 180.6606,
"eval_samples_per_second": 99.695,
"eval_steps_per_second": 6.233,
"step": 65000
},
{
"epoch": 7.0013992035302985,
"grad_norm": 0.8154913783073425,
"learning_rate": 0.00018026505764465035,
"loss": 3.2506,
"step": 65050
},
{
"epoch": 7.006780755569906,
"grad_norm": 0.776719868183136,
"learning_rate": 0.00017994181661458894,
"loss": 3.1834,
"step": 65100
},
{
"epoch": 7.012162307609515,
"grad_norm": 0.7705662846565247,
"learning_rate": 0.0001796185755845275,
"loss": 3.2001,
"step": 65150
},
{
"epoch": 7.017543859649122,
"grad_norm": 0.8426011800765991,
"learning_rate": 0.00017929533455446608,
"loss": 3.1784,
"step": 65200
},
{
"epoch": 7.022925411688731,
"grad_norm": 0.8200896978378296,
"learning_rate": 0.0001789720935244047,
"loss": 3.1967,
"step": 65250
},
{
"epoch": 7.0283069637283395,
"grad_norm": 0.8123102188110352,
"learning_rate": 0.00017864885249434327,
"loss": 3.201,
"step": 65300
},
{
"epoch": 7.033688515767947,
"grad_norm": 0.8369794487953186,
"learning_rate": 0.00017832561146428183,
"loss": 3.1928,
"step": 65350
},
{
"epoch": 7.039070067807556,
"grad_norm": 0.7779251933097839,
"learning_rate": 0.00017800237043422046,
"loss": 3.1874,
"step": 65400
},
{
"epoch": 7.044451619847164,
"grad_norm": 0.8082521557807922,
"learning_rate": 0.00017767912940415902,
"loss": 3.1967,
"step": 65450
},
{
"epoch": 7.049833171886772,
"grad_norm": 0.8156419992446899,
"learning_rate": 0.0001773558883740976,
"loss": 3.1956,
"step": 65500
},
{
"epoch": 7.0552147239263805,
"grad_norm": 0.8252975344657898,
"learning_rate": 0.00017703264734403619,
"loss": 3.2095,
"step": 65550
},
{
"epoch": 7.060596275965988,
"grad_norm": 0.7841750383377075,
"learning_rate": 0.00017670940631397475,
"loss": 3.1895,
"step": 65600
},
{
"epoch": 7.065977828005597,
"grad_norm": 0.8294128179550171,
"learning_rate": 0.00017638616528391337,
"loss": 3.212,
"step": 65650
},
{
"epoch": 7.071359380045205,
"grad_norm": 0.8671795129776001,
"learning_rate": 0.00017606292425385194,
"loss": 3.2094,
"step": 65700
},
{
"epoch": 7.076740932084813,
"grad_norm": 0.7949682474136353,
"learning_rate": 0.0001757396832237905,
"loss": 3.2189,
"step": 65750
},
{
"epoch": 7.0821224841244215,
"grad_norm": 0.8465704917907715,
"learning_rate": 0.00017541644219372913,
"loss": 3.2139,
"step": 65800
},
{
"epoch": 7.08750403616403,
"grad_norm": 0.8210230469703674,
"learning_rate": 0.0001750932011636677,
"loss": 3.2113,
"step": 65850
},
{
"epoch": 7.092885588203638,
"grad_norm": 0.8009158372879028,
"learning_rate": 0.00017476996013360627,
"loss": 3.2013,
"step": 65900
},
{
"epoch": 7.098267140243246,
"grad_norm": 0.8548141717910767,
"learning_rate": 0.00017444671910354486,
"loss": 3.2163,
"step": 65950
},
{
"epoch": 7.103648692282855,
"grad_norm": 0.8232263922691345,
"learning_rate": 0.00017412347807348346,
"loss": 3.2163,
"step": 66000
},
{
"epoch": 7.103648692282855,
"eval_accuracy": 0.3872667073100902,
"eval_loss": 3.3580427169799805,
"eval_runtime": 180.5751,
"eval_samples_per_second": 99.742,
"eval_steps_per_second": 6.236,
"step": 66000
},
{
"epoch": 7.109030244322462,
"grad_norm": 0.7871417999267578,
"learning_rate": 0.00017380023704342202,
"loss": 3.217,
"step": 66050
},
{
"epoch": 7.114411796362071,
"grad_norm": 0.8225372433662415,
"learning_rate": 0.00017347699601336062,
"loss": 3.2348,
"step": 66100
},
{
"epoch": 7.119793348401679,
"grad_norm": 0.8037530779838562,
"learning_rate": 0.00017315375498329919,
"loss": 3.2072,
"step": 66150
},
{
"epoch": 7.125174900441287,
"grad_norm": 1.7943295240402222,
"learning_rate": 0.00017283051395323778,
"loss": 3.2096,
"step": 66200
},
{
"epoch": 7.130556452480896,
"grad_norm": 0.8043410181999207,
"learning_rate": 0.00017250727292317638,
"loss": 3.2008,
"step": 66250
},
{
"epoch": 7.135938004520503,
"grad_norm": 0.8701448440551758,
"learning_rate": 0.00017218403189311494,
"loss": 3.2097,
"step": 66300
},
{
"epoch": 7.141319556560112,
"grad_norm": 0.8197288513183594,
"learning_rate": 0.0001718607908630535,
"loss": 3.1995,
"step": 66350
},
{
"epoch": 7.1467011085997205,
"grad_norm": 0.8066554665565491,
"learning_rate": 0.00017153754983299213,
"loss": 3.2064,
"step": 66400
},
{
"epoch": 7.152082660639328,
"grad_norm": 0.7715657353401184,
"learning_rate": 0.0001712143088029307,
"loss": 3.2109,
"step": 66450
},
{
"epoch": 7.157464212678937,
"grad_norm": 0.8049308061599731,
"learning_rate": 0.00017089106777286927,
"loss": 3.2056,
"step": 66500
},
{
"epoch": 7.162845764718545,
"grad_norm": 0.8044945001602173,
"learning_rate": 0.0001705678267428079,
"loss": 3.2161,
"step": 66550
},
{
"epoch": 7.168227316758153,
"grad_norm": 0.8120958805084229,
"learning_rate": 0.00017024458571274646,
"loss": 3.2157,
"step": 66600
},
{
"epoch": 7.1736088687977615,
"grad_norm": 0.8076058030128479,
"learning_rate": 0.00016992134468268505,
"loss": 3.2389,
"step": 66650
},
{
"epoch": 7.178990420837369,
"grad_norm": 0.8148431181907654,
"learning_rate": 0.00016959810365262362,
"loss": 3.2187,
"step": 66700
},
{
"epoch": 7.184371972876978,
"grad_norm": 0.8567858338356018,
"learning_rate": 0.0001692748626225622,
"loss": 3.2188,
"step": 66750
},
{
"epoch": 7.189753524916586,
"grad_norm": 0.8196995854377747,
"learning_rate": 0.0001689516215925008,
"loss": 3.2203,
"step": 66800
},
{
"epoch": 7.195135076956194,
"grad_norm": 0.8134042620658875,
"learning_rate": 0.00016862838056243938,
"loss": 3.2023,
"step": 66850
},
{
"epoch": 7.2005166289958025,
"grad_norm": 0.8222635984420776,
"learning_rate": 0.00016830513953237794,
"loss": 3.2136,
"step": 66900
},
{
"epoch": 7.205898181035411,
"grad_norm": 0.8523157835006714,
"learning_rate": 0.00016798189850231657,
"loss": 3.2292,
"step": 66950
},
{
"epoch": 7.211279733075019,
"grad_norm": 0.8117284178733826,
"learning_rate": 0.00016765865747225513,
"loss": 3.2243,
"step": 67000
},
{
"epoch": 7.211279733075019,
"eval_accuracy": 0.38720662226312885,
"eval_loss": 3.3582334518432617,
"eval_runtime": 180.7195,
"eval_samples_per_second": 99.663,
"eval_steps_per_second": 6.231,
"step": 67000
},
{
"epoch": 7.216661285114627,
"grad_norm": 0.849255383014679,
"learning_rate": 0.0001673354164421937,
"loss": 3.2088,
"step": 67050
},
{
"epoch": 7.222042837154235,
"grad_norm": 0.8112828135490417,
"learning_rate": 0.00016701217541213232,
"loss": 3.2221,
"step": 67100
},
{
"epoch": 7.2274243891938434,
"grad_norm": 0.8703665733337402,
"learning_rate": 0.0001666889343820709,
"loss": 3.2114,
"step": 67150
},
{
"epoch": 7.232805941233452,
"grad_norm": 0.7933388352394104,
"learning_rate": 0.00016636569335200946,
"loss": 3.2238,
"step": 67200
},
{
"epoch": 7.23818749327306,
"grad_norm": 0.8398449420928955,
"learning_rate": 0.00016604245232194805,
"loss": 3.2391,
"step": 67250
},
{
"epoch": 7.243569045312668,
"grad_norm": 0.8231996297836304,
"learning_rate": 0.00016571921129188665,
"loss": 3.2372,
"step": 67300
},
{
"epoch": 7.248950597352277,
"grad_norm": 0.8363311886787415,
"learning_rate": 0.00016539597026182521,
"loss": 3.2078,
"step": 67350
},
{
"epoch": 7.254332149391884,
"grad_norm": 0.8070518970489502,
"learning_rate": 0.0001650727292317638,
"loss": 3.2155,
"step": 67400
},
{
"epoch": 7.259713701431493,
"grad_norm": 0.8043374419212341,
"learning_rate": 0.00016474948820170238,
"loss": 3.229,
"step": 67450
},
{
"epoch": 7.265095253471101,
"grad_norm": 0.8224925398826599,
"learning_rate": 0.00016442624717164094,
"loss": 3.2222,
"step": 67500
},
{
"epoch": 7.270476805510709,
"grad_norm": 0.8394501209259033,
"learning_rate": 0.00016410300614157957,
"loss": 3.2113,
"step": 67550
},
{
"epoch": 7.275858357550318,
"grad_norm": 0.8679465055465698,
"learning_rate": 0.00016377976511151813,
"loss": 3.2274,
"step": 67600
},
{
"epoch": 7.281239909589925,
"grad_norm": 0.8839657306671143,
"learning_rate": 0.00016345652408145675,
"loss": 3.2073,
"step": 67650
},
{
"epoch": 7.286621461629534,
"grad_norm": 0.9003349542617798,
"learning_rate": 0.00016313328305139532,
"loss": 3.2276,
"step": 67700
},
{
"epoch": 7.2920030136691425,
"grad_norm": 0.8355953097343445,
"learning_rate": 0.0001628100420213339,
"loss": 3.229,
"step": 67750
},
{
"epoch": 7.29738456570875,
"grad_norm": 0.8046534657478333,
"learning_rate": 0.00016248680099127248,
"loss": 3.2386,
"step": 67800
},
{
"epoch": 7.302766117748359,
"grad_norm": 0.8797202110290527,
"learning_rate": 0.00016216355996121105,
"loss": 3.2217,
"step": 67850
},
{
"epoch": 7.308147669787967,
"grad_norm": 0.9474117755889893,
"learning_rate": 0.00016184031893114965,
"loss": 3.2168,
"step": 67900
},
{
"epoch": 7.313529221827575,
"grad_norm": 0.8707678318023682,
"learning_rate": 0.00016151707790108824,
"loss": 3.2345,
"step": 67950
},
{
"epoch": 7.3189107738671835,
"grad_norm": 0.8038205504417419,
"learning_rate": 0.0001611938368710268,
"loss": 3.222,
"step": 68000
},
{
"epoch": 7.3189107738671835,
"eval_accuracy": 0.387976102014631,
"eval_loss": 3.352640390396118,
"eval_runtime": 180.5346,
"eval_samples_per_second": 99.765,
"eval_steps_per_second": 6.237,
"step": 68000
},
{
"epoch": 7.324292325906791,
"grad_norm": 0.8763945698738098,
"learning_rate": 0.00016087059584096538,
"loss": 3.2126,
"step": 68050
},
{
"epoch": 7.3296738779464,
"grad_norm": 0.8443440198898315,
"learning_rate": 0.000160547354810904,
"loss": 3.2391,
"step": 68100
},
{
"epoch": 7.335055429986008,
"grad_norm": 0.8131680488586426,
"learning_rate": 0.00016023057860144378,
"loss": 3.2449,
"step": 68150
},
{
"epoch": 7.340436982025616,
"grad_norm": 0.8589426279067993,
"learning_rate": 0.0001599073375713824,
"loss": 3.2291,
"step": 68200
},
{
"epoch": 7.3458185340652244,
"grad_norm": 0.8745540380477905,
"learning_rate": 0.00015958409654132097,
"loss": 3.2265,
"step": 68250
},
{
"epoch": 7.351200086104833,
"grad_norm": 0.838469386100769,
"learning_rate": 0.00015926085551125953,
"loss": 3.2466,
"step": 68300
},
{
"epoch": 7.356581638144441,
"grad_norm": 0.8351563811302185,
"learning_rate": 0.00015893761448119813,
"loss": 3.2189,
"step": 68350
},
{
"epoch": 7.361963190184049,
"grad_norm": 0.8260790705680847,
"learning_rate": 0.00015861437345113672,
"loss": 3.2165,
"step": 68400
},
{
"epoch": 7.367344742223658,
"grad_norm": 0.8453501462936401,
"learning_rate": 0.00015829759724167653,
"loss": 3.2279,
"step": 68450
},
{
"epoch": 7.372726294263265,
"grad_norm": 0.8862749934196472,
"learning_rate": 0.0001579743562116151,
"loss": 3.2315,
"step": 68500
},
{
"epoch": 7.378107846302874,
"grad_norm": 0.7972759008407593,
"learning_rate": 0.00015765111518155372,
"loss": 3.2317,
"step": 68550
},
{
"epoch": 7.383489398342482,
"grad_norm": 0.8577588796615601,
"learning_rate": 0.0001573278741514923,
"loss": 3.2332,
"step": 68600
},
{
"epoch": 7.38887095038209,
"grad_norm": 0.8090537190437317,
"learning_rate": 0.00015700463312143085,
"loss": 3.2207,
"step": 68650
},
{
"epoch": 7.394252502421699,
"grad_norm": 0.8941483497619629,
"learning_rate": 0.00015668139209136945,
"loss": 3.2325,
"step": 68700
},
{
"epoch": 7.399634054461306,
"grad_norm": 0.8350239396095276,
"learning_rate": 0.00015635815106130804,
"loss": 3.2347,
"step": 68750
},
{
"epoch": 7.405015606500915,
"grad_norm": 0.834892749786377,
"learning_rate": 0.0001560349100312466,
"loss": 3.2314,
"step": 68800
},
{
"epoch": 7.4103971585405235,
"grad_norm": 0.8487468957901001,
"learning_rate": 0.0001557116690011852,
"loss": 3.2407,
"step": 68850
},
{
"epoch": 7.415778710580131,
"grad_norm": 0.8758741021156311,
"learning_rate": 0.00015538842797112377,
"loss": 3.2153,
"step": 68900
},
{
"epoch": 7.42116026261974,
"grad_norm": 0.879677951335907,
"learning_rate": 0.00015506518694106237,
"loss": 3.2249,
"step": 68950
},
{
"epoch": 7.426541814659347,
"grad_norm": 0.8545063138008118,
"learning_rate": 0.00015474194591100096,
"loss": 3.2349,
"step": 69000
},
{
"epoch": 7.426541814659347,
"eval_accuracy": 0.3882562091594167,
"eval_loss": 3.3482072353363037,
"eval_runtime": 180.8861,
"eval_samples_per_second": 99.571,
"eval_steps_per_second": 6.225,
"step": 69000
},
{
"epoch": 7.431923366698956,
"grad_norm": 0.8772327303886414,
"learning_rate": 0.00015441870488093953,
"loss": 3.2284,
"step": 69050
},
{
"epoch": 7.4373049187385645,
"grad_norm": 0.9024936556816101,
"learning_rate": 0.0001540954638508781,
"loss": 3.218,
"step": 69100
},
{
"epoch": 7.442686470778172,
"grad_norm": 0.8099319338798523,
"learning_rate": 0.00015377222282081672,
"loss": 3.2183,
"step": 69150
},
{
"epoch": 7.448068022817781,
"grad_norm": 0.793427050113678,
"learning_rate": 0.0001534489817907553,
"loss": 3.2307,
"step": 69200
},
{
"epoch": 7.453449574857389,
"grad_norm": 0.8657283186912537,
"learning_rate": 0.00015312574076069388,
"loss": 3.2321,
"step": 69250
},
{
"epoch": 7.458831126896997,
"grad_norm": 0.8640968799591064,
"learning_rate": 0.00015280249973063248,
"loss": 3.2242,
"step": 69300
},
{
"epoch": 7.4642126789366054,
"grad_norm": 0.8989497423171997,
"learning_rate": 0.00015247925870057104,
"loss": 3.2395,
"step": 69350
},
{
"epoch": 7.469594230976213,
"grad_norm": 0.8708150386810303,
"learning_rate": 0.00015215601767050964,
"loss": 3.2317,
"step": 69400
},
{
"epoch": 7.474975783015822,
"grad_norm": 0.8274281024932861,
"learning_rate": 0.0001518327766404482,
"loss": 3.2348,
"step": 69450
},
{
"epoch": 7.48035733505543,
"grad_norm": 0.841109573841095,
"learning_rate": 0.0001515095356103868,
"loss": 3.2263,
"step": 69500
},
{
"epoch": 7.485738887095038,
"grad_norm": 0.8123463988304138,
"learning_rate": 0.0001511862945803254,
"loss": 3.2116,
"step": 69550
},
{
"epoch": 7.491120439134646,
"grad_norm": 0.8069713115692139,
"learning_rate": 0.00015086305355026396,
"loss": 3.201,
"step": 69600
},
{
"epoch": 7.496501991174255,
"grad_norm": 0.8293700218200684,
"learning_rate": 0.00015053981252020253,
"loss": 3.2439,
"step": 69650
},
{
"epoch": 7.501883543213863,
"grad_norm": 0.8899844884872437,
"learning_rate": 0.00015021657149014115,
"loss": 3.2287,
"step": 69700
},
{
"epoch": 7.507265095253471,
"grad_norm": 0.8449057936668396,
"learning_rate": 0.00014989333046007972,
"loss": 3.2416,
"step": 69750
},
{
"epoch": 7.51264664729308,
"grad_norm": 0.8326901197433472,
"learning_rate": 0.00014957008943001832,
"loss": 3.2332,
"step": 69800
},
{
"epoch": 7.518028199332687,
"grad_norm": 0.8048696517944336,
"learning_rate": 0.00014924684839995688,
"loss": 3.2153,
"step": 69850
},
{
"epoch": 7.523409751372296,
"grad_norm": 0.8521336913108826,
"learning_rate": 0.00014892360736989548,
"loss": 3.2298,
"step": 69900
},
{
"epoch": 7.528791303411904,
"grad_norm": 0.8536204695701599,
"learning_rate": 0.00014860036633983407,
"loss": 3.2237,
"step": 69950
},
{
"epoch": 7.534172855451512,
"grad_norm": 0.8090736865997314,
"learning_rate": 0.00014827712530977264,
"loss": 3.2364,
"step": 70000
},
{
"epoch": 7.534172855451512,
"eval_accuracy": 0.38876535659171535,
"eval_loss": 3.3461689949035645,
"eval_runtime": 180.6754,
"eval_samples_per_second": 99.687,
"eval_steps_per_second": 6.232,
"step": 70000
},
{
"epoch": 7.539554407491121,
"grad_norm": 0.8323525786399841,
"learning_rate": 0.00014795388427971123,
"loss": 3.2362,
"step": 70050
},
{
"epoch": 7.544935959530728,
"grad_norm": 0.8688218593597412,
"learning_rate": 0.0001476306432496498,
"loss": 3.237,
"step": 70100
},
{
"epoch": 7.550317511570337,
"grad_norm": 0.8065937161445618,
"learning_rate": 0.0001473074022195884,
"loss": 3.2411,
"step": 70150
},
{
"epoch": 7.5556990636099455,
"grad_norm": 0.8267635703086853,
"learning_rate": 0.00014698416118952696,
"loss": 3.2375,
"step": 70200
},
{
"epoch": 7.561080615649553,
"grad_norm": 0.8378524780273438,
"learning_rate": 0.00014666092015946556,
"loss": 3.2159,
"step": 70250
},
{
"epoch": 7.566462167689162,
"grad_norm": 0.8994044661521912,
"learning_rate": 0.00014633767912940415,
"loss": 3.2369,
"step": 70300
},
{
"epoch": 7.57184371972877,
"grad_norm": 0.8720497488975525,
"learning_rate": 0.00014601443809934272,
"loss": 3.2369,
"step": 70350
},
{
"epoch": 7.577225271768378,
"grad_norm": 0.8321914076805115,
"learning_rate": 0.00014569119706928132,
"loss": 3.2346,
"step": 70400
},
{
"epoch": 7.5826068238079865,
"grad_norm": 0.844092607498169,
"learning_rate": 0.0001453679560392199,
"loss": 3.2344,
"step": 70450
},
{
"epoch": 7.587988375847594,
"grad_norm": 0.8993640542030334,
"learning_rate": 0.00014504471500915848,
"loss": 3.2362,
"step": 70500
},
{
"epoch": 7.593369927887203,
"grad_norm": 0.8442127108573914,
"learning_rate": 0.00014472147397909707,
"loss": 3.2303,
"step": 70550
},
{
"epoch": 7.598751479926811,
"grad_norm": 0.8459310531616211,
"learning_rate": 0.00014439823294903564,
"loss": 3.2299,
"step": 70600
},
{
"epoch": 7.604133031966419,
"grad_norm": 0.9453602433204651,
"learning_rate": 0.00014407499191897423,
"loss": 3.2242,
"step": 70650
},
{
"epoch": 7.609514584006027,
"grad_norm": 0.8440732955932617,
"learning_rate": 0.0001437517508889128,
"loss": 3.2356,
"step": 70700
},
{
"epoch": 7.614896136045635,
"grad_norm": 0.8589253425598145,
"learning_rate": 0.0001434285098588514,
"loss": 3.2333,
"step": 70750
},
{
"epoch": 7.620277688085244,
"grad_norm": 0.8273172974586487,
"learning_rate": 0.00014310526882879,
"loss": 3.2155,
"step": 70800
},
{
"epoch": 7.625659240124852,
"grad_norm": 0.8450290560722351,
"learning_rate": 0.00014278202779872856,
"loss": 3.2351,
"step": 70850
},
{
"epoch": 7.63104079216446,
"grad_norm": 0.8752638697624207,
"learning_rate": 0.00014245878676866715,
"loss": 3.2436,
"step": 70900
},
{
"epoch": 7.636422344204068,
"grad_norm": 0.8159819841384888,
"learning_rate": 0.00014213554573860575,
"loss": 3.2332,
"step": 70950
},
{
"epoch": 7.641803896243677,
"grad_norm": 0.8391739130020142,
"learning_rate": 0.00014181230470854434,
"loss": 3.2297,
"step": 71000
},
{
"epoch": 7.641803896243677,
"eval_accuracy": 0.3891160881135805,
"eval_loss": 3.3415868282318115,
"eval_runtime": 180.8754,
"eval_samples_per_second": 99.577,
"eval_steps_per_second": 6.225,
"step": 71000
},
{
"epoch": 7.647185448283285,
"grad_norm": 0.8811625838279724,
"learning_rate": 0.0001414890636784829,
"loss": 3.2345,
"step": 71050
},
{
"epoch": 7.652567000322893,
"grad_norm": 0.8388746976852417,
"learning_rate": 0.0001411658226484215,
"loss": 3.227,
"step": 71100
},
{
"epoch": 7.657948552362502,
"grad_norm": 0.8501401543617249,
"learning_rate": 0.00014084258161836007,
"loss": 3.2577,
"step": 71150
},
{
"epoch": 7.663330104402109,
"grad_norm": 0.8466740846633911,
"learning_rate": 0.00014051934058829867,
"loss": 3.2052,
"step": 71200
},
{
"epoch": 7.668711656441718,
"grad_norm": 0.8789371252059937,
"learning_rate": 0.00014019609955823723,
"loss": 3.2209,
"step": 71250
},
{
"epoch": 7.674093208481326,
"grad_norm": 0.8286100625991821,
"learning_rate": 0.00013987285852817583,
"loss": 3.2018,
"step": 71300
},
{
"epoch": 7.679474760520934,
"grad_norm": 0.8479146361351013,
"learning_rate": 0.0001395496174981144,
"loss": 3.2163,
"step": 71350
},
{
"epoch": 7.684856312560543,
"grad_norm": 0.9216192960739136,
"learning_rate": 0.000139226376468053,
"loss": 3.226,
"step": 71400
},
{
"epoch": 7.69023786460015,
"grad_norm": 0.9375202655792236,
"learning_rate": 0.00013890313543799159,
"loss": 3.2309,
"step": 71450
},
{
"epoch": 7.695619416639759,
"grad_norm": 0.8177657127380371,
"learning_rate": 0.00013857989440793018,
"loss": 3.219,
"step": 71500
},
{
"epoch": 7.7010009686793675,
"grad_norm": 0.846137285232544,
"learning_rate": 0.00013825665337786875,
"loss": 3.2353,
"step": 71550
},
{
"epoch": 7.706382520718975,
"grad_norm": 0.8408653140068054,
"learning_rate": 0.00013793341234780734,
"loss": 3.244,
"step": 71600
},
{
"epoch": 7.711764072758584,
"grad_norm": 0.8572009801864624,
"learning_rate": 0.00013761017131774594,
"loss": 3.2277,
"step": 71650
},
{
"epoch": 7.717145624798192,
"grad_norm": 0.8709561824798584,
"learning_rate": 0.0001372869302876845,
"loss": 3.2533,
"step": 71700
},
{
"epoch": 7.7225271768378,
"grad_norm": 0.8801485896110535,
"learning_rate": 0.0001369636892576231,
"loss": 3.2471,
"step": 71750
},
{
"epoch": 7.727908728877408,
"grad_norm": 0.8573904633522034,
"learning_rate": 0.00013664044822756167,
"loss": 3.2491,
"step": 71800
},
{
"epoch": 7.733290280917016,
"grad_norm": 0.8685466051101685,
"learning_rate": 0.00013631720719750026,
"loss": 3.2406,
"step": 71850
},
{
"epoch": 7.738671832956625,
"grad_norm": 0.8761990070343018,
"learning_rate": 0.00013599396616743883,
"loss": 3.2162,
"step": 71900
},
{
"epoch": 7.744053384996233,
"grad_norm": 0.8514412641525269,
"learning_rate": 0.00013567072513737742,
"loss": 3.2311,
"step": 71950
},
{
"epoch": 7.749434937035841,
"grad_norm": 0.8694790005683899,
"learning_rate": 0.00013534748410731602,
"loss": 3.2361,
"step": 72000
},
{
"epoch": 7.749434937035841,
"eval_accuracy": 0.38934903990505476,
"eval_loss": 3.3384485244750977,
"eval_runtime": 180.5632,
"eval_samples_per_second": 99.749,
"eval_steps_per_second": 6.236,
"step": 72000
},
{
"epoch": 7.754816489075449,
"grad_norm": 0.9411042928695679,
"learning_rate": 0.0001350242430772546,
"loss": 3.245,
"step": 72050
},
{
"epoch": 7.760198041115058,
"grad_norm": 0.9091503024101257,
"learning_rate": 0.00013470100204719318,
"loss": 3.2592,
"step": 72100
},
{
"epoch": 7.765579593154666,
"grad_norm": 0.8613893389701843,
"learning_rate": 0.00013437776101713178,
"loss": 3.2299,
"step": 72150
},
{
"epoch": 7.770961145194274,
"grad_norm": 0.8697505593299866,
"learning_rate": 0.00013405451998707034,
"loss": 3.231,
"step": 72200
},
{
"epoch": 7.776342697233883,
"grad_norm": 0.8704033493995667,
"learning_rate": 0.00013373127895700894,
"loss": 3.2344,
"step": 72250
},
{
"epoch": 7.78172424927349,
"grad_norm": 0.8434908986091614,
"learning_rate": 0.00013340803792694753,
"loss": 3.2269,
"step": 72300
},
{
"epoch": 7.787105801313099,
"grad_norm": 0.8213037252426147,
"learning_rate": 0.0001330847968968861,
"loss": 3.2196,
"step": 72350
},
{
"epoch": 7.792487353352707,
"grad_norm": 0.8428940176963806,
"learning_rate": 0.00013276155586682467,
"loss": 3.2337,
"step": 72400
},
{
"epoch": 7.797868905392315,
"grad_norm": 0.8586910367012024,
"learning_rate": 0.00013243831483676326,
"loss": 3.2219,
"step": 72450
},
{
"epoch": 7.803250457431924,
"grad_norm": 0.9171772003173828,
"learning_rate": 0.00013211507380670186,
"loss": 3.2315,
"step": 72500
},
{
"epoch": 7.808632009471531,
"grad_norm": 0.9385636448860168,
"learning_rate": 0.00013179183277664042,
"loss": 3.233,
"step": 72550
},
{
"epoch": 7.81401356151114,
"grad_norm": 0.8959430456161499,
"learning_rate": 0.00013146859174657902,
"loss": 3.2515,
"step": 72600
},
{
"epoch": 7.819395113550748,
"grad_norm": 0.8392729759216309,
"learning_rate": 0.00013114535071651761,
"loss": 3.2207,
"step": 72650
},
{
"epoch": 7.824776665590356,
"grad_norm": 0.8786283135414124,
"learning_rate": 0.00013082210968645618,
"loss": 3.232,
"step": 72700
},
{
"epoch": 7.830158217629965,
"grad_norm": 0.8381286263465881,
"learning_rate": 0.00013049886865639478,
"loss": 3.2142,
"step": 72750
},
{
"epoch": 7.835539769669572,
"grad_norm": 0.8869178891181946,
"learning_rate": 0.00013017562762633337,
"loss": 3.2404,
"step": 72800
},
{
"epoch": 7.840921321709181,
"grad_norm": 0.8395768404006958,
"learning_rate": 0.00012985238659627194,
"loss": 3.2477,
"step": 72850
},
{
"epoch": 7.846302873748789,
"grad_norm": 0.8443187475204468,
"learning_rate": 0.00012952914556621053,
"loss": 3.2534,
"step": 72900
},
{
"epoch": 7.851684425788397,
"grad_norm": 0.8238486051559448,
"learning_rate": 0.0001292059045361491,
"loss": 3.2415,
"step": 72950
},
{
"epoch": 7.857065977828006,
"grad_norm": 0.8638015985488892,
"learning_rate": 0.0001288826635060877,
"loss": 3.2416,
"step": 73000
},
{
"epoch": 7.857065977828006,
"eval_accuracy": 0.3898458009081426,
"eval_loss": 3.3356986045837402,
"eval_runtime": 180.5467,
"eval_samples_per_second": 99.758,
"eval_steps_per_second": 6.237,
"step": 73000
},
{
"epoch": 7.862447529867614,
"grad_norm": 0.8598248958587646,
"learning_rate": 0.00012855942247602626,
"loss": 3.2224,
"step": 73050
},
{
"epoch": 7.867829081907222,
"grad_norm": 0.8552663326263428,
"learning_rate": 0.00012823618144596486,
"loss": 3.2338,
"step": 73100
},
{
"epoch": 7.87321063394683,
"grad_norm": 0.8366514444351196,
"learning_rate": 0.00012791294041590345,
"loss": 3.2301,
"step": 73150
},
{
"epoch": 7.878592185986438,
"grad_norm": 0.870034396648407,
"learning_rate": 0.00012758969938584202,
"loss": 3.2463,
"step": 73200
},
{
"epoch": 7.883973738026047,
"grad_norm": 0.9482511878013611,
"learning_rate": 0.00012726645835578061,
"loss": 3.2368,
"step": 73250
},
{
"epoch": 7.889355290065655,
"grad_norm": 0.8701103329658508,
"learning_rate": 0.00012694968214632045,
"loss": 3.2315,
"step": 73300
},
{
"epoch": 7.894736842105263,
"grad_norm": 0.8550585508346558,
"learning_rate": 0.00012662644111625902,
"loss": 3.2292,
"step": 73350
},
{
"epoch": 7.900118394144871,
"grad_norm": 0.84943026304245,
"learning_rate": 0.0001263032000861976,
"loss": 3.2401,
"step": 73400
},
{
"epoch": 7.90549994618448,
"grad_norm": 0.8325279355049133,
"learning_rate": 0.00012597995905613618,
"loss": 3.2095,
"step": 73450
},
{
"epoch": 7.910881498224088,
"grad_norm": 0.8745725750923157,
"learning_rate": 0.00012565671802607477,
"loss": 3.2367,
"step": 73500
},
{
"epoch": 7.916263050263696,
"grad_norm": 0.8808631896972656,
"learning_rate": 0.00012533347699601334,
"loss": 3.2522,
"step": 73550
},
{
"epoch": 7.921644602303305,
"grad_norm": 0.8490099906921387,
"learning_rate": 0.00012501023596595193,
"loss": 3.2407,
"step": 73600
},
{
"epoch": 7.927026154342912,
"grad_norm": 0.8633183836936951,
"learning_rate": 0.0001246869949358905,
"loss": 3.2352,
"step": 73650
},
{
"epoch": 7.932407706382521,
"grad_norm": 0.8482357263565063,
"learning_rate": 0.0001243637539058291,
"loss": 3.2172,
"step": 73700
},
{
"epoch": 7.937789258422129,
"grad_norm": 0.8842774033546448,
"learning_rate": 0.0001240405128757677,
"loss": 3.2256,
"step": 73750
},
{
"epoch": 7.943170810461737,
"grad_norm": 0.9809737205505371,
"learning_rate": 0.00012371727184570629,
"loss": 3.231,
"step": 73800
},
{
"epoch": 7.948552362501346,
"grad_norm": 0.8404791355133057,
"learning_rate": 0.00012339403081564485,
"loss": 3.2285,
"step": 73850
},
{
"epoch": 7.953933914540953,
"grad_norm": 0.8805235624313354,
"learning_rate": 0.00012307078978558345,
"loss": 3.2412,
"step": 73900
},
{
"epoch": 7.959315466580562,
"grad_norm": 0.8662030100822449,
"learning_rate": 0.00012274754875552202,
"loss": 3.2325,
"step": 73950
},
{
"epoch": 7.96469701862017,
"grad_norm": 0.8386152386665344,
"learning_rate": 0.0001224243077254606,
"loss": 3.2293,
"step": 74000
},
{
"epoch": 7.96469701862017,
"eval_accuracy": 0.3901281897635724,
"eval_loss": 3.3309998512268066,
"eval_runtime": 180.7262,
"eval_samples_per_second": 99.659,
"eval_steps_per_second": 6.23,
"step": 74000
},
{
"epoch": 7.970078570659778,
"grad_norm": 0.8542754650115967,
"learning_rate": 0.00012210106669539918,
"loss": 3.2285,
"step": 74050
},
{
"epoch": 7.975460122699387,
"grad_norm": 0.9284147024154663,
"learning_rate": 0.00012177782566533779,
"loss": 3.2351,
"step": 74100
},
{
"epoch": 7.980841674738995,
"grad_norm": 0.8983561396598816,
"learning_rate": 0.00012145458463527635,
"loss": 3.2408,
"step": 74150
},
{
"epoch": 7.986223226778603,
"grad_norm": 0.8538508415222168,
"learning_rate": 0.00012113134360521495,
"loss": 3.238,
"step": 74200
},
{
"epoch": 7.991604778818211,
"grad_norm": 0.8690817356109619,
"learning_rate": 0.00012080810257515353,
"loss": 3.2251,
"step": 74250
},
{
"epoch": 7.996986330857819,
"grad_norm": 0.8762375712394714,
"learning_rate": 0.00012048486154509212,
"loss": 3.2278,
"step": 74300
},
{
"epoch": 8.002367882897428,
"grad_norm": 0.823405385017395,
"learning_rate": 0.00012016162051503069,
"loss": 3.1888,
"step": 74350
},
{
"epoch": 8.007749434937036,
"grad_norm": 0.8836029171943665,
"learning_rate": 0.00011983837948496929,
"loss": 3.1696,
"step": 74400
},
{
"epoch": 8.013130986976645,
"grad_norm": 0.8643078207969666,
"learning_rate": 0.00011951513845490787,
"loss": 3.1547,
"step": 74450
},
{
"epoch": 8.018512539016251,
"grad_norm": 0.8536393046379089,
"learning_rate": 0.00011919189742484645,
"loss": 3.1705,
"step": 74500
},
{
"epoch": 8.02389409105586,
"grad_norm": 0.8264943957328796,
"learning_rate": 0.00011886865639478503,
"loss": 3.1485,
"step": 74550
},
{
"epoch": 8.029275643095469,
"grad_norm": 0.88083416223526,
"learning_rate": 0.00011854541536472362,
"loss": 3.1529,
"step": 74600
},
{
"epoch": 8.034657195135077,
"grad_norm": 0.8759968876838684,
"learning_rate": 0.00011822217433466219,
"loss": 3.176,
"step": 74650
},
{
"epoch": 8.040038747174686,
"grad_norm": 0.8657451868057251,
"learning_rate": 0.00011789893330460079,
"loss": 3.1421,
"step": 74700
},
{
"epoch": 8.045420299214294,
"grad_norm": 0.8699697256088257,
"learning_rate": 0.00011757569227453937,
"loss": 3.1628,
"step": 74750
},
{
"epoch": 8.050801851253901,
"grad_norm": 0.859023928642273,
"learning_rate": 0.00011725245124447796,
"loss": 3.17,
"step": 74800
},
{
"epoch": 8.05618340329351,
"grad_norm": 0.8847795724868774,
"learning_rate": 0.00011692921021441653,
"loss": 3.1528,
"step": 74850
},
{
"epoch": 8.061564955333118,
"grad_norm": 0.8535637259483337,
"learning_rate": 0.00011660596918435512,
"loss": 3.175,
"step": 74900
},
{
"epoch": 8.066946507372727,
"grad_norm": 0.8353862166404724,
"learning_rate": 0.00011628272815429372,
"loss": 3.168,
"step": 74950
},
{
"epoch": 8.072328059412335,
"grad_norm": 0.8807953000068665,
"learning_rate": 0.00011595948712423229,
"loss": 3.1452,
"step": 75000
},
{
"epoch": 8.072328059412335,
"eval_accuracy": 0.3901538318450966,
"eval_loss": 3.3356988430023193,
"eval_runtime": 180.8043,
"eval_samples_per_second": 99.616,
"eval_steps_per_second": 6.228,
"step": 75000
},
{
"epoch": 8.077709611451942,
"grad_norm": 0.8582302927970886,
"learning_rate": 0.00011563624609417088,
"loss": 3.1795,
"step": 75050
},
{
"epoch": 8.08309116349155,
"grad_norm": 0.8993620872497559,
"learning_rate": 0.00011531300506410946,
"loss": 3.172,
"step": 75100
},
{
"epoch": 8.088472715531159,
"grad_norm": 0.863032877445221,
"learning_rate": 0.00011498976403404804,
"loss": 3.1669,
"step": 75150
},
{
"epoch": 8.093854267570768,
"grad_norm": 0.8714480996131897,
"learning_rate": 0.00011466652300398663,
"loss": 3.1687,
"step": 75200
},
{
"epoch": 8.099235819610376,
"grad_norm": 0.8861761093139648,
"learning_rate": 0.00011434328197392522,
"loss": 3.1669,
"step": 75250
},
{
"epoch": 8.104617371649983,
"grad_norm": 0.8841901421546936,
"learning_rate": 0.0001140200409438638,
"loss": 3.1596,
"step": 75300
},
{
"epoch": 8.109998923689592,
"grad_norm": 0.9461728930473328,
"learning_rate": 0.00011370326473440361,
"loss": 3.1537,
"step": 75350
},
{
"epoch": 8.1153804757292,
"grad_norm": 0.8345040082931519,
"learning_rate": 0.0001133800237043422,
"loss": 3.1692,
"step": 75400
},
{
"epoch": 8.120762027768809,
"grad_norm": 0.8404425382614136,
"learning_rate": 0.00011305678267428077,
"loss": 3.1557,
"step": 75450
},
{
"epoch": 8.126143579808417,
"grad_norm": 0.9137583374977112,
"learning_rate": 0.00011273354164421936,
"loss": 3.1746,
"step": 75500
},
{
"epoch": 8.131525131848026,
"grad_norm": 0.892432689666748,
"learning_rate": 0.00011241030061415795,
"loss": 3.1794,
"step": 75550
},
{
"epoch": 8.136906683887632,
"grad_norm": 0.8653923273086548,
"learning_rate": 0.00011208705958409654,
"loss": 3.1651,
"step": 75600
},
{
"epoch": 8.142288235927241,
"grad_norm": 0.8782545328140259,
"learning_rate": 0.00011176381855403511,
"loss": 3.1522,
"step": 75650
},
{
"epoch": 8.14766978796685,
"grad_norm": 0.9366098642349243,
"learning_rate": 0.0001114405775239737,
"loss": 3.1548,
"step": 75700
},
{
"epoch": 8.153051340006458,
"grad_norm": 0.8603706359863281,
"learning_rate": 0.00011111733649391228,
"loss": 3.1867,
"step": 75750
},
{
"epoch": 8.158432892046067,
"grad_norm": 0.8969858288764954,
"learning_rate": 0.00011079409546385086,
"loss": 3.1751,
"step": 75800
},
{
"epoch": 8.163814444085673,
"grad_norm": 0.8933687806129456,
"learning_rate": 0.00011047085443378945,
"loss": 3.1702,
"step": 75850
},
{
"epoch": 8.169195996125282,
"grad_norm": 0.8683410286903381,
"learning_rate": 0.00011014761340372804,
"loss": 3.1628,
"step": 75900
},
{
"epoch": 8.17457754816489,
"grad_norm": 0.8602153658866882,
"learning_rate": 0.00010982437237366661,
"loss": 3.1739,
"step": 75950
},
{
"epoch": 8.1799591002045,
"grad_norm": 0.8595947623252869,
"learning_rate": 0.0001095011313436052,
"loss": 3.1608,
"step": 76000
},
{
"epoch": 8.1799591002045,
"eval_accuracy": 0.39043980624582436,
"eval_loss": 3.3351192474365234,
"eval_runtime": 179.5075,
"eval_samples_per_second": 100.336,
"eval_steps_per_second": 6.273,
"step": 76000
},
{
"epoch": 8.185340652244108,
"grad_norm": 0.8788470029830933,
"learning_rate": 0.0001091778903135438,
"loss": 3.1627,
"step": 76050
},
{
"epoch": 8.190722204283716,
"grad_norm": 0.9367756843566895,
"learning_rate": 0.00010885464928348238,
"loss": 3.1624,
"step": 76100
},
{
"epoch": 8.196103756323323,
"grad_norm": 0.902297854423523,
"learning_rate": 0.00010853140825342096,
"loss": 3.1812,
"step": 76150
},
{
"epoch": 8.201485308362932,
"grad_norm": 0.8547227382659912,
"learning_rate": 0.00010820816722335954,
"loss": 3.1807,
"step": 76200
},
{
"epoch": 8.20686686040254,
"grad_norm": 0.8972384929656982,
"learning_rate": 0.00010788492619329814,
"loss": 3.1558,
"step": 76250
},
{
"epoch": 8.212248412442149,
"grad_norm": 0.8367764353752136,
"learning_rate": 0.0001075616851632367,
"loss": 3.1766,
"step": 76300
},
{
"epoch": 8.217629964481757,
"grad_norm": 0.9024211764335632,
"learning_rate": 0.0001072384441331753,
"loss": 3.1712,
"step": 76350
},
{
"epoch": 8.223011516521364,
"grad_norm": 0.9114934802055359,
"learning_rate": 0.00010691520310311388,
"loss": 3.1711,
"step": 76400
},
{
"epoch": 8.228393068560973,
"grad_norm": 0.8747788071632385,
"learning_rate": 0.00010659196207305246,
"loss": 3.1774,
"step": 76450
},
{
"epoch": 8.233774620600581,
"grad_norm": 0.8949354887008667,
"learning_rate": 0.00010626872104299104,
"loss": 3.1705,
"step": 76500
},
{
"epoch": 8.23915617264019,
"grad_norm": 0.8727163076400757,
"learning_rate": 0.00010594548001292964,
"loss": 3.1873,
"step": 76550
},
{
"epoch": 8.244537724679798,
"grad_norm": 0.9465581178665161,
"learning_rate": 0.00010562223898286823,
"loss": 3.1834,
"step": 76600
},
{
"epoch": 8.249919276719407,
"grad_norm": 0.8704432249069214,
"learning_rate": 0.0001052989979528068,
"loss": 3.1673,
"step": 76650
},
{
"epoch": 8.255300828759013,
"grad_norm": 0.8336291909217834,
"learning_rate": 0.00010497575692274539,
"loss": 3.1781,
"step": 76700
},
{
"epoch": 8.260682380798622,
"grad_norm": 0.8544026613235474,
"learning_rate": 0.00010465251589268397,
"loss": 3.1756,
"step": 76750
},
{
"epoch": 8.26606393283823,
"grad_norm": 0.8753527402877808,
"learning_rate": 0.00010432927486262254,
"loss": 3.1786,
"step": 76800
},
{
"epoch": 8.27144548487784,
"grad_norm": 0.8853304386138916,
"learning_rate": 0.00010400603383256114,
"loss": 3.186,
"step": 76850
},
{
"epoch": 8.276827036917448,
"grad_norm": 0.8864724040031433,
"learning_rate": 0.00010368279280249973,
"loss": 3.173,
"step": 76900
},
{
"epoch": 8.282208588957054,
"grad_norm": 0.8877735733985901,
"learning_rate": 0.0001033595517724383,
"loss": 3.1759,
"step": 76950
},
{
"epoch": 8.287590140996663,
"grad_norm": 0.8727827072143555,
"learning_rate": 0.00010303631074237689,
"loss": 3.184,
"step": 77000
},
{
"epoch": 8.287590140996663,
"eval_accuracy": 0.3906263632542018,
"eval_loss": 3.3312065601348877,
"eval_runtime": 179.6044,
"eval_samples_per_second": 100.281,
"eval_steps_per_second": 6.269,
"step": 77000
},
{
"epoch": 8.292971693036272,
"grad_norm": 0.9259955883026123,
"learning_rate": 0.00010271306971231547,
"loss": 3.1869,
"step": 77050
},
{
"epoch": 8.29835324507588,
"grad_norm": 0.8535556793212891,
"learning_rate": 0.00010238982868225407,
"loss": 3.1768,
"step": 77100
},
{
"epoch": 8.303734797115489,
"grad_norm": 0.8989574909210205,
"learning_rate": 0.00010206658765219264,
"loss": 3.1815,
"step": 77150
},
{
"epoch": 8.309116349155097,
"grad_norm": 0.8857195377349854,
"learning_rate": 0.00010174334662213123,
"loss": 3.1588,
"step": 77200
},
{
"epoch": 8.314497901194704,
"grad_norm": 0.8919908404350281,
"learning_rate": 0.00010142010559206981,
"loss": 3.1797,
"step": 77250
},
{
"epoch": 8.319879453234313,
"grad_norm": 0.8413748145103455,
"learning_rate": 0.00010109686456200839,
"loss": 3.1667,
"step": 77300
},
{
"epoch": 8.325261005273921,
"grad_norm": 0.9747196435928345,
"learning_rate": 0.00010077362353194697,
"loss": 3.1803,
"step": 77350
},
{
"epoch": 8.33064255731353,
"grad_norm": 0.9089459180831909,
"learning_rate": 0.00010045038250188557,
"loss": 3.1852,
"step": 77400
},
{
"epoch": 8.336024109353138,
"grad_norm": 0.8968145847320557,
"learning_rate": 0.00010012714147182414,
"loss": 3.1821,
"step": 77450
},
{
"epoch": 8.341405661392745,
"grad_norm": 0.8776057958602905,
"learning_rate": 9.980390044176273e-05,
"loss": 3.1656,
"step": 77500
},
{
"epoch": 8.346787213432354,
"grad_norm": 0.9172366261482239,
"learning_rate": 9.948065941170133e-05,
"loss": 3.1746,
"step": 77550
},
{
"epoch": 8.352168765471962,
"grad_norm": 0.9139503240585327,
"learning_rate": 9.91574183816399e-05,
"loss": 3.1718,
"step": 77600
},
{
"epoch": 8.35755031751157,
"grad_norm": 0.8708044290542603,
"learning_rate": 9.883417735157849e-05,
"loss": 3.1651,
"step": 77650
},
{
"epoch": 8.36293186955118,
"grad_norm": 0.8740552067756653,
"learning_rate": 9.851093632151707e-05,
"loss": 3.1701,
"step": 77700
},
{
"epoch": 8.368313421590786,
"grad_norm": 0.8782268762588501,
"learning_rate": 9.818769529145566e-05,
"loss": 3.1765,
"step": 77750
},
{
"epoch": 8.373694973630395,
"grad_norm": 0.9119528532028198,
"learning_rate": 9.786445426139423e-05,
"loss": 3.1792,
"step": 77800
},
{
"epoch": 8.379076525670003,
"grad_norm": 0.8632680177688599,
"learning_rate": 9.754121323133283e-05,
"loss": 3.1724,
"step": 77850
},
{
"epoch": 8.384458077709612,
"grad_norm": 0.8710095882415771,
"learning_rate": 9.72179722012714e-05,
"loss": 3.184,
"step": 77900
},
{
"epoch": 8.38983962974922,
"grad_norm": 1.0001767873764038,
"learning_rate": 9.689473117120999e-05,
"loss": 3.1691,
"step": 77950
},
{
"epoch": 8.395221181788829,
"grad_norm": 0.8792982697486877,
"learning_rate": 9.657149014114857e-05,
"loss": 3.1742,
"step": 78000
},
{
"epoch": 8.395221181788829,
"eval_accuracy": 0.3911284482487925,
"eval_loss": 3.327723264694214,
"eval_runtime": 179.8658,
"eval_samples_per_second": 100.136,
"eval_steps_per_second": 6.26,
"step": 78000
},
{
"epoch": 8.400602733828435,
"grad_norm": 0.8982104063034058,
"learning_rate": 9.624824911108716e-05,
"loss": 3.1786,
"step": 78050
},
{
"epoch": 8.405984285868044,
"grad_norm": 0.9011202454566956,
"learning_rate": 9.592500808102574e-05,
"loss": 3.1867,
"step": 78100
},
{
"epoch": 8.411365837907653,
"grad_norm": 0.9007724523544312,
"learning_rate": 9.560176705096433e-05,
"loss": 3.1988,
"step": 78150
},
{
"epoch": 8.416747389947261,
"grad_norm": 0.8967268466949463,
"learning_rate": 9.52785260209029e-05,
"loss": 3.1872,
"step": 78200
},
{
"epoch": 8.42212894198687,
"grad_norm": 0.9627910256385803,
"learning_rate": 9.49552849908415e-05,
"loss": 3.175,
"step": 78250
},
{
"epoch": 8.427510494026476,
"grad_norm": 0.873896062374115,
"learning_rate": 9.463204396078007e-05,
"loss": 3.1978,
"step": 78300
},
{
"epoch": 8.432892046066085,
"grad_norm": 0.8684766888618469,
"learning_rate": 9.430880293071866e-05,
"loss": 3.1839,
"step": 78350
},
{
"epoch": 8.438273598105694,
"grad_norm": 0.8813990950584412,
"learning_rate": 9.398556190065726e-05,
"loss": 3.1705,
"step": 78400
},
{
"epoch": 8.443655150145302,
"grad_norm": 0.9459818005561829,
"learning_rate": 9.366232087059583e-05,
"loss": 3.1823,
"step": 78450
},
{
"epoch": 8.44903670218491,
"grad_norm": 0.8727448582649231,
"learning_rate": 9.333907984053442e-05,
"loss": 3.156,
"step": 78500
},
{
"epoch": 8.45441825422452,
"grad_norm": 0.8859150409698486,
"learning_rate": 9.3015838810473e-05,
"loss": 3.1686,
"step": 78550
},
{
"epoch": 8.459799806264126,
"grad_norm": 0.903968334197998,
"learning_rate": 9.269906260101281e-05,
"loss": 3.1785,
"step": 78600
},
{
"epoch": 8.465181358303735,
"grad_norm": 0.9025602340698242,
"learning_rate": 9.23758215709514e-05,
"loss": 3.1681,
"step": 78650
},
{
"epoch": 8.470562910343343,
"grad_norm": 0.8712466359138489,
"learning_rate": 9.205258054088998e-05,
"loss": 3.1776,
"step": 78700
},
{
"epoch": 8.475944462382952,
"grad_norm": 0.8721148371696472,
"learning_rate": 9.172933951082856e-05,
"loss": 3.1735,
"step": 78750
},
{
"epoch": 8.48132601442256,
"grad_norm": 0.8949872851371765,
"learning_rate": 9.140609848076715e-05,
"loss": 3.1736,
"step": 78800
},
{
"epoch": 8.486707566462167,
"grad_norm": 0.9402435421943665,
"learning_rate": 9.108285745070574e-05,
"loss": 3.1829,
"step": 78850
},
{
"epoch": 8.492089118501776,
"grad_norm": 0.9432632327079773,
"learning_rate": 9.075961642064432e-05,
"loss": 3.1734,
"step": 78900
},
{
"epoch": 8.497470670541384,
"grad_norm": 0.9873622059822083,
"learning_rate": 9.04363753905829e-05,
"loss": 3.1805,
"step": 78950
},
{
"epoch": 8.502852222580993,
"grad_norm": 0.8836618065834045,
"learning_rate": 9.011313436052148e-05,
"loss": 3.1759,
"step": 79000
},
{
"epoch": 8.502852222580993,
"eval_accuracy": 0.39123579730195324,
"eval_loss": 3.325312852859497,
"eval_runtime": 179.5604,
"eval_samples_per_second": 100.306,
"eval_steps_per_second": 6.271,
"step": 79000
},
{
"epoch": 8.508233774620601,
"grad_norm": 0.8848540186882019,
"learning_rate": 8.978989333046008e-05,
"loss": 3.197,
"step": 79050
},
{
"epoch": 8.513615326660208,
"grad_norm": 0.8550401329994202,
"learning_rate": 8.946665230039865e-05,
"loss": 3.1957,
"step": 79100
},
{
"epoch": 8.518996878699816,
"grad_norm": 0.9009631872177124,
"learning_rate": 8.914341127033724e-05,
"loss": 3.176,
"step": 79150
},
{
"epoch": 8.524378430739425,
"grad_norm": 0.9329746961593628,
"learning_rate": 8.882017024027582e-05,
"loss": 3.166,
"step": 79200
},
{
"epoch": 8.529759982779034,
"grad_norm": 0.9039963483810425,
"learning_rate": 8.84969292102144e-05,
"loss": 3.1773,
"step": 79250
},
{
"epoch": 8.535141534818642,
"grad_norm": 0.8915432691574097,
"learning_rate": 8.817368818015298e-05,
"loss": 3.1771,
"step": 79300
},
{
"epoch": 8.54052308685825,
"grad_norm": 0.8851172924041748,
"learning_rate": 8.785044715009158e-05,
"loss": 3.1684,
"step": 79350
},
{
"epoch": 8.545904638897857,
"grad_norm": 0.8741281032562256,
"learning_rate": 8.752720612003017e-05,
"loss": 3.174,
"step": 79400
},
{
"epoch": 8.551286190937466,
"grad_norm": 0.8948149681091309,
"learning_rate": 8.720396508996874e-05,
"loss": 3.1767,
"step": 79450
},
{
"epoch": 8.556667742977075,
"grad_norm": 0.8773640990257263,
"learning_rate": 8.688072405990734e-05,
"loss": 3.1672,
"step": 79500
},
{
"epoch": 8.562049295016683,
"grad_norm": 0.9167670607566833,
"learning_rate": 8.655748302984592e-05,
"loss": 3.1816,
"step": 79550
},
{
"epoch": 8.567430847056292,
"grad_norm": 0.9075756669044495,
"learning_rate": 8.62342419997845e-05,
"loss": 3.181,
"step": 79600
},
{
"epoch": 8.572812399095898,
"grad_norm": 0.8566796183586121,
"learning_rate": 8.591100096972308e-05,
"loss": 3.1908,
"step": 79650
},
{
"epoch": 8.578193951135507,
"grad_norm": 0.8454053401947021,
"learning_rate": 8.558775993966167e-05,
"loss": 3.1871,
"step": 79700
},
{
"epoch": 8.583575503175116,
"grad_norm": 0.9006174206733704,
"learning_rate": 8.526451890960024e-05,
"loss": 3.1881,
"step": 79750
},
{
"epoch": 8.588957055214724,
"grad_norm": 0.9350295662879944,
"learning_rate": 8.494127787953884e-05,
"loss": 3.1843,
"step": 79800
},
{
"epoch": 8.594338607254333,
"grad_norm": 0.9077459573745728,
"learning_rate": 8.461803684947742e-05,
"loss": 3.1737,
"step": 79850
},
{
"epoch": 8.599720159293941,
"grad_norm": 0.8846340775489807,
"learning_rate": 8.429479581941601e-05,
"loss": 3.1663,
"step": 79900
},
{
"epoch": 8.605101711333548,
"grad_norm": 0.9019478559494019,
"learning_rate": 8.397155478935458e-05,
"loss": 3.1775,
"step": 79950
},
{
"epoch": 8.610483263373157,
"grad_norm": 0.8935966491699219,
"learning_rate": 8.364831375929317e-05,
"loss": 3.1707,
"step": 80000
},
{
"epoch": 8.610483263373157,
"eval_accuracy": 0.3917088719754975,
"eval_loss": 3.32266902923584,
"eval_runtime": 179.6899,
"eval_samples_per_second": 100.234,
"eval_steps_per_second": 6.266,
"step": 80000
},
{
"epoch": 8.615864815412765,
"grad_norm": 0.946051299571991,
"learning_rate": 8.332507272923177e-05,
"loss": 3.1791,
"step": 80050
},
{
"epoch": 8.621246367452374,
"grad_norm": 0.9058233499526978,
"learning_rate": 8.300183169917034e-05,
"loss": 3.177,
"step": 80100
},
{
"epoch": 8.626627919491982,
"grad_norm": 0.8954179286956787,
"learning_rate": 8.267859066910892e-05,
"loss": 3.1708,
"step": 80150
},
{
"epoch": 8.632009471531589,
"grad_norm": 0.9015591740608215,
"learning_rate": 8.235534963904751e-05,
"loss": 3.1996,
"step": 80200
},
{
"epoch": 8.637391023571197,
"grad_norm": 0.8927049040794373,
"learning_rate": 8.203210860898608e-05,
"loss": 3.1693,
"step": 80250
},
{
"epoch": 8.642772575610806,
"grad_norm": 0.901568591594696,
"learning_rate": 8.170886757892467e-05,
"loss": 3.1794,
"step": 80300
},
{
"epoch": 8.648154127650415,
"grad_norm": 0.9400429725646973,
"learning_rate": 8.138562654886327e-05,
"loss": 3.1847,
"step": 80350
},
{
"epoch": 8.653535679690023,
"grad_norm": 0.9230552911758423,
"learning_rate": 8.106238551880185e-05,
"loss": 3.1876,
"step": 80400
},
{
"epoch": 8.658917231729632,
"grad_norm": 0.8878161311149597,
"learning_rate": 8.073914448874043e-05,
"loss": 3.2039,
"step": 80450
},
{
"epoch": 8.664298783769238,
"grad_norm": 0.9797596335411072,
"learning_rate": 8.041590345867901e-05,
"loss": 3.2002,
"step": 80500
},
{
"epoch": 8.669680335808847,
"grad_norm": 0.8710808753967285,
"learning_rate": 8.00926624286176e-05,
"loss": 3.1984,
"step": 80550
},
{
"epoch": 8.675061887848456,
"grad_norm": 0.8963496088981628,
"learning_rate": 7.976942139855617e-05,
"loss": 3.1836,
"step": 80600
},
{
"epoch": 8.680443439888064,
"grad_norm": 0.9148070812225342,
"learning_rate": 7.944618036849477e-05,
"loss": 3.1782,
"step": 80650
},
{
"epoch": 8.685824991927673,
"grad_norm": 0.9107878804206848,
"learning_rate": 7.912293933843335e-05,
"loss": 3.1832,
"step": 80700
},
{
"epoch": 8.69120654396728,
"grad_norm": 0.931254506111145,
"learning_rate": 7.879969830837193e-05,
"loss": 3.172,
"step": 80750
},
{
"epoch": 8.696588096006888,
"grad_norm": 0.8768410682678223,
"learning_rate": 7.847645727831051e-05,
"loss": 3.1846,
"step": 80800
},
{
"epoch": 8.701969648046497,
"grad_norm": 0.8722245693206787,
"learning_rate": 7.815321624824911e-05,
"loss": 3.1826,
"step": 80850
},
{
"epoch": 8.707351200086105,
"grad_norm": 0.9657986760139465,
"learning_rate": 7.78299752181877e-05,
"loss": 3.1677,
"step": 80900
},
{
"epoch": 8.712732752125714,
"grad_norm": 0.8934928774833679,
"learning_rate": 7.750673418812627e-05,
"loss": 3.1852,
"step": 80950
},
{
"epoch": 8.718114304165322,
"grad_norm": 0.9235027432441711,
"learning_rate": 7.718349315806486e-05,
"loss": 3.1837,
"step": 81000
},
{
"epoch": 8.718114304165322,
"eval_accuracy": 0.39210328195826405,
"eval_loss": 3.3185155391693115,
"eval_runtime": 179.7373,
"eval_samples_per_second": 100.207,
"eval_steps_per_second": 6.265,
"step": 81000
},
{
"epoch": 8.723495856204929,
"grad_norm": 0.8857280611991882,
"learning_rate": 7.686025212800344e-05,
"loss": 3.1725,
"step": 81050
},
{
"epoch": 8.728877408244538,
"grad_norm": 0.9040824770927429,
"learning_rate": 7.653701109794203e-05,
"loss": 3.198,
"step": 81100
},
{
"epoch": 8.734258960284146,
"grad_norm": 0.8779809474945068,
"learning_rate": 7.621377006788061e-05,
"loss": 3.1761,
"step": 81150
},
{
"epoch": 8.739640512323755,
"grad_norm": 0.9397569894790649,
"learning_rate": 7.58905290378192e-05,
"loss": 3.1935,
"step": 81200
},
{
"epoch": 8.745022064363363,
"grad_norm": 0.9214434623718262,
"learning_rate": 7.556728800775777e-05,
"loss": 3.1911,
"step": 81250
},
{
"epoch": 8.75040361640297,
"grad_norm": 0.8609477281570435,
"learning_rate": 7.524404697769636e-05,
"loss": 3.1853,
"step": 81300
},
{
"epoch": 8.755785168442578,
"grad_norm": 0.9056183695793152,
"learning_rate": 7.492080594763495e-05,
"loss": 3.1735,
"step": 81350
},
{
"epoch": 8.761166720482187,
"grad_norm": 0.8735660314559937,
"learning_rate": 7.459756491757353e-05,
"loss": 3.176,
"step": 81400
},
{
"epoch": 8.766548272521796,
"grad_norm": 0.846092700958252,
"learning_rate": 7.427432388751212e-05,
"loss": 3.1834,
"step": 81450
},
{
"epoch": 8.771929824561404,
"grad_norm": 0.9211810827255249,
"learning_rate": 7.39510828574507e-05,
"loss": 3.1885,
"step": 81500
},
{
"epoch": 8.777311376601011,
"grad_norm": 0.9464531540870667,
"learning_rate": 7.362784182738928e-05,
"loss": 3.1756,
"step": 81550
},
{
"epoch": 8.78269292864062,
"grad_norm": 0.9161049127578735,
"learning_rate": 7.330460079732786e-05,
"loss": 3.1791,
"step": 81600
},
{
"epoch": 8.788074480680228,
"grad_norm": 0.8961696624755859,
"learning_rate": 7.298135976726645e-05,
"loss": 3.1933,
"step": 81650
},
{
"epoch": 8.793456032719837,
"grad_norm": 0.942209780216217,
"learning_rate": 7.265811873720504e-05,
"loss": 3.167,
"step": 81700
},
{
"epoch": 8.798837584759445,
"grad_norm": 0.8983950018882751,
"learning_rate": 7.233487770714362e-05,
"loss": 3.1915,
"step": 81750
},
{
"epoch": 8.804219136799054,
"grad_norm": 0.969327986240387,
"learning_rate": 7.20116366770822e-05,
"loss": 3.1829,
"step": 81800
},
{
"epoch": 8.80960068883866,
"grad_norm": 0.8701090812683105,
"learning_rate": 7.16883956470208e-05,
"loss": 3.1765,
"step": 81850
},
{
"epoch": 8.814982240878269,
"grad_norm": 0.9157487154006958,
"learning_rate": 7.136515461695938e-05,
"loss": 3.1855,
"step": 81900
},
{
"epoch": 8.820363792917878,
"grad_norm": 0.9211909174919128,
"learning_rate": 7.104191358689796e-05,
"loss": 3.1932,
"step": 81950
},
{
"epoch": 8.825745344957486,
"grad_norm": 0.9057191610336304,
"learning_rate": 7.071867255683654e-05,
"loss": 3.1755,
"step": 82000
},
{
"epoch": 8.825745344957486,
"eval_accuracy": 0.39230526767671114,
"eval_loss": 3.315615653991699,
"eval_runtime": 179.8786,
"eval_samples_per_second": 100.129,
"eval_steps_per_second": 6.26,
"step": 82000
},
{
"epoch": 8.831126896997095,
"grad_norm": 0.8885180354118347,
"learning_rate": 7.039543152677512e-05,
"loss": 3.1984,
"step": 82050
},
{
"epoch": 8.836508449036701,
"grad_norm": 0.8651913404464722,
"learning_rate": 7.007219049671372e-05,
"loss": 3.19,
"step": 82100
},
{
"epoch": 8.84189000107631,
"grad_norm": 0.8910512924194336,
"learning_rate": 6.97489494666523e-05,
"loss": 3.1838,
"step": 82150
},
{
"epoch": 8.847271553115919,
"grad_norm": 0.9161734580993652,
"learning_rate": 6.942570843659088e-05,
"loss": 3.161,
"step": 82200
},
{
"epoch": 8.852653105155527,
"grad_norm": 0.9313191175460815,
"learning_rate": 6.910246740652946e-05,
"loss": 3.2002,
"step": 82250
},
{
"epoch": 8.858034657195136,
"grad_norm": 0.9104998707771301,
"learning_rate": 6.877922637646804e-05,
"loss": 3.173,
"step": 82300
},
{
"epoch": 8.863416209234742,
"grad_norm": 0.9300284385681152,
"learning_rate": 6.845598534640663e-05,
"loss": 3.1875,
"step": 82350
},
{
"epoch": 8.868797761274351,
"grad_norm": 0.9142300486564636,
"learning_rate": 6.813274431634522e-05,
"loss": 3.1742,
"step": 82400
},
{
"epoch": 8.87417931331396,
"grad_norm": 0.9559773206710815,
"learning_rate": 6.78095032862838e-05,
"loss": 3.1745,
"step": 82450
},
{
"epoch": 8.879560865353568,
"grad_norm": 0.9085149168968201,
"learning_rate": 6.748626225622238e-05,
"loss": 3.1852,
"step": 82500
},
{
"epoch": 8.884942417393177,
"grad_norm": 0.8920027017593384,
"learning_rate": 6.716302122616096e-05,
"loss": 3.1765,
"step": 82550
},
{
"epoch": 8.890323969432785,
"grad_norm": 0.8755592703819275,
"learning_rate": 6.684624501670078e-05,
"loss": 3.1659,
"step": 82600
},
{
"epoch": 8.895705521472392,
"grad_norm": 0.8625423908233643,
"learning_rate": 6.652300398663936e-05,
"loss": 3.1941,
"step": 82650
},
{
"epoch": 8.901087073512,
"grad_norm": 0.9189068675041199,
"learning_rate": 6.619976295657794e-05,
"loss": 3.1668,
"step": 82700
},
{
"epoch": 8.906468625551609,
"grad_norm": 0.8962160348892212,
"learning_rate": 6.587652192651654e-05,
"loss": 3.1717,
"step": 82750
},
{
"epoch": 8.911850177591218,
"grad_norm": 0.9272757768630981,
"learning_rate": 6.555328089645512e-05,
"loss": 3.1858,
"step": 82800
},
{
"epoch": 8.917231729630826,
"grad_norm": 0.8833206295967102,
"learning_rate": 6.52300398663937e-05,
"loss": 3.188,
"step": 82850
},
{
"epoch": 8.922613281670433,
"grad_norm": 0.9083338379859924,
"learning_rate": 6.490679883633229e-05,
"loss": 3.1798,
"step": 82900
},
{
"epoch": 8.927994833710041,
"grad_norm": 0.9264318346977234,
"learning_rate": 6.458355780627087e-05,
"loss": 3.1807,
"step": 82950
},
{
"epoch": 8.93337638574965,
"grad_norm": 0.9590030908584595,
"learning_rate": 6.426031677620946e-05,
"loss": 3.1712,
"step": 83000
},
{
"epoch": 8.93337638574965,
"eval_accuracy": 0.39272173419570405,
"eval_loss": 3.3121018409729004,
"eval_runtime": 179.6448,
"eval_samples_per_second": 100.259,
"eval_steps_per_second": 6.268,
"step": 83000
},
{
"epoch": 8.938757937789259,
"grad_norm": 0.8892170786857605,
"learning_rate": 6.393707574614804e-05,
"loss": 3.1905,
"step": 83050
},
{
"epoch": 8.944139489828867,
"grad_norm": 0.8800289034843445,
"learning_rate": 6.361383471608662e-05,
"loss": 3.176,
"step": 83100
},
{
"epoch": 8.949521041868476,
"grad_norm": 0.9177758097648621,
"learning_rate": 6.329059368602521e-05,
"loss": 3.1671,
"step": 83150
},
{
"epoch": 8.954902593908082,
"grad_norm": 0.9445570707321167,
"learning_rate": 6.29673526559638e-05,
"loss": 3.1747,
"step": 83200
},
{
"epoch": 8.960284145947691,
"grad_norm": 0.91263747215271,
"learning_rate": 6.264411162590237e-05,
"loss": 3.1787,
"step": 83250
},
{
"epoch": 8.9656656979873,
"grad_norm": 0.9258759021759033,
"learning_rate": 6.232087059584096e-05,
"loss": 3.1714,
"step": 83300
},
{
"epoch": 8.971047250026908,
"grad_norm": 0.9283280372619629,
"learning_rate": 6.199762956577954e-05,
"loss": 3.1852,
"step": 83350
},
{
"epoch": 8.976428802066517,
"grad_norm": 0.9237959980964661,
"learning_rate": 6.167438853571813e-05,
"loss": 3.1738,
"step": 83400
},
{
"epoch": 8.981810354106123,
"grad_norm": 0.9457531571388245,
"learning_rate": 6.135114750565671e-05,
"loss": 3.1898,
"step": 83450
},
{
"epoch": 8.987191906145732,
"grad_norm": 0.8861507773399353,
"learning_rate": 6.10279064755953e-05,
"loss": 3.1828,
"step": 83500
},
{
"epoch": 8.99257345818534,
"grad_norm": 0.9238635897636414,
"learning_rate": 6.070466544553388e-05,
"loss": 3.1701,
"step": 83550
},
{
"epoch": 8.997955010224949,
"grad_norm": 0.9327380061149597,
"learning_rate": 6.038142441547246e-05,
"loss": 3.1951,
"step": 83600
},
{
"epoch": 9.003336562264558,
"grad_norm": 0.9319394826889038,
"learning_rate": 6.005818338541105e-05,
"loss": 3.1399,
"step": 83650
},
{
"epoch": 9.008718114304166,
"grad_norm": 0.8817294239997864,
"learning_rate": 5.973494235534963e-05,
"loss": 3.1182,
"step": 83700
},
{
"epoch": 9.014099666343773,
"grad_norm": 0.9379280209541321,
"learning_rate": 5.941170132528822e-05,
"loss": 3.1158,
"step": 83750
},
{
"epoch": 9.019481218383381,
"grad_norm": 0.8813740015029907,
"learning_rate": 5.90884602952268e-05,
"loss": 3.1219,
"step": 83800
},
{
"epoch": 9.02486277042299,
"grad_norm": 0.9050074815750122,
"learning_rate": 5.876521926516538e-05,
"loss": 3.101,
"step": 83850
},
{
"epoch": 9.030244322462599,
"grad_norm": 0.9405961036682129,
"learning_rate": 5.844197823510397e-05,
"loss": 3.1274,
"step": 83900
},
{
"epoch": 9.035625874502207,
"grad_norm": 0.9838739633560181,
"learning_rate": 5.811873720504255e-05,
"loss": 3.1278,
"step": 83950
},
{
"epoch": 9.041007426541814,
"grad_norm": 0.9542596340179443,
"learning_rate": 5.7795496174981145e-05,
"loss": 3.1222,
"step": 84000
},
{
"epoch": 9.041007426541814,
"eval_accuracy": 0.3926494800253074,
"eval_loss": 3.314357280731201,
"eval_runtime": 179.6808,
"eval_samples_per_second": 100.239,
"eval_steps_per_second": 6.267,
"step": 84000
},
{
"epoch": 9.046388978581422,
"grad_norm": 0.9362724423408508,
"learning_rate": 5.7472255144919726e-05,
"loss": 3.1182,
"step": 84050
},
{
"epoch": 9.051770530621031,
"grad_norm": 0.9181835651397705,
"learning_rate": 5.714901411485831e-05,
"loss": 3.1153,
"step": 84100
},
{
"epoch": 9.05715208266064,
"grad_norm": 0.9418762922286987,
"learning_rate": 5.6825773084796895e-05,
"loss": 3.1096,
"step": 84150
},
{
"epoch": 9.062533634700248,
"grad_norm": 0.9246426224708557,
"learning_rate": 5.6502532054735476e-05,
"loss": 3.1255,
"step": 84200
},
{
"epoch": 9.067915186739857,
"grad_norm": 0.9008504152297974,
"learning_rate": 5.6179291024674064e-05,
"loss": 3.1114,
"step": 84250
},
{
"epoch": 9.073296738779463,
"grad_norm": 0.8827772736549377,
"learning_rate": 5.5856049994612645e-05,
"loss": 3.1209,
"step": 84300
},
{
"epoch": 9.078678290819072,
"grad_norm": 0.8775843381881714,
"learning_rate": 5.5532808964551227e-05,
"loss": 3.1176,
"step": 84350
},
{
"epoch": 9.08405984285868,
"grad_norm": 0.9226499795913696,
"learning_rate": 5.5209567934489814e-05,
"loss": 3.1154,
"step": 84400
},
{
"epoch": 9.089441394898289,
"grad_norm": 0.8536249399185181,
"learning_rate": 5.4886326904428396e-05,
"loss": 3.1195,
"step": 84450
},
{
"epoch": 9.094822946937898,
"grad_norm": 0.8758836984634399,
"learning_rate": 5.4563085874366983e-05,
"loss": 3.1041,
"step": 84500
},
{
"epoch": 9.100204498977504,
"grad_norm": 0.9015653729438782,
"learning_rate": 5.4239844844305565e-05,
"loss": 3.1224,
"step": 84550
},
{
"epoch": 9.105586051017113,
"grad_norm": 0.9079980850219727,
"learning_rate": 5.392306863484538e-05,
"loss": 3.1171,
"step": 84600
},
{
"epoch": 9.110967603056721,
"grad_norm": 0.9092751145362854,
"learning_rate": 5.359982760478396e-05,
"loss": 3.1239,
"step": 84650
},
{
"epoch": 9.11634915509633,
"grad_norm": 0.9043470621109009,
"learning_rate": 5.327658657472255e-05,
"loss": 3.1177,
"step": 84700
},
{
"epoch": 9.121730707135939,
"grad_norm": 0.924089252948761,
"learning_rate": 5.295334554466113e-05,
"loss": 3.1266,
"step": 84750
},
{
"epoch": 9.127112259175545,
"grad_norm": 0.8986655473709106,
"learning_rate": 5.2630104514599716e-05,
"loss": 3.1482,
"step": 84800
},
{
"epoch": 9.132493811215154,
"grad_norm": 0.898731529712677,
"learning_rate": 5.23068634845383e-05,
"loss": 3.1273,
"step": 84850
},
{
"epoch": 9.137875363254762,
"grad_norm": 0.9368207454681396,
"learning_rate": 5.198362245447688e-05,
"loss": 3.1113,
"step": 84900
},
{
"epoch": 9.143256915294371,
"grad_norm": 0.9111401438713074,
"learning_rate": 5.166038142441547e-05,
"loss": 3.1205,
"step": 84950
},
{
"epoch": 9.14863846733398,
"grad_norm": 0.8901679515838623,
"learning_rate": 5.1337140394354054e-05,
"loss": 3.1353,
"step": 85000
},
{
"epoch": 9.14863846733398,
"eval_accuracy": 0.39298576071309327,
"eval_loss": 3.314081907272339,
"eval_runtime": 179.6507,
"eval_samples_per_second": 100.256,
"eval_steps_per_second": 6.268,
"step": 85000
},
{
"epoch": 9.154020019373588,
"grad_norm": 0.9217543601989746,
"learning_rate": 5.101389936429264e-05,
"loss": 3.1371,
"step": 85050
},
{
"epoch": 9.159401571413195,
"grad_norm": 0.8988335728645325,
"learning_rate": 5.069065833423122e-05,
"loss": 3.1121,
"step": 85100
},
{
"epoch": 9.164783123452803,
"grad_norm": 0.960673451423645,
"learning_rate": 5.0367417304169804e-05,
"loss": 3.1377,
"step": 85150
},
{
"epoch": 9.170164675492412,
"grad_norm": 0.912464439868927,
"learning_rate": 5.004417627410839e-05,
"loss": 3.1354,
"step": 85200
},
{
"epoch": 9.17554622753202,
"grad_norm": 0.8949055075645447,
"learning_rate": 4.972093524404697e-05,
"loss": 3.1273,
"step": 85250
},
{
"epoch": 9.180927779571629,
"grad_norm": 0.9239495396614075,
"learning_rate": 4.939769421398556e-05,
"loss": 3.1125,
"step": 85300
},
{
"epoch": 9.186309331611236,
"grad_norm": 0.9031249284744263,
"learning_rate": 4.907445318392414e-05,
"loss": 3.1272,
"step": 85350
},
{
"epoch": 9.191690883650844,
"grad_norm": 0.9332590103149414,
"learning_rate": 4.875121215386272e-05,
"loss": 3.1247,
"step": 85400
},
{
"epoch": 9.197072435690453,
"grad_norm": 0.9094883799552917,
"learning_rate": 4.842797112380131e-05,
"loss": 3.1337,
"step": 85450
},
{
"epoch": 9.202453987730062,
"grad_norm": 0.9142782092094421,
"learning_rate": 4.810473009373989e-05,
"loss": 3.1185,
"step": 85500
},
{
"epoch": 9.20783553976967,
"grad_norm": 0.8732017874717712,
"learning_rate": 4.778148906367848e-05,
"loss": 3.107,
"step": 85550
},
{
"epoch": 9.213217091809279,
"grad_norm": 0.8603156208992004,
"learning_rate": 4.745824803361706e-05,
"loss": 3.1289,
"step": 85600
},
{
"epoch": 9.218598643848885,
"grad_norm": 0.9196754693984985,
"learning_rate": 4.713500700355564e-05,
"loss": 3.1223,
"step": 85650
},
{
"epoch": 9.223980195888494,
"grad_norm": 0.8952550292015076,
"learning_rate": 4.681176597349424e-05,
"loss": 3.0968,
"step": 85700
},
{
"epoch": 9.229361747928102,
"grad_norm": 0.9112910628318787,
"learning_rate": 4.648852494343282e-05,
"loss": 3.1297,
"step": 85750
},
{
"epoch": 9.234743299967711,
"grad_norm": 0.8979288339614868,
"learning_rate": 4.6165283913371406e-05,
"loss": 3.1185,
"step": 85800
},
{
"epoch": 9.24012485200732,
"grad_norm": 0.9608327150344849,
"learning_rate": 4.584204288330999e-05,
"loss": 3.1162,
"step": 85850
},
{
"epoch": 9.245506404046926,
"grad_norm": 0.8818651437759399,
"learning_rate": 4.551880185324857e-05,
"loss": 3.1501,
"step": 85900
},
{
"epoch": 9.250887956086535,
"grad_norm": 0.9698681235313416,
"learning_rate": 4.5195560823187156e-05,
"loss": 3.1213,
"step": 85950
},
{
"epoch": 9.256269508126143,
"grad_norm": 0.958239734172821,
"learning_rate": 4.487231979312574e-05,
"loss": 3.139,
"step": 86000
},
{
"epoch": 9.256269508126143,
"eval_accuracy": 0.3931503698381322,
"eval_loss": 3.3128864765167236,
"eval_runtime": 179.9322,
"eval_samples_per_second": 100.099,
"eval_steps_per_second": 6.258,
"step": 86000
},
{
"epoch": 9.261651060165752,
"grad_norm": 0.9176580905914307,
"learning_rate": 4.4549078763064325e-05,
"loss": 3.1481,
"step": 86050
},
{
"epoch": 9.26703261220536,
"grad_norm": 0.9287602305412292,
"learning_rate": 4.4225837733002906e-05,
"loss": 3.1331,
"step": 86100
},
{
"epoch": 9.272414164244967,
"grad_norm": 0.9146842360496521,
"learning_rate": 4.390259670294149e-05,
"loss": 3.1283,
"step": 86150
},
{
"epoch": 9.277795716284576,
"grad_norm": 0.9413586258888245,
"learning_rate": 4.3579355672880075e-05,
"loss": 3.1207,
"step": 86200
},
{
"epoch": 9.283177268324184,
"grad_norm": 0.9226080775260925,
"learning_rate": 4.3256114642818656e-05,
"loss": 3.1259,
"step": 86250
},
{
"epoch": 9.288558820363793,
"grad_norm": 0.9166672229766846,
"learning_rate": 4.2932873612757244e-05,
"loss": 3.146,
"step": 86300
},
{
"epoch": 9.293940372403402,
"grad_norm": 0.9550894498825073,
"learning_rate": 4.2609632582695825e-05,
"loss": 3.1108,
"step": 86350
},
{
"epoch": 9.29932192444301,
"grad_norm": 0.9045702219009399,
"learning_rate": 4.2286391552634406e-05,
"loss": 3.1302,
"step": 86400
},
{
"epoch": 9.304703476482617,
"grad_norm": 0.9491354823112488,
"learning_rate": 4.1963150522572994e-05,
"loss": 3.1328,
"step": 86450
},
{
"epoch": 9.310085028522225,
"grad_norm": 0.9591734409332275,
"learning_rate": 4.1639909492511575e-05,
"loss": 3.1423,
"step": 86500
},
{
"epoch": 9.315466580561834,
"grad_norm": 0.9250282049179077,
"learning_rate": 4.131666846245017e-05,
"loss": 3.126,
"step": 86550
},
{
"epoch": 9.320848132601443,
"grad_norm": 0.8863346576690674,
"learning_rate": 4.099989225298998e-05,
"loss": 3.1328,
"step": 86600
},
{
"epoch": 9.326229684641051,
"grad_norm": 0.92470782995224,
"learning_rate": 4.0676651222928564e-05,
"loss": 3.1243,
"step": 86650
},
{
"epoch": 9.331611236680658,
"grad_norm": 0.8946364521980286,
"learning_rate": 4.035341019286714e-05,
"loss": 3.1377,
"step": 86700
},
{
"epoch": 9.336992788720266,
"grad_norm": 0.871094286441803,
"learning_rate": 4.003016916280573e-05,
"loss": 3.1324,
"step": 86750
},
{
"epoch": 9.342374340759875,
"grad_norm": 0.9345404505729675,
"learning_rate": 3.9706928132744314e-05,
"loss": 3.1413,
"step": 86800
},
{
"epoch": 9.347755892799483,
"grad_norm": 0.8760411143302917,
"learning_rate": 3.93836871026829e-05,
"loss": 3.1308,
"step": 86850
},
{
"epoch": 9.353137444839092,
"grad_norm": 0.9380771517753601,
"learning_rate": 3.906044607262148e-05,
"loss": 3.1174,
"step": 86900
},
{
"epoch": 9.3585189968787,
"grad_norm": 0.8624494075775146,
"learning_rate": 3.8737205042560064e-05,
"loss": 3.13,
"step": 86950
},
{
"epoch": 9.363900548918307,
"grad_norm": 0.9666469693183899,
"learning_rate": 3.841396401249865e-05,
"loss": 3.1295,
"step": 87000
},
{
"epoch": 9.363900548918307,
"eval_accuracy": 0.3934488393209586,
"eval_loss": 3.3093247413635254,
"eval_runtime": 179.5385,
"eval_samples_per_second": 100.318,
"eval_steps_per_second": 6.272,
"step": 87000
},
{
"epoch": 9.369282100957916,
"grad_norm": 0.91963791847229,
"learning_rate": 3.809072298243723e-05,
"loss": 3.1292,
"step": 87050
},
{
"epoch": 9.374663652997524,
"grad_norm": 0.9316657185554504,
"learning_rate": 3.776748195237582e-05,
"loss": 3.1515,
"step": 87100
},
{
"epoch": 9.380045205037133,
"grad_norm": 0.9758146405220032,
"learning_rate": 3.74442409223144e-05,
"loss": 3.1249,
"step": 87150
},
{
"epoch": 9.385426757076742,
"grad_norm": 0.904312789440155,
"learning_rate": 3.712099989225299e-05,
"loss": 3.1384,
"step": 87200
},
{
"epoch": 9.390808309116348,
"grad_norm": 0.9568246006965637,
"learning_rate": 3.679775886219157e-05,
"loss": 3.1241,
"step": 87250
},
{
"epoch": 9.396189861155957,
"grad_norm": 0.9116389155387878,
"learning_rate": 3.647451783213015e-05,
"loss": 3.1473,
"step": 87300
},
{
"epoch": 9.401571413195565,
"grad_norm": 0.9002155065536499,
"learning_rate": 3.615127680206874e-05,
"loss": 3.1357,
"step": 87350
},
{
"epoch": 9.406952965235174,
"grad_norm": 0.984461784362793,
"learning_rate": 3.582803577200732e-05,
"loss": 3.1345,
"step": 87400
},
{
"epoch": 9.412334517274783,
"grad_norm": 0.9499680995941162,
"learning_rate": 3.550479474194591e-05,
"loss": 3.1226,
"step": 87450
},
{
"epoch": 9.417716069314391,
"grad_norm": 0.9205060005187988,
"learning_rate": 3.518155371188449e-05,
"loss": 3.127,
"step": 87500
},
{
"epoch": 9.423097621353998,
"grad_norm": 0.9197761416435242,
"learning_rate": 3.485831268182308e-05,
"loss": 3.1202,
"step": 87550
},
{
"epoch": 9.428479173393606,
"grad_norm": 0.9335700273513794,
"learning_rate": 3.453507165176166e-05,
"loss": 3.1419,
"step": 87600
},
{
"epoch": 9.433860725433215,
"grad_norm": 1.0131042003631592,
"learning_rate": 3.421183062170025e-05,
"loss": 3.1274,
"step": 87650
},
{
"epoch": 9.439242277472824,
"grad_norm": 0.9497924447059631,
"learning_rate": 3.388858959163883e-05,
"loss": 3.1297,
"step": 87700
},
{
"epoch": 9.444623829512432,
"grad_norm": 0.935814380645752,
"learning_rate": 3.3565348561577416e-05,
"loss": 3.128,
"step": 87750
},
{
"epoch": 9.450005381552039,
"grad_norm": 0.9703516960144043,
"learning_rate": 3.3242107531516e-05,
"loss": 3.1104,
"step": 87800
},
{
"epoch": 9.455386933591647,
"grad_norm": 0.935706377029419,
"learning_rate": 3.291886650145458e-05,
"loss": 3.1187,
"step": 87850
},
{
"epoch": 9.460768485631256,
"grad_norm": 0.9084662795066833,
"learning_rate": 3.2595625471393166e-05,
"loss": 3.1274,
"step": 87900
},
{
"epoch": 9.466150037670864,
"grad_norm": 0.9186142683029175,
"learning_rate": 3.2272384441331754e-05,
"loss": 3.1346,
"step": 87950
},
{
"epoch": 9.471531589710473,
"grad_norm": 0.9566908478736877,
"learning_rate": 3.1949143411270335e-05,
"loss": 3.1359,
"step": 88000
},
{
"epoch": 9.471531589710473,
"eval_accuracy": 0.3937906613060229,
"eval_loss": 3.3066892623901367,
"eval_runtime": 179.5664,
"eval_samples_per_second": 100.303,
"eval_steps_per_second": 6.271,
"step": 88000
},
{
"epoch": 9.476913141750082,
"grad_norm": 0.9515846967697144,
"learning_rate": 3.1625902381208916e-05,
"loss": 3.1377,
"step": 88050
},
{
"epoch": 9.482294693789688,
"grad_norm": 0.9604725241661072,
"learning_rate": 3.1302661351147504e-05,
"loss": 3.147,
"step": 88100
},
{
"epoch": 9.487676245829297,
"grad_norm": 0.9489278197288513,
"learning_rate": 3.0979420321086085e-05,
"loss": 3.1266,
"step": 88150
},
{
"epoch": 9.493057797868905,
"grad_norm": 0.8774126172065735,
"learning_rate": 3.065617929102467e-05,
"loss": 3.1176,
"step": 88200
},
{
"epoch": 9.498439349908514,
"grad_norm": 0.8997475504875183,
"learning_rate": 3.0332938260963258e-05,
"loss": 3.1124,
"step": 88250
},
{
"epoch": 9.503820901948123,
"grad_norm": 0.9679112434387207,
"learning_rate": 3.000969723090184e-05,
"loss": 3.1337,
"step": 88300
},
{
"epoch": 9.50920245398773,
"grad_norm": 0.8895472288131714,
"learning_rate": 2.9686456200840423e-05,
"loss": 3.1079,
"step": 88350
},
{
"epoch": 9.514584006027338,
"grad_norm": 0.8873099684715271,
"learning_rate": 2.9363215170779008e-05,
"loss": 3.1358,
"step": 88400
},
{
"epoch": 9.519965558066946,
"grad_norm": 0.9306134581565857,
"learning_rate": 2.9039974140717592e-05,
"loss": 3.1186,
"step": 88450
},
{
"epoch": 9.525347110106555,
"grad_norm": 0.9422151446342468,
"learning_rate": 2.871673311065618e-05,
"loss": 3.1145,
"step": 88500
},
{
"epoch": 9.530728662146164,
"grad_norm": 0.8907070755958557,
"learning_rate": 2.839349208059476e-05,
"loss": 3.1338,
"step": 88550
},
{
"epoch": 9.536110214185772,
"grad_norm": 0.9752025008201599,
"learning_rate": 2.8070251050533346e-05,
"loss": 3.1358,
"step": 88600
},
{
"epoch": 9.541491766225379,
"grad_norm": 0.9604821801185608,
"learning_rate": 2.775347484107316e-05,
"loss": 3.155,
"step": 88650
},
{
"epoch": 9.546873318264987,
"grad_norm": 0.9035794734954834,
"learning_rate": 2.7430233811011744e-05,
"loss": 3.1376,
"step": 88700
},
{
"epoch": 9.552254870304596,
"grad_norm": 0.909589946269989,
"learning_rate": 2.7106992780950328e-05,
"loss": 3.1384,
"step": 88750
},
{
"epoch": 9.557636422344205,
"grad_norm": 0.9262523651123047,
"learning_rate": 2.678375175088891e-05,
"loss": 3.1124,
"step": 88800
},
{
"epoch": 9.563017974383813,
"grad_norm": 0.8929489850997925,
"learning_rate": 2.6466975541428722e-05,
"loss": 3.13,
"step": 88850
},
{
"epoch": 9.56839952642342,
"grad_norm": 0.9192137718200684,
"learning_rate": 2.6143734511367307e-05,
"loss": 3.1142,
"step": 88900
},
{
"epoch": 9.573781078463028,
"grad_norm": 0.9199954271316528,
"learning_rate": 2.582049348130589e-05,
"loss": 3.1292,
"step": 88950
},
{
"epoch": 9.579162630502637,
"grad_norm": 0.9602384567260742,
"learning_rate": 2.5497252451244476e-05,
"loss": 3.1312,
"step": 89000
},
{
"epoch": 9.579162630502637,
"eval_accuracy": 0.39399927485062675,
"eval_loss": 3.3048999309539795,
"eval_runtime": 179.8132,
"eval_samples_per_second": 100.165,
"eval_steps_per_second": 6.262,
"step": 89000
},
{
"epoch": 9.584544182542245,
"grad_norm": 0.9000515937805176,
"learning_rate": 2.517401142118306e-05,
"loss": 3.1282,
"step": 89050
},
{
"epoch": 9.589925734581854,
"grad_norm": 0.9287469387054443,
"learning_rate": 2.485077039112164e-05,
"loss": 3.1414,
"step": 89100
},
{
"epoch": 9.59530728662146,
"grad_norm": 0.8891144394874573,
"learning_rate": 2.4527529361060226e-05,
"loss": 3.1132,
"step": 89150
},
{
"epoch": 9.60068883866107,
"grad_norm": 0.9419028759002686,
"learning_rate": 2.4204288330998814e-05,
"loss": 3.1305,
"step": 89200
},
{
"epoch": 9.606070390700678,
"grad_norm": 0.9434340000152588,
"learning_rate": 2.38810473009374e-05,
"loss": 3.1396,
"step": 89250
},
{
"epoch": 9.611451942740286,
"grad_norm": 0.9384384155273438,
"learning_rate": 2.3557806270875983e-05,
"loss": 3.1392,
"step": 89300
},
{
"epoch": 9.616833494779895,
"grad_norm": 0.9376377463340759,
"learning_rate": 2.3234565240814564e-05,
"loss": 3.1508,
"step": 89350
},
{
"epoch": 9.622215046819504,
"grad_norm": 0.9515202641487122,
"learning_rate": 2.291132421075315e-05,
"loss": 3.1305,
"step": 89400
},
{
"epoch": 9.62759659885911,
"grad_norm": 0.8941816091537476,
"learning_rate": 2.2588083180691733e-05,
"loss": 3.1137,
"step": 89450
},
{
"epoch": 9.632978150898719,
"grad_norm": 0.9425456523895264,
"learning_rate": 2.2264842150630318e-05,
"loss": 3.1185,
"step": 89500
},
{
"epoch": 9.638359702938327,
"grad_norm": 0.9340146780014038,
"learning_rate": 2.1941601120568905e-05,
"loss": 3.1293,
"step": 89550
},
{
"epoch": 9.643741254977936,
"grad_norm": 0.9706893563270569,
"learning_rate": 2.1618360090507483e-05,
"loss": 3.1119,
"step": 89600
},
{
"epoch": 9.649122807017545,
"grad_norm": 0.8813906311988831,
"learning_rate": 2.1301583881047296e-05,
"loss": 3.144,
"step": 89650
},
{
"epoch": 9.654504359057151,
"grad_norm": 0.8951605558395386,
"learning_rate": 2.097834285098588e-05,
"loss": 3.1292,
"step": 89700
},
{
"epoch": 9.65988591109676,
"grad_norm": 0.9236047863960266,
"learning_rate": 2.065510182092447e-05,
"loss": 3.1305,
"step": 89750
},
{
"epoch": 9.665267463136368,
"grad_norm": 0.8858392238616943,
"learning_rate": 2.0331860790863053e-05,
"loss": 3.1305,
"step": 89800
},
{
"epoch": 9.670649015175977,
"grad_norm": 0.931576669216156,
"learning_rate": 2.0008619760801638e-05,
"loss": 3.1385,
"step": 89850
},
{
"epoch": 9.676030567215586,
"grad_norm": 0.9866983890533447,
"learning_rate": 1.968537873074022e-05,
"loss": 3.1275,
"step": 89900
},
{
"epoch": 9.681412119255192,
"grad_norm": 0.9134666323661804,
"learning_rate": 1.9362137700678803e-05,
"loss": 3.1039,
"step": 89950
},
{
"epoch": 9.6867936712948,
"grad_norm": 0.9204062819480896,
"learning_rate": 1.9038896670617388e-05,
"loss": 3.1326,
"step": 90000
},
{
"epoch": 9.6867936712948,
"eval_accuracy": 0.3943079577049077,
"eval_loss": 3.302588701248169,
"eval_runtime": 179.7361,
"eval_samples_per_second": 100.208,
"eval_steps_per_second": 6.265,
"step": 90000
},
{
"epoch": 9.69217522333441,
"grad_norm": 0.8718501329421997,
"learning_rate": 1.8715655640555972e-05,
"loss": 3.1206,
"step": 90050
},
{
"epoch": 9.697556775374018,
"grad_norm": 0.9207223653793335,
"learning_rate": 1.8392414610494557e-05,
"loss": 3.1523,
"step": 90100
},
{
"epoch": 9.702938327413626,
"grad_norm": 0.9116508364677429,
"learning_rate": 1.806917358043314e-05,
"loss": 3.1253,
"step": 90150
},
{
"epoch": 9.708319879453235,
"grad_norm": 0.9184285402297974,
"learning_rate": 1.7745932550371726e-05,
"loss": 3.1539,
"step": 90200
},
{
"epoch": 9.713701431492842,
"grad_norm": 0.9284878373146057,
"learning_rate": 1.742269152031031e-05,
"loss": 3.1282,
"step": 90250
},
{
"epoch": 9.71908298353245,
"grad_norm": 0.9029609560966492,
"learning_rate": 1.7099450490248895e-05,
"loss": 3.128,
"step": 90300
},
{
"epoch": 9.724464535572059,
"grad_norm": 0.9100344777107239,
"learning_rate": 1.677620946018748e-05,
"loss": 3.1314,
"step": 90350
},
{
"epoch": 9.729846087611667,
"grad_norm": 0.9491696953773499,
"learning_rate": 1.6452968430126064e-05,
"loss": 3.1323,
"step": 90400
},
{
"epoch": 9.735227639651276,
"grad_norm": 0.9546681046485901,
"learning_rate": 1.6129727400064645e-05,
"loss": 3.1522,
"step": 90450
},
{
"epoch": 9.740609191690883,
"grad_norm": 0.967365026473999,
"learning_rate": 1.580648637000323e-05,
"loss": 3.1408,
"step": 90500
},
{
"epoch": 9.745990743730491,
"grad_norm": 0.9326353669166565,
"learning_rate": 1.5483245339941817e-05,
"loss": 3.1218,
"step": 90550
},
{
"epoch": 9.7513722957701,
"grad_norm": 0.9593531489372253,
"learning_rate": 1.5160004309880398e-05,
"loss": 3.1252,
"step": 90600
},
{
"epoch": 9.756753847809708,
"grad_norm": 0.9383837580680847,
"learning_rate": 1.4836763279818985e-05,
"loss": 3.1253,
"step": 90650
},
{
"epoch": 9.762135399849317,
"grad_norm": 0.9309119582176208,
"learning_rate": 1.4513522249757567e-05,
"loss": 3.1303,
"step": 90700
},
{
"epoch": 9.767516951888926,
"grad_norm": 0.8812274932861328,
"learning_rate": 1.4190281219696152e-05,
"loss": 3.1207,
"step": 90750
},
{
"epoch": 9.772898503928532,
"grad_norm": 0.9175823330879211,
"learning_rate": 1.3867040189634736e-05,
"loss": 3.1227,
"step": 90800
},
{
"epoch": 9.77828005596814,
"grad_norm": 0.9752672910690308,
"learning_rate": 1.3543799159573321e-05,
"loss": 3.1246,
"step": 90850
},
{
"epoch": 9.78366160800775,
"grad_norm": 0.9278659820556641,
"learning_rate": 1.3220558129511905e-05,
"loss": 3.1261,
"step": 90900
},
{
"epoch": 9.789043160047358,
"grad_norm": 0.9404040575027466,
"learning_rate": 1.2897317099450488e-05,
"loss": 3.1244,
"step": 90950
},
{
"epoch": 9.794424712086967,
"grad_norm": 0.9082286953926086,
"learning_rate": 1.2574076069389073e-05,
"loss": 3.1159,
"step": 91000
},
{
"epoch": 9.794424712086967,
"eval_accuracy": 0.394532651876908,
"eval_loss": 3.3007988929748535,
"eval_runtime": 180.0492,
"eval_samples_per_second": 100.034,
"eval_steps_per_second": 6.254,
"step": 91000
},
{
"epoch": 9.799806264126573,
"grad_norm": 0.9126229286193848,
"learning_rate": 1.2250835039327659e-05,
"loss": 3.1382,
"step": 91050
},
{
"epoch": 9.805187816166182,
"grad_norm": 0.9428226351737976,
"learning_rate": 1.1927594009266242e-05,
"loss": 3.149,
"step": 91100
},
{
"epoch": 9.81056936820579,
"grad_norm": 0.933461606502533,
"learning_rate": 1.1604352979204826e-05,
"loss": 3.1239,
"step": 91150
},
{
"epoch": 9.815950920245399,
"grad_norm": 0.9387831687927246,
"learning_rate": 1.1281111949143409e-05,
"loss": 3.1187,
"step": 91200
},
{
"epoch": 9.821332472285007,
"grad_norm": 0.9336826205253601,
"learning_rate": 1.0957870919081995e-05,
"loss": 3.138,
"step": 91250
},
{
"epoch": 9.826714024324616,
"grad_norm": 0.943970799446106,
"learning_rate": 1.063462988902058e-05,
"loss": 3.1231,
"step": 91300
},
{
"epoch": 9.832095576364223,
"grad_norm": 0.9266682267189026,
"learning_rate": 1.0311388858959162e-05,
"loss": 3.1375,
"step": 91350
},
{
"epoch": 9.837477128403831,
"grad_norm": 0.9433984756469727,
"learning_rate": 9.988147828897747e-06,
"loss": 3.1164,
"step": 91400
},
{
"epoch": 9.84285868044344,
"grad_norm": 0.9360959529876709,
"learning_rate": 9.66490679883633e-06,
"loss": 3.1196,
"step": 91450
},
{
"epoch": 9.848240232483048,
"grad_norm": 0.9291725158691406,
"learning_rate": 9.341665768774916e-06,
"loss": 3.1343,
"step": 91500
},
{
"epoch": 9.853621784522657,
"grad_norm": 1.001280426979065,
"learning_rate": 9.018424738713499e-06,
"loss": 3.1302,
"step": 91550
},
{
"epoch": 9.859003336562264,
"grad_norm": 0.9111364483833313,
"learning_rate": 8.695183708652085e-06,
"loss": 3.1544,
"step": 91600
},
{
"epoch": 9.864384888601872,
"grad_norm": 0.9523101449012756,
"learning_rate": 8.37194267859067e-06,
"loss": 3.1224,
"step": 91650
},
{
"epoch": 9.869766440641481,
"grad_norm": 0.9617007374763489,
"learning_rate": 8.048701648529252e-06,
"loss": 3.1031,
"step": 91700
},
{
"epoch": 9.87514799268109,
"grad_norm": 0.9206680655479431,
"learning_rate": 7.725460618467837e-06,
"loss": 3.143,
"step": 91750
},
{
"epoch": 9.880529544720698,
"grad_norm": 0.911401629447937,
"learning_rate": 7.40221958840642e-06,
"loss": 3.1169,
"step": 91800
},
{
"epoch": 9.885911096760307,
"grad_norm": 0.9321516752243042,
"learning_rate": 7.078978558345006e-06,
"loss": 3.1209,
"step": 91850
},
{
"epoch": 9.891292648799913,
"grad_norm": 0.9269621968269348,
"learning_rate": 6.75573752828359e-06,
"loss": 3.1273,
"step": 91900
},
{
"epoch": 9.896674200839522,
"grad_norm": 0.9078811407089233,
"learning_rate": 6.432496498222174e-06,
"loss": 3.12,
"step": 91950
},
{
"epoch": 9.90205575287913,
"grad_norm": 0.9647493362426758,
"learning_rate": 6.1092554681607575e-06,
"loss": 3.1335,
"step": 92000
},
{
"epoch": 9.90205575287913,
"eval_accuracy": 0.39470041193569355,
"eval_loss": 3.298912763595581,
"eval_runtime": 179.4924,
"eval_samples_per_second": 100.344,
"eval_steps_per_second": 6.273,
"step": 92000
},
{
"epoch": 9.907437304918739,
"grad_norm": 0.8831244111061096,
"learning_rate": 5.786014438099342e-06,
"loss": 3.1265,
"step": 92050
},
{
"epoch": 9.912818856958348,
"grad_norm": 0.9248406887054443,
"learning_rate": 5.4627734080379264e-06,
"loss": 3.1265,
"step": 92100
},
{
"epoch": 9.918200408997954,
"grad_norm": 0.8974929451942444,
"learning_rate": 5.139532377976511e-06,
"loss": 3.1342,
"step": 92150
},
{
"epoch": 9.923581961037563,
"grad_norm": 0.8904796838760376,
"learning_rate": 4.816291347915095e-06,
"loss": 3.141,
"step": 92200
},
{
"epoch": 9.928963513077171,
"grad_norm": 0.9588956236839294,
"learning_rate": 4.493050317853679e-06,
"loss": 3.132,
"step": 92250
},
{
"epoch": 9.93434506511678,
"grad_norm": 0.8969722390174866,
"learning_rate": 4.169809287792264e-06,
"loss": 3.1247,
"step": 92300
},
{
"epoch": 9.939726617156388,
"grad_norm": 0.9294554591178894,
"learning_rate": 3.846568257730847e-06,
"loss": 3.1151,
"step": 92350
},
{
"epoch": 9.945108169195997,
"grad_norm": 0.9306110143661499,
"learning_rate": 3.523327227669432e-06,
"loss": 3.1447,
"step": 92400
},
{
"epoch": 9.950489721235604,
"grad_norm": 0.9280375838279724,
"learning_rate": 3.2000861976080162e-06,
"loss": 3.1198,
"step": 92450
},
{
"epoch": 9.955871273275212,
"grad_norm": 0.9354221820831299,
"learning_rate": 2.8768451675466007e-06,
"loss": 3.1236,
"step": 92500
},
{
"epoch": 9.961252825314821,
"grad_norm": 0.933273434638977,
"learning_rate": 2.5536041374851848e-06,
"loss": 3.119,
"step": 92550
},
{
"epoch": 9.96663437735443,
"grad_norm": 0.9233914613723755,
"learning_rate": 2.230363107423769e-06,
"loss": 3.1258,
"step": 92600
},
{
"epoch": 9.972015929394038,
"grad_norm": 0.8901816010475159,
"learning_rate": 1.9071220773623531e-06,
"loss": 3.1333,
"step": 92650
},
{
"epoch": 9.977397481433645,
"grad_norm": 0.904162585735321,
"learning_rate": 1.5838810473009372e-06,
"loss": 3.1192,
"step": 92700
},
{
"epoch": 9.982779033473253,
"grad_norm": 0.9620854258537292,
"learning_rate": 1.2606400172395215e-06,
"loss": 3.1128,
"step": 92750
},
{
"epoch": 9.988160585512862,
"grad_norm": 0.962112545967102,
"learning_rate": 9.373989871781058e-07,
"loss": 3.1307,
"step": 92800
},
{
"epoch": 9.99354213755247,
"grad_norm": 0.9469469785690308,
"learning_rate": 6.1415795711669e-07,
"loss": 3.139,
"step": 92850
},
{
"epoch": 9.998923689592079,
"grad_norm": 0.9218598008155823,
"learning_rate": 2.909169270552742e-07,
"loss": 3.1296,
"step": 92900
},
{
"epoch": 10.0,
"step": 92910,
"total_flos": 7.7683166281728e+17,
"train_loss": 3.452293256932703,
"train_runtime": 79255.5774,
"train_samples_per_second": 37.512,
"train_steps_per_second": 1.172
}
],
"logging_steps": 50,
"max_steps": 92910,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.7683166281728e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}