100M__634 / checkpoint-80000 /trainer_state.json
craa's picture
Training in progress, step 80000, checkpoint
0574667 verified
Invalid JSON: Unexpected token 'I', ..."ad_norm": Infinity, "... is not valid JSON
{
"best_metric": 3.3235464096069336,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M__634/checkpoint-80000",
"epoch": 8.625336927223719,
"eval_steps": 1000,
"global_step": 80000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005390835579514825,
"grad_norm": 1.412644624710083,
"learning_rate": 0.0003,
"loss": 8.6259,
"step": 50
},
{
"epoch": 0.01078167115902965,
"grad_norm": 3.4297802448272705,
"learning_rate": 0.0006,
"loss": 6.925,
"step": 100
},
{
"epoch": 0.016172506738544475,
"grad_norm": 1.0797241926193237,
"learning_rate": 0.0005996762007555315,
"loss": 6.4904,
"step": 150
},
{
"epoch": 0.0215633423180593,
"grad_norm": 1.8681477308273315,
"learning_rate": 0.000599352401511063,
"loss": 6.2514,
"step": 200
},
{
"epoch": 0.026954177897574125,
"grad_norm": 1.0239183902740479,
"learning_rate": 0.0005990286022665946,
"loss": 6.0911,
"step": 250
},
{
"epoch": 0.03234501347708895,
"grad_norm": 1.5482831001281738,
"learning_rate": 0.0005987048030221263,
"loss": 5.9845,
"step": 300
},
{
"epoch": 0.03773584905660377,
"grad_norm": 1.6334092617034912,
"learning_rate": 0.0005983810037776578,
"loss": 5.875,
"step": 350
},
{
"epoch": 0.0431266846361186,
"grad_norm": 1.5844093561172485,
"learning_rate": 0.0005980572045331894,
"loss": 5.7818,
"step": 400
},
{
"epoch": 0.04851752021563342,
"grad_norm": 1.2689578533172607,
"learning_rate": 0.0005977334052887209,
"loss": 5.712,
"step": 450
},
{
"epoch": 0.05390835579514825,
"grad_norm": 1.3288975954055786,
"learning_rate": 0.0005974096060442526,
"loss": 5.6535,
"step": 500
},
{
"epoch": 0.05929919137466307,
"grad_norm": 2.041428804397583,
"learning_rate": 0.0005970858067997841,
"loss": 5.5725,
"step": 550
},
{
"epoch": 0.0646900269541779,
"grad_norm": 1.4345282316207886,
"learning_rate": 0.0005967620075553157,
"loss": 5.5081,
"step": 600
},
{
"epoch": 0.07008086253369272,
"grad_norm": 1.1454274654388428,
"learning_rate": 0.0005964382083108472,
"loss": 5.4223,
"step": 650
},
{
"epoch": 0.07547169811320754,
"grad_norm": 1.3169114589691162,
"learning_rate": 0.0005961144090663788,
"loss": 5.3711,
"step": 700
},
{
"epoch": 0.08086253369272237,
"grad_norm": 1.3919389247894287,
"learning_rate": 0.0005957906098219104,
"loss": 5.3003,
"step": 750
},
{
"epoch": 0.0862533692722372,
"grad_norm": 1.0709481239318848,
"learning_rate": 0.0005954668105774419,
"loss": 5.2637,
"step": 800
},
{
"epoch": 0.09164420485175202,
"grad_norm": 0.8944941759109497,
"learning_rate": 0.0005951430113329735,
"loss": 5.1967,
"step": 850
},
{
"epoch": 0.09703504043126684,
"grad_norm": 1.1984753608703613,
"learning_rate": 0.0005948192120885051,
"loss": 5.1709,
"step": 900
},
{
"epoch": 0.10242587601078167,
"grad_norm": 1.0900869369506836,
"learning_rate": 0.0005944954128440366,
"loss": 5.1332,
"step": 950
},
{
"epoch": 0.1078167115902965,
"grad_norm": 1.4634649753570557,
"learning_rate": 0.0005941716135995682,
"loss": 5.0778,
"step": 1000
},
{
"epoch": 0.1078167115902965,
"eval_accuracy": 0.22697957684263617,
"eval_loss": 5.024423599243164,
"eval_runtime": 183.2168,
"eval_samples_per_second": 98.304,
"eval_steps_per_second": 6.146,
"step": 1000
},
{
"epoch": 0.11320754716981132,
"grad_norm": 1.0724071264266968,
"learning_rate": 0.0005938478143550997,
"loss": 5.039,
"step": 1050
},
{
"epoch": 0.11859838274932614,
"grad_norm": 0.7434335947036743,
"learning_rate": 0.0005935240151106314,
"loss": 5.0145,
"step": 1100
},
{
"epoch": 0.12398921832884097,
"grad_norm": 1.1407990455627441,
"learning_rate": 0.0005932002158661629,
"loss": 4.9884,
"step": 1150
},
{
"epoch": 0.1293800539083558,
"grad_norm": 1.3579697608947754,
"learning_rate": 0.0005928764166216945,
"loss": 4.9232,
"step": 1200
},
{
"epoch": 0.1347708894878706,
"grad_norm": 1.170091986656189,
"learning_rate": 0.000592552617377226,
"loss": 4.9022,
"step": 1250
},
{
"epoch": 0.14016172506738545,
"grad_norm": 1.056681513786316,
"learning_rate": 0.0005922288181327577,
"loss": 4.8796,
"step": 1300
},
{
"epoch": 0.14555256064690028,
"grad_norm": 0.8261628746986389,
"learning_rate": 0.0005919050188882893,
"loss": 4.8425,
"step": 1350
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.7253502011299133,
"learning_rate": 0.0005915812196438207,
"loss": 4.836,
"step": 1400
},
{
"epoch": 0.15633423180592992,
"grad_norm": 1.0881081819534302,
"learning_rate": 0.0005912574203993524,
"loss": 4.8436,
"step": 1450
},
{
"epoch": 0.16172506738544473,
"grad_norm": 0.8581550717353821,
"learning_rate": 0.0005909336211548839,
"loss": 4.8009,
"step": 1500
},
{
"epoch": 0.16711590296495957,
"grad_norm": 0.9875918030738831,
"learning_rate": 0.0005906098219104155,
"loss": 4.7541,
"step": 1550
},
{
"epoch": 0.1725067385444744,
"grad_norm": 0.8188138008117676,
"learning_rate": 0.000590286022665947,
"loss": 4.744,
"step": 1600
},
{
"epoch": 0.1778975741239892,
"grad_norm": 0.8326888084411621,
"learning_rate": 0.0005899622234214787,
"loss": 4.7177,
"step": 1650
},
{
"epoch": 0.18328840970350405,
"grad_norm": 1.0679171085357666,
"learning_rate": 0.0005896384241770102,
"loss": 4.6933,
"step": 1700
},
{
"epoch": 0.18867924528301888,
"grad_norm": 0.8685047626495361,
"learning_rate": 0.0005893146249325418,
"loss": 4.7025,
"step": 1750
},
{
"epoch": 0.1940700808625337,
"grad_norm": 0.8494108319282532,
"learning_rate": 0.0005889908256880733,
"loss": 4.6703,
"step": 1800
},
{
"epoch": 0.19946091644204852,
"grad_norm": 1.175525426864624,
"learning_rate": 0.0005886670264436049,
"loss": 4.6366,
"step": 1850
},
{
"epoch": 0.20485175202156333,
"grad_norm": 1.0531870126724243,
"learning_rate": 0.0005883432271991365,
"loss": 4.6263,
"step": 1900
},
{
"epoch": 0.21024258760107817,
"grad_norm": 0.9467228651046753,
"learning_rate": 0.0005880194279546681,
"loss": 4.6254,
"step": 1950
},
{
"epoch": 0.215633423180593,
"grad_norm": 0.9411425590515137,
"learning_rate": 0.0005876956287101996,
"loss": 4.5738,
"step": 2000
},
{
"epoch": 0.215633423180593,
"eval_accuracy": 0.271918085284042,
"eval_loss": 4.5008015632629395,
"eval_runtime": 181.4309,
"eval_samples_per_second": 99.272,
"eval_steps_per_second": 6.206,
"step": 2000
},
{
"epoch": 0.2210242587601078,
"grad_norm": 0.920215368270874,
"learning_rate": 0.0005873718294657312,
"loss": 4.5559,
"step": 2050
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.7012114524841309,
"learning_rate": 0.0005870480302212628,
"loss": 4.5445,
"step": 2100
},
{
"epoch": 0.23180592991913745,
"grad_norm": 0.9538075923919678,
"learning_rate": 0.0005867242309767943,
"loss": 4.5259,
"step": 2150
},
{
"epoch": 0.2371967654986523,
"grad_norm": 0.7847324013710022,
"learning_rate": 0.0005864004317323259,
"loss": 4.4889,
"step": 2200
},
{
"epoch": 0.24258760107816713,
"grad_norm": 1.0440484285354614,
"learning_rate": 0.0005860766324878575,
"loss": 4.504,
"step": 2250
},
{
"epoch": 0.24797843665768193,
"grad_norm": 0.782093346118927,
"learning_rate": 0.000585752833243389,
"loss": 4.4768,
"step": 2300
},
{
"epoch": 0.25336927223719674,
"grad_norm": 0.9644035696983337,
"learning_rate": 0.0005854290339989206,
"loss": 4.4522,
"step": 2350
},
{
"epoch": 0.2587601078167116,
"grad_norm": 1.0412849187850952,
"learning_rate": 0.0005851052347544521,
"loss": 4.4489,
"step": 2400
},
{
"epoch": 0.2641509433962264,
"grad_norm": 0.7987921237945557,
"learning_rate": 0.0005847814355099838,
"loss": 4.4304,
"step": 2450
},
{
"epoch": 0.2695417789757412,
"grad_norm": 0.8646295070648193,
"learning_rate": 0.0005844576362655154,
"loss": 4.4196,
"step": 2500
},
{
"epoch": 0.2749326145552561,
"grad_norm": 1.0314178466796875,
"learning_rate": 0.0005841338370210469,
"loss": 4.4017,
"step": 2550
},
{
"epoch": 0.2803234501347709,
"grad_norm": 0.9287024140357971,
"learning_rate": 0.0005838100377765785,
"loss": 4.3729,
"step": 2600
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.8492061495780945,
"learning_rate": 0.0005834862385321101,
"loss": 4.37,
"step": 2650
},
{
"epoch": 0.29110512129380056,
"grad_norm": 0.7161704301834106,
"learning_rate": 0.0005831624392876417,
"loss": 4.3608,
"step": 2700
},
{
"epoch": 0.29649595687331537,
"grad_norm": 1.0816575288772583,
"learning_rate": 0.0005828386400431731,
"loss": 4.3705,
"step": 2750
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.8127713799476624,
"learning_rate": 0.0005825148407987048,
"loss": 4.3681,
"step": 2800
},
{
"epoch": 0.30727762803234504,
"grad_norm": 0.9167231321334839,
"learning_rate": 0.0005821910415542363,
"loss": 4.33,
"step": 2850
},
{
"epoch": 0.31266846361185985,
"grad_norm": 0.8297504782676697,
"learning_rate": 0.0005818672423097679,
"loss": 4.3137,
"step": 2900
},
{
"epoch": 0.31805929919137466,
"grad_norm": 0.9186894297599792,
"learning_rate": 0.0005815434430652994,
"loss": 4.3441,
"step": 2950
},
{
"epoch": 0.32345013477088946,
"grad_norm": 0.7359763979911804,
"learning_rate": 0.0005812196438208311,
"loss": 4.309,
"step": 3000
},
{
"epoch": 0.32345013477088946,
"eval_accuracy": 0.29948495271589304,
"eval_loss": 4.228354454040527,
"eval_runtime": 183.1945,
"eval_samples_per_second": 98.316,
"eval_steps_per_second": 6.146,
"step": 3000
},
{
"epoch": 0.3288409703504043,
"grad_norm": 0.7310630679130554,
"learning_rate": 0.0005808958445763626,
"loss": 4.3024,
"step": 3050
},
{
"epoch": 0.33423180592991913,
"grad_norm": 0.8260939717292786,
"learning_rate": 0.0005805720453318942,
"loss": 4.2831,
"step": 3100
},
{
"epoch": 0.33962264150943394,
"grad_norm": 0.7478753924369812,
"learning_rate": 0.0005802482460874257,
"loss": 4.2722,
"step": 3150
},
{
"epoch": 0.3450134770889488,
"grad_norm": 0.8138642907142639,
"learning_rate": 0.0005799244468429573,
"loss": 4.2816,
"step": 3200
},
{
"epoch": 0.3504043126684636,
"grad_norm": 0.7284408211708069,
"learning_rate": 0.0005796006475984889,
"loss": 4.2495,
"step": 3250
},
{
"epoch": 0.3557951482479784,
"grad_norm": 0.6755571365356445,
"learning_rate": 0.0005792768483540205,
"loss": 4.2595,
"step": 3300
},
{
"epoch": 0.3611859838274933,
"grad_norm": 0.9542193412780762,
"learning_rate": 0.000578953049109552,
"loss": 4.2538,
"step": 3350
},
{
"epoch": 0.3665768194070081,
"grad_norm": 0.6383764743804932,
"learning_rate": 0.0005786292498650836,
"loss": 4.2319,
"step": 3400
},
{
"epoch": 0.3719676549865229,
"grad_norm": 0.8126682639122009,
"learning_rate": 0.0005783054506206152,
"loss": 4.2272,
"step": 3450
},
{
"epoch": 0.37735849056603776,
"grad_norm": 0.7132463455200195,
"learning_rate": 0.0005779816513761467,
"loss": 4.2186,
"step": 3500
},
{
"epoch": 0.38274932614555257,
"grad_norm": 0.818424642086029,
"learning_rate": 0.0005776578521316782,
"loss": 4.1982,
"step": 3550
},
{
"epoch": 0.3881401617250674,
"grad_norm": 0.6916500926017761,
"learning_rate": 0.0005773340528872099,
"loss": 4.2057,
"step": 3600
},
{
"epoch": 0.3935309973045822,
"grad_norm": 0.6858584880828857,
"learning_rate": 0.0005770102536427414,
"loss": 4.2202,
"step": 3650
},
{
"epoch": 0.39892183288409705,
"grad_norm": 0.7574723362922668,
"learning_rate": 0.000576686454398273,
"loss": 4.2073,
"step": 3700
},
{
"epoch": 0.40431266846361186,
"grad_norm": 0.7107515931129456,
"learning_rate": 0.0005763626551538045,
"loss": 4.1939,
"step": 3750
},
{
"epoch": 0.40970350404312667,
"grad_norm": 0.7424683570861816,
"learning_rate": 0.0005760388559093362,
"loss": 4.1827,
"step": 3800
},
{
"epoch": 0.41509433962264153,
"grad_norm": 0.6541684865951538,
"learning_rate": 0.0005757150566648678,
"loss": 4.1782,
"step": 3850
},
{
"epoch": 0.42048517520215634,
"grad_norm": 0.797566294670105,
"learning_rate": 0.0005753912574203993,
"loss": 4.17,
"step": 3900
},
{
"epoch": 0.42587601078167114,
"grad_norm": 0.5999880433082581,
"learning_rate": 0.0005750674581759309,
"loss": 4.1651,
"step": 3950
},
{
"epoch": 0.431266846361186,
"grad_norm": 0.8264360427856445,
"learning_rate": 0.0005747436589314624,
"loss": 4.1524,
"step": 4000
},
{
"epoch": 0.431266846361186,
"eval_accuracy": 0.312240258480874,
"eval_loss": 4.087605953216553,
"eval_runtime": 183.2024,
"eval_samples_per_second": 98.312,
"eval_steps_per_second": 6.146,
"step": 4000
},
{
"epoch": 0.4366576819407008,
"grad_norm": 0.7332233786582947,
"learning_rate": 0.0005744198596869941,
"loss": 4.1654,
"step": 4050
},
{
"epoch": 0.4420485175202156,
"grad_norm": 0.5775137543678284,
"learning_rate": 0.0005740960604425255,
"loss": 4.158,
"step": 4100
},
{
"epoch": 0.4474393530997305,
"grad_norm": 0.8009674549102783,
"learning_rate": 0.0005737722611980572,
"loss": 4.1392,
"step": 4150
},
{
"epoch": 0.4528301886792453,
"grad_norm": 0.6772514581680298,
"learning_rate": 0.0005734484619535887,
"loss": 4.1186,
"step": 4200
},
{
"epoch": 0.4582210242587601,
"grad_norm": 0.6068300604820251,
"learning_rate": 0.0005731246627091203,
"loss": 4.1518,
"step": 4250
},
{
"epoch": 0.4636118598382749,
"grad_norm": 0.6603842973709106,
"learning_rate": 0.0005728008634646518,
"loss": 4.1228,
"step": 4300
},
{
"epoch": 0.46900269541778977,
"grad_norm": 0.6870344877243042,
"learning_rate": 0.0005724770642201835,
"loss": 4.1306,
"step": 4350
},
{
"epoch": 0.4743935309973046,
"grad_norm": 0.8701305985450745,
"learning_rate": 0.000572153264975715,
"loss": 4.1236,
"step": 4400
},
{
"epoch": 0.4797843665768194,
"grad_norm": 0.646145224571228,
"learning_rate": 0.0005718294657312466,
"loss": 4.0979,
"step": 4450
},
{
"epoch": 0.48517520215633425,
"grad_norm": 0.5847651362419128,
"learning_rate": 0.0005715056664867781,
"loss": 4.0943,
"step": 4500
},
{
"epoch": 0.49056603773584906,
"grad_norm": 0.7485958933830261,
"learning_rate": 0.0005711818672423097,
"loss": 4.0872,
"step": 4550
},
{
"epoch": 0.49595687331536387,
"grad_norm": 0.6113314628601074,
"learning_rate": 0.0005708580679978413,
"loss": 4.0971,
"step": 4600
},
{
"epoch": 0.5013477088948787,
"grad_norm": 0.814666748046875,
"learning_rate": 0.0005705342687533729,
"loss": 4.088,
"step": 4650
},
{
"epoch": 0.5067385444743935,
"grad_norm": 0.6813623905181885,
"learning_rate": 0.0005702104695089044,
"loss": 4.0697,
"step": 4700
},
{
"epoch": 0.5121293800539084,
"grad_norm": 0.7049392461776733,
"learning_rate": 0.000569886670264436,
"loss": 4.083,
"step": 4750
},
{
"epoch": 0.5175202156334232,
"grad_norm": 0.6689280867576599,
"learning_rate": 0.0005695628710199675,
"loss": 4.0704,
"step": 4800
},
{
"epoch": 0.522911051212938,
"grad_norm": 0.5872611403465271,
"learning_rate": 0.0005692390717754991,
"loss": 4.0659,
"step": 4850
},
{
"epoch": 0.5283018867924528,
"grad_norm": 0.7064571976661682,
"learning_rate": 0.0005689152725310306,
"loss": 4.0586,
"step": 4900
},
{
"epoch": 0.5336927223719676,
"grad_norm": 0.7021653652191162,
"learning_rate": 0.0005685914732865623,
"loss": 4.0619,
"step": 4950
},
{
"epoch": 0.5390835579514824,
"grad_norm": 0.6203348636627197,
"learning_rate": 0.0005682676740420939,
"loss": 4.0714,
"step": 5000
},
{
"epoch": 0.5390835579514824,
"eval_accuracy": 0.32160700663358477,
"eval_loss": 3.9901158809661865,
"eval_runtime": 183.2916,
"eval_samples_per_second": 98.264,
"eval_steps_per_second": 6.143,
"step": 5000
},
{
"epoch": 0.5444743935309974,
"grad_norm": 0.7498524188995361,
"learning_rate": 0.0005679438747976254,
"loss": 4.0504,
"step": 5050
},
{
"epoch": 0.5498652291105122,
"grad_norm": 0.6415616869926453,
"learning_rate": 0.000567620075553157,
"loss": 4.045,
"step": 5100
},
{
"epoch": 0.555256064690027,
"grad_norm": 0.68656986951828,
"learning_rate": 0.0005672962763086886,
"loss": 4.0469,
"step": 5150
},
{
"epoch": 0.5606469002695418,
"grad_norm": 0.6866742968559265,
"learning_rate": 0.0005669724770642202,
"loss": 4.0368,
"step": 5200
},
{
"epoch": 0.5660377358490566,
"grad_norm": 0.6404621601104736,
"learning_rate": 0.0005666486778197517,
"loss": 4.0323,
"step": 5250
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.6272525191307068,
"learning_rate": 0.0005663248785752833,
"loss": 4.0431,
"step": 5300
},
{
"epoch": 0.5768194070080862,
"grad_norm": 0.6219531297683716,
"learning_rate": 0.0005660010793308148,
"loss": 4.0283,
"step": 5350
},
{
"epoch": 0.5822102425876011,
"grad_norm": 0.5969902276992798,
"learning_rate": 0.0005656772800863465,
"loss": 4.0455,
"step": 5400
},
{
"epoch": 0.5876010781671159,
"grad_norm": 0.7162837982177734,
"learning_rate": 0.0005653534808418779,
"loss": 4.0187,
"step": 5450
},
{
"epoch": 0.5929919137466307,
"grad_norm": 0.5840233564376831,
"learning_rate": 0.0005650296815974096,
"loss": 4.0264,
"step": 5500
},
{
"epoch": 0.5983827493261455,
"grad_norm": 0.5966022610664368,
"learning_rate": 0.0005647058823529411,
"loss": 4.0154,
"step": 5550
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.5551064610481262,
"learning_rate": 0.0005643820831084727,
"loss": 4.0069,
"step": 5600
},
{
"epoch": 0.6091644204851752,
"grad_norm": 0.7473669052124023,
"learning_rate": 0.0005640582838640042,
"loss": 4.0204,
"step": 5650
},
{
"epoch": 0.6145552560646901,
"grad_norm": 0.6089016795158386,
"learning_rate": 0.0005637344846195358,
"loss": 4.0132,
"step": 5700
},
{
"epoch": 0.6199460916442049,
"grad_norm": 0.5742376446723938,
"learning_rate": 0.0005634106853750674,
"loss": 4.02,
"step": 5750
},
{
"epoch": 0.6253369272237197,
"grad_norm": 0.857323169708252,
"learning_rate": 0.000563086886130599,
"loss": 3.9884,
"step": 5800
},
{
"epoch": 0.6307277628032345,
"grad_norm": 0.6711622476577759,
"learning_rate": 0.0005627630868861305,
"loss": 3.9889,
"step": 5850
},
{
"epoch": 0.6361185983827493,
"grad_norm": 0.6008835434913635,
"learning_rate": 0.0005624392876416621,
"loss": 3.9763,
"step": 5900
},
{
"epoch": 0.6415094339622641,
"grad_norm": 0.6592042446136475,
"learning_rate": 0.0005621154883971937,
"loss": 3.9816,
"step": 5950
},
{
"epoch": 0.6469002695417789,
"grad_norm": 0.6811545491218567,
"learning_rate": 0.0005617916891527253,
"loss": 3.9631,
"step": 6000
},
{
"epoch": 0.6469002695417789,
"eval_accuracy": 0.328109556010618,
"eval_loss": 3.9180023670196533,
"eval_runtime": 183.5563,
"eval_samples_per_second": 98.122,
"eval_steps_per_second": 6.134,
"step": 6000
},
{
"epoch": 0.6522911051212938,
"grad_norm": 0.838979959487915,
"learning_rate": 0.0005614678899082568,
"loss": 3.9977,
"step": 6050
},
{
"epoch": 0.6576819407008087,
"grad_norm": 0.6069373488426208,
"learning_rate": 0.0005611440906637884,
"loss": 3.9844,
"step": 6100
},
{
"epoch": 0.6630727762803235,
"grad_norm": 0.6294558048248291,
"learning_rate": 0.00056082029141932,
"loss": 3.9769,
"step": 6150
},
{
"epoch": 0.6684636118598383,
"grad_norm": 0.6527539491653442,
"learning_rate": 0.0005604964921748515,
"loss": 3.9875,
"step": 6200
},
{
"epoch": 0.6738544474393531,
"grad_norm": 0.6204699873924255,
"learning_rate": 0.000560172692930383,
"loss": 3.949,
"step": 6250
},
{
"epoch": 0.6792452830188679,
"grad_norm": 0.6665420532226562,
"learning_rate": 0.0005598488936859147,
"loss": 3.96,
"step": 6300
},
{
"epoch": 0.6846361185983828,
"grad_norm": 0.6489077806472778,
"learning_rate": 0.0005595250944414463,
"loss": 3.9534,
"step": 6350
},
{
"epoch": 0.6900269541778976,
"grad_norm": 0.6623448729515076,
"learning_rate": 0.0005592012951969778,
"loss": 3.9454,
"step": 6400
},
{
"epoch": 0.6954177897574124,
"grad_norm": 0.7035852670669556,
"learning_rate": 0.0005588774959525094,
"loss": 3.9722,
"step": 6450
},
{
"epoch": 0.7008086253369272,
"grad_norm": 0.5686920881271362,
"learning_rate": 0.000558553696708041,
"loss": 3.9525,
"step": 6500
},
{
"epoch": 0.706199460916442,
"grad_norm": 0.5780633091926575,
"learning_rate": 0.0005582298974635726,
"loss": 3.9525,
"step": 6550
},
{
"epoch": 0.7115902964959568,
"grad_norm": 0.6254565715789795,
"learning_rate": 0.0005579060982191041,
"loss": 3.9494,
"step": 6600
},
{
"epoch": 0.7169811320754716,
"grad_norm": 0.6464234590530396,
"learning_rate": 0.0005575822989746357,
"loss": 3.9432,
"step": 6650
},
{
"epoch": 0.7223719676549866,
"grad_norm": 0.5895872712135315,
"learning_rate": 0.0005572584997301672,
"loss": 3.9378,
"step": 6700
},
{
"epoch": 0.7277628032345014,
"grad_norm": 0.6661088466644287,
"learning_rate": 0.0005569347004856989,
"loss": 3.9405,
"step": 6750
},
{
"epoch": 0.7331536388140162,
"grad_norm": 0.5486308932304382,
"learning_rate": 0.0005566109012412303,
"loss": 3.9499,
"step": 6800
},
{
"epoch": 0.738544474393531,
"grad_norm": 0.5206322073936462,
"learning_rate": 0.000556287101996762,
"loss": 3.9308,
"step": 6850
},
{
"epoch": 0.7439353099730458,
"grad_norm": 0.60262531042099,
"learning_rate": 0.0005559633027522935,
"loss": 3.9279,
"step": 6900
},
{
"epoch": 0.7493261455525606,
"grad_norm": 0.6978471875190735,
"learning_rate": 0.0005556395035078251,
"loss": 3.9152,
"step": 6950
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.5400425791740417,
"learning_rate": 0.0005553157042633566,
"loss": 3.9285,
"step": 7000
},
{
"epoch": 0.7547169811320755,
"eval_accuracy": 0.33344778104183126,
"eval_loss": 3.8615846633911133,
"eval_runtime": 182.602,
"eval_samples_per_second": 98.635,
"eval_steps_per_second": 6.166,
"step": 7000
},
{
"epoch": 0.7601078167115903,
"grad_norm": 0.5189043283462524,
"learning_rate": 0.0005549919050188882,
"loss": 3.9302,
"step": 7050
},
{
"epoch": 0.7654986522911051,
"grad_norm": 0.5751085877418518,
"learning_rate": 0.0005546681057744198,
"loss": 3.9332,
"step": 7100
},
{
"epoch": 0.77088948787062,
"grad_norm": 0.6791032552719116,
"learning_rate": 0.0005543443065299514,
"loss": 3.9073,
"step": 7150
},
{
"epoch": 0.7762803234501348,
"grad_norm": 0.7200894951820374,
"learning_rate": 0.000554020507285483,
"loss": 3.9193,
"step": 7200
},
{
"epoch": 0.7816711590296496,
"grad_norm": 0.6101612448692322,
"learning_rate": 0.0005536967080410145,
"loss": 3.919,
"step": 7250
},
{
"epoch": 0.7870619946091644,
"grad_norm": 0.5997413396835327,
"learning_rate": 0.0005533729087965462,
"loss": 3.9079,
"step": 7300
},
{
"epoch": 0.7924528301886793,
"grad_norm": 0.5748898386955261,
"learning_rate": 0.0005530491095520777,
"loss": 3.9162,
"step": 7350
},
{
"epoch": 0.7978436657681941,
"grad_norm": 0.6359212398529053,
"learning_rate": 0.0005527253103076093,
"loss": 3.8807,
"step": 7400
},
{
"epoch": 0.8032345013477089,
"grad_norm": 0.6088876724243164,
"learning_rate": 0.0005524015110631408,
"loss": 3.8902,
"step": 7450
},
{
"epoch": 0.8086253369272237,
"grad_norm": 0.6242630481719971,
"learning_rate": 0.0005520777118186724,
"loss": 3.9035,
"step": 7500
},
{
"epoch": 0.8140161725067385,
"grad_norm": 0.622336208820343,
"learning_rate": 0.0005517539125742039,
"loss": 3.8986,
"step": 7550
},
{
"epoch": 0.8194070080862533,
"grad_norm": 0.5424439311027527,
"learning_rate": 0.0005514301133297355,
"loss": 3.9166,
"step": 7600
},
{
"epoch": 0.8247978436657682,
"grad_norm": 0.5808700323104858,
"learning_rate": 0.0005511063140852671,
"loss": 3.9005,
"step": 7650
},
{
"epoch": 0.8301886792452831,
"grad_norm": 0.5555144548416138,
"learning_rate": 0.0005507825148407987,
"loss": 3.8722,
"step": 7700
},
{
"epoch": 0.8355795148247979,
"grad_norm": 0.7033872604370117,
"learning_rate": 0.0005504587155963302,
"loss": 3.8852,
"step": 7750
},
{
"epoch": 0.8409703504043127,
"grad_norm": 0.5577759742736816,
"learning_rate": 0.0005501349163518618,
"loss": 3.9008,
"step": 7800
},
{
"epoch": 0.8463611859838275,
"grad_norm": null,
"learning_rate": 0.0005498175930922827,
"loss": 3.888,
"step": 7850
},
{
"epoch": 0.8517520215633423,
"grad_norm": 0.6767401099205017,
"learning_rate": 0.0005494937938478143,
"loss": 3.8863,
"step": 7900
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.5402886271476746,
"learning_rate": 0.0005491699946033459,
"loss": 3.8885,
"step": 7950
},
{
"epoch": 0.862533692722372,
"grad_norm": 0.5724102258682251,
"learning_rate": 0.0005488461953588775,
"loss": 3.8676,
"step": 8000
},
{
"epoch": 0.862533692722372,
"eval_accuracy": 0.3375052058314874,
"eval_loss": 3.816683292388916,
"eval_runtime": 181.6977,
"eval_samples_per_second": 99.126,
"eval_steps_per_second": 6.197,
"step": 8000
},
{
"epoch": 0.8679245283018868,
"grad_norm": 0.5436832308769226,
"learning_rate": 0.000548522396114409,
"loss": 3.8838,
"step": 8050
},
{
"epoch": 0.8733153638814016,
"grad_norm": 0.6710783243179321,
"learning_rate": 0.0005481985968699406,
"loss": 3.8623,
"step": 8100
},
{
"epoch": 0.8787061994609164,
"grad_norm": 0.5671558380126953,
"learning_rate": 0.0005478747976254721,
"loss": 3.8746,
"step": 8150
},
{
"epoch": 0.8840970350404312,
"grad_norm": 0.6092495322227478,
"learning_rate": 0.0005475509983810037,
"loss": 3.8635,
"step": 8200
},
{
"epoch": 0.889487870619946,
"grad_norm": 0.6280654072761536,
"learning_rate": 0.0005472271991365352,
"loss": 3.8772,
"step": 8250
},
{
"epoch": 0.894878706199461,
"grad_norm": 0.5402874946594238,
"learning_rate": 0.0005469033998920669,
"loss": 3.8525,
"step": 8300
},
{
"epoch": 0.9002695417789758,
"grad_norm": 0.5597426295280457,
"learning_rate": 0.0005465796006475984,
"loss": 3.8685,
"step": 8350
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.5542333126068115,
"learning_rate": 0.00054625580140313,
"loss": 3.8677,
"step": 8400
},
{
"epoch": 0.9110512129380054,
"grad_norm": 0.5573087930679321,
"learning_rate": 0.0005459320021586615,
"loss": 3.8402,
"step": 8450
},
{
"epoch": 0.9164420485175202,
"grad_norm": 0.5278663039207458,
"learning_rate": 0.0005456082029141932,
"loss": 3.8521,
"step": 8500
},
{
"epoch": 0.921832884097035,
"grad_norm": 0.6010989546775818,
"learning_rate": 0.0005452844036697248,
"loss": 3.8536,
"step": 8550
},
{
"epoch": 0.9272237196765498,
"grad_norm": 0.6041896939277649,
"learning_rate": 0.0005449606044252563,
"loss": 3.8816,
"step": 8600
},
{
"epoch": 0.9326145552560647,
"grad_norm": 0.6230564117431641,
"learning_rate": 0.0005446368051807879,
"loss": 3.8547,
"step": 8650
},
{
"epoch": 0.9380053908355795,
"grad_norm": 0.6061603426933289,
"learning_rate": 0.0005443130059363194,
"loss": 3.8612,
"step": 8700
},
{
"epoch": 0.9433962264150944,
"grad_norm": 0.532725989818573,
"learning_rate": 0.0005439892066918511,
"loss": 3.8453,
"step": 8750
},
{
"epoch": 0.9487870619946092,
"grad_norm": 0.6650099158287048,
"learning_rate": 0.0005436654074473825,
"loss": 3.845,
"step": 8800
},
{
"epoch": 0.954177897574124,
"grad_norm": 0.5258676409721375,
"learning_rate": 0.0005433416082029142,
"loss": 3.8474,
"step": 8850
},
{
"epoch": 0.9595687331536388,
"grad_norm": 0.5918166637420654,
"learning_rate": 0.0005430178089584457,
"loss": 3.8476,
"step": 8900
},
{
"epoch": 0.9649595687331537,
"grad_norm": 0.7267642617225647,
"learning_rate": 0.0005426940097139773,
"loss": 3.8489,
"step": 8950
},
{
"epoch": 0.9703504043126685,
"grad_norm": 0.639377772808075,
"learning_rate": 0.0005423702104695088,
"loss": 3.8648,
"step": 9000
},
{
"epoch": 0.9703504043126685,
"eval_accuracy": 0.3408996306996996,
"eval_loss": 3.779827356338501,
"eval_runtime": 181.5426,
"eval_samples_per_second": 99.211,
"eval_steps_per_second": 6.202,
"step": 9000
},
{
"epoch": 0.9757412398921833,
"grad_norm": 0.6780883073806763,
"learning_rate": 0.0005420464112250404,
"loss": 3.8448,
"step": 9050
},
{
"epoch": 0.9811320754716981,
"grad_norm": 0.5915326476097107,
"learning_rate": 0.000541722611980572,
"loss": 3.8335,
"step": 9100
},
{
"epoch": 0.9865229110512129,
"grad_norm": 0.5880796313285828,
"learning_rate": 0.0005413988127361036,
"loss": 3.8434,
"step": 9150
},
{
"epoch": 0.9919137466307277,
"grad_norm": 0.551231861114502,
"learning_rate": 0.0005410750134916351,
"loss": 3.8469,
"step": 9200
},
{
"epoch": 0.9973045822102425,
"grad_norm": 0.542789101600647,
"learning_rate": 0.0005407512142471667,
"loss": 3.8382,
"step": 9250
},
{
"epoch": 1.0026954177897573,
"grad_norm": 0.5782141089439392,
"learning_rate": 0.0005404274150026983,
"loss": 3.8143,
"step": 9300
},
{
"epoch": 1.0080862533692723,
"grad_norm": 0.585417628288269,
"learning_rate": 0.0005401036157582299,
"loss": 3.782,
"step": 9350
},
{
"epoch": 1.013477088948787,
"grad_norm": 0.5786333680152893,
"learning_rate": 0.0005397798165137614,
"loss": 3.7919,
"step": 9400
},
{
"epoch": 1.0188679245283019,
"grad_norm": 0.5752071738243103,
"learning_rate": 0.000539456017269293,
"loss": 3.7809,
"step": 9450
},
{
"epoch": 1.0242587601078168,
"grad_norm": 0.5453478693962097,
"learning_rate": 0.0005391322180248245,
"loss": 3.7775,
"step": 9500
},
{
"epoch": 1.0296495956873315,
"grad_norm": 0.5964511632919312,
"learning_rate": 0.0005388084187803561,
"loss": 3.7881,
"step": 9550
},
{
"epoch": 1.0350404312668464,
"grad_norm": 0.6188067197799683,
"learning_rate": 0.0005384846195358876,
"loss": 3.7672,
"step": 9600
},
{
"epoch": 1.0404312668463611,
"grad_norm": 0.5592512488365173,
"learning_rate": 0.0005381608202914193,
"loss": 3.7615,
"step": 9650
},
{
"epoch": 1.045822102425876,
"grad_norm": 0.5626137256622314,
"learning_rate": 0.0005378370210469509,
"loss": 3.7723,
"step": 9700
},
{
"epoch": 1.0512129380053907,
"grad_norm": 0.6109785437583923,
"learning_rate": 0.0005375132218024824,
"loss": 3.7644,
"step": 9750
},
{
"epoch": 1.0566037735849056,
"grad_norm": 0.6150305271148682,
"learning_rate": 0.000537189422558014,
"loss": 3.7717,
"step": 9800
},
{
"epoch": 1.0619946091644206,
"grad_norm": 0.5786086320877075,
"learning_rate": 0.0005368656233135455,
"loss": 3.7537,
"step": 9850
},
{
"epoch": 1.0673854447439353,
"grad_norm": 0.5935583710670471,
"learning_rate": 0.0005365418240690772,
"loss": 3.7646,
"step": 9900
},
{
"epoch": 1.0727762803234502,
"grad_norm": 0.5563629269599915,
"learning_rate": 0.0005362180248246087,
"loss": 3.7625,
"step": 9950
},
{
"epoch": 1.0781671159029649,
"grad_norm": 0.5583340525627136,
"learning_rate": 0.0005358942255801403,
"loss": 3.7661,
"step": 10000
},
{
"epoch": 1.0781671159029649,
"eval_accuracy": 0.3447591985806456,
"eval_loss": 3.7474043369293213,
"eval_runtime": 179.3333,
"eval_samples_per_second": 100.433,
"eval_steps_per_second": 6.279,
"step": 10000
},
{
"epoch": 1.0835579514824798,
"grad_norm": 0.5278469324111938,
"learning_rate": 0.0005355704263356718,
"loss": 3.7572,
"step": 10050
},
{
"epoch": 1.0889487870619945,
"grad_norm": 0.599793553352356,
"learning_rate": 0.0005352466270912035,
"loss": 3.7482,
"step": 10100
},
{
"epoch": 1.0943396226415094,
"grad_norm": 0.594103991985321,
"learning_rate": 0.000534922827846735,
"loss": 3.7811,
"step": 10150
},
{
"epoch": 1.0997304582210243,
"grad_norm": 0.539659321308136,
"learning_rate": 0.0005345990286022666,
"loss": 3.7808,
"step": 10200
},
{
"epoch": 1.105121293800539,
"grad_norm": 0.5949695110321045,
"learning_rate": 0.0005342752293577981,
"loss": 3.7483,
"step": 10250
},
{
"epoch": 1.110512129380054,
"grad_norm": 0.6354559659957886,
"learning_rate": 0.0005339514301133297,
"loss": 3.7743,
"step": 10300
},
{
"epoch": 1.1159029649595686,
"grad_norm": 0.5751685500144958,
"learning_rate": 0.0005336276308688612,
"loss": 3.7727,
"step": 10350
},
{
"epoch": 1.1212938005390836,
"grad_norm": 0.6152482032775879,
"learning_rate": 0.0005333038316243928,
"loss": 3.7604,
"step": 10400
},
{
"epoch": 1.1266846361185983,
"grad_norm": 0.6259979009628296,
"learning_rate": 0.0005329800323799244,
"loss": 3.7242,
"step": 10450
},
{
"epoch": 1.1320754716981132,
"grad_norm": 0.5735751390457153,
"learning_rate": 0.000532656233135456,
"loss": 3.75,
"step": 10500
},
{
"epoch": 1.137466307277628,
"grad_norm": 0.5335408449172974,
"learning_rate": 0.0005323324338909875,
"loss": 3.7497,
"step": 10550
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.5535331964492798,
"learning_rate": 0.0005320086346465191,
"loss": 3.757,
"step": 10600
},
{
"epoch": 1.1482479784366577,
"grad_norm": 0.5518361926078796,
"learning_rate": 0.0005316848354020507,
"loss": 3.7635,
"step": 10650
},
{
"epoch": 1.1536388140161726,
"grad_norm": 0.6065233945846558,
"learning_rate": 0.0005313610361575823,
"loss": 3.7482,
"step": 10700
},
{
"epoch": 1.1590296495956873,
"grad_norm": 0.5510832667350769,
"learning_rate": 0.0005310372369131138,
"loss": 3.7464,
"step": 10750
},
{
"epoch": 1.1644204851752022,
"grad_norm": 0.5745865702629089,
"learning_rate": 0.0005307134376686454,
"loss": 3.7237,
"step": 10800
},
{
"epoch": 1.169811320754717,
"grad_norm": 0.6756680011749268,
"learning_rate": 0.000530389638424177,
"loss": 3.7627,
"step": 10850
},
{
"epoch": 1.1752021563342319,
"grad_norm": 0.6508898138999939,
"learning_rate": 0.0005300658391797085,
"loss": 3.7616,
"step": 10900
},
{
"epoch": 1.1805929919137466,
"grad_norm": 0.6372632384300232,
"learning_rate": 0.00052974203993524,
"loss": 3.7205,
"step": 10950
},
{
"epoch": 1.1859838274932615,
"grad_norm": 0.584235668182373,
"learning_rate": 0.0005294182406907717,
"loss": 3.7415,
"step": 11000
},
{
"epoch": 1.1859838274932615,
"eval_accuracy": 0.3469530090385078,
"eval_loss": 3.7233991622924805,
"eval_runtime": 179.2943,
"eval_samples_per_second": 100.455,
"eval_steps_per_second": 6.28,
"step": 11000
},
{
"epoch": 1.1913746630727764,
"grad_norm": 0.5607488751411438,
"learning_rate": 0.0005290944414463033,
"loss": 3.7504,
"step": 11050
},
{
"epoch": 1.196765498652291,
"grad_norm": 0.6091164946556091,
"learning_rate": 0.0005287706422018348,
"loss": 3.744,
"step": 11100
},
{
"epoch": 1.202156334231806,
"grad_norm": 0.6206035614013672,
"learning_rate": 0.0005284468429573664,
"loss": 3.7378,
"step": 11150
},
{
"epoch": 1.2075471698113207,
"grad_norm": 0.6528010964393616,
"learning_rate": 0.0005281230437128979,
"loss": 3.7509,
"step": 11200
},
{
"epoch": 1.2129380053908356,
"grad_norm": 0.520330011844635,
"learning_rate": 0.0005278057204533189,
"loss": 3.7608,
"step": 11250
},
{
"epoch": 1.2183288409703503,
"grad_norm": 0.6227236390113831,
"learning_rate": 0.0005274819212088505,
"loss": 3.7286,
"step": 11300
},
{
"epoch": 1.2237196765498652,
"grad_norm": 0.5682984590530396,
"learning_rate": 0.0005271581219643821,
"loss": 3.7505,
"step": 11350
},
{
"epoch": 1.2291105121293802,
"grad_norm": 0.6123554706573486,
"learning_rate": 0.0005268343227199136,
"loss": 3.7508,
"step": 11400
},
{
"epoch": 1.2345013477088949,
"grad_norm": 0.507030189037323,
"learning_rate": 0.0005265105234754452,
"loss": 3.7553,
"step": 11450
},
{
"epoch": 1.2398921832884098,
"grad_norm": 0.5759048461914062,
"learning_rate": 0.0005261867242309767,
"loss": 3.7192,
"step": 11500
},
{
"epoch": 1.2452830188679245,
"grad_norm": 0.5370573997497559,
"learning_rate": 0.0005258629249865083,
"loss": 3.7409,
"step": 11550
},
{
"epoch": 1.2506738544474394,
"grad_norm": 0.5597508549690247,
"learning_rate": 0.0005255391257420398,
"loss": 3.739,
"step": 11600
},
{
"epoch": 1.256064690026954,
"grad_norm": 0.6163648366928101,
"learning_rate": 0.0005252153264975715,
"loss": 3.7364,
"step": 11650
},
{
"epoch": 1.261455525606469,
"grad_norm": 0.561890721321106,
"learning_rate": 0.000524891527253103,
"loss": 3.7371,
"step": 11700
},
{
"epoch": 1.266846361185984,
"grad_norm": 0.6164953708648682,
"learning_rate": 0.0005245677280086346,
"loss": 3.7437,
"step": 11750
},
{
"epoch": 1.2722371967654986,
"grad_norm": 0.5438469648361206,
"learning_rate": 0.0005242439287641661,
"loss": 3.7317,
"step": 11800
},
{
"epoch": 1.2776280323450135,
"grad_norm": 0.5996424555778503,
"learning_rate": 0.0005239201295196978,
"loss": 3.7139,
"step": 11850
},
{
"epoch": 1.2830188679245282,
"grad_norm": 0.5443153381347656,
"learning_rate": 0.0005235963302752293,
"loss": 3.7189,
"step": 11900
},
{
"epoch": 1.2884097035040432,
"grad_norm": 0.5056083798408508,
"learning_rate": 0.0005232725310307609,
"loss": 3.7286,
"step": 11950
},
{
"epoch": 1.2938005390835579,
"grad_norm": 0.5346047878265381,
"learning_rate": 0.0005229487317862924,
"loss": 3.7241,
"step": 12000
},
{
"epoch": 1.2938005390835579,
"eval_accuracy": 0.3486271327339533,
"eval_loss": 3.701392889022827,
"eval_runtime": 179.3961,
"eval_samples_per_second": 100.398,
"eval_steps_per_second": 6.277,
"step": 12000
},
{
"epoch": 1.2991913746630728,
"grad_norm": 0.561946451663971,
"learning_rate": 0.000522624932541824,
"loss": 3.7065,
"step": 12050
},
{
"epoch": 1.3045822102425877,
"grad_norm": 0.6094053387641907,
"learning_rate": 0.0005223011332973557,
"loss": 3.7432,
"step": 12100
},
{
"epoch": 1.3099730458221024,
"grad_norm": 0.548875093460083,
"learning_rate": 0.0005219773340528872,
"loss": 3.7251,
"step": 12150
},
{
"epoch": 1.3153638814016173,
"grad_norm": 0.5728870630264282,
"learning_rate": 0.0005216535348084188,
"loss": 3.7107,
"step": 12200
},
{
"epoch": 1.320754716981132,
"grad_norm": 0.6108008623123169,
"learning_rate": 0.0005213297355639503,
"loss": 3.7369,
"step": 12250
},
{
"epoch": 1.326145552560647,
"grad_norm": 0.5879302024841309,
"learning_rate": 0.0005210059363194819,
"loss": 3.7043,
"step": 12300
},
{
"epoch": 1.3315363881401616,
"grad_norm": 0.550482988357544,
"learning_rate": 0.0005206821370750134,
"loss": 3.7288,
"step": 12350
},
{
"epoch": 1.3369272237196765,
"grad_norm": 0.5876588821411133,
"learning_rate": 0.000520358337830545,
"loss": 3.7,
"step": 12400
},
{
"epoch": 1.3423180592991915,
"grad_norm": 0.5548244118690491,
"learning_rate": 0.0005200345385860766,
"loss": 3.7202,
"step": 12450
},
{
"epoch": 1.3477088948787062,
"grad_norm": 0.6892051696777344,
"learning_rate": 0.0005197107393416082,
"loss": 3.7287,
"step": 12500
},
{
"epoch": 1.353099730458221,
"grad_norm": 0.5671435594558716,
"learning_rate": 0.0005193869400971397,
"loss": 3.7361,
"step": 12550
},
{
"epoch": 1.3584905660377358,
"grad_norm": 0.5692296624183655,
"learning_rate": 0.0005190631408526713,
"loss": 3.7137,
"step": 12600
},
{
"epoch": 1.3638814016172507,
"grad_norm": 0.5560773611068726,
"learning_rate": 0.0005187393416082029,
"loss": 3.7092,
"step": 12650
},
{
"epoch": 1.3692722371967654,
"grad_norm": 0.5214106440544128,
"learning_rate": 0.0005184155423637345,
"loss": 3.7076,
"step": 12700
},
{
"epoch": 1.3746630727762803,
"grad_norm": 0.5432198643684387,
"learning_rate": 0.000518091743119266,
"loss": 3.7224,
"step": 12750
},
{
"epoch": 1.3800539083557952,
"grad_norm": 0.5707585215568542,
"learning_rate": 0.0005177679438747976,
"loss": 3.7136,
"step": 12800
},
{
"epoch": 1.38544474393531,
"grad_norm": 0.583604633808136,
"learning_rate": 0.0005174441446303291,
"loss": 3.7356,
"step": 12850
},
{
"epoch": 1.3908355795148248,
"grad_norm": 0.567251443862915,
"learning_rate": 0.0005171203453858607,
"loss": 3.7341,
"step": 12900
},
{
"epoch": 1.3962264150943398,
"grad_norm": 0.5615735054016113,
"learning_rate": 0.0005167965461413922,
"loss": 3.7331,
"step": 12950
},
{
"epoch": 1.4016172506738545,
"grad_norm": 0.6066398024559021,
"learning_rate": 0.0005164727468969239,
"loss": 3.715,
"step": 13000
},
{
"epoch": 1.4016172506738545,
"eval_accuracy": 0.3512098118773035,
"eval_loss": 3.67993426322937,
"eval_runtime": 181.5275,
"eval_samples_per_second": 99.219,
"eval_steps_per_second": 6.203,
"step": 13000
},
{
"epoch": 1.4070080862533692,
"grad_norm": 0.6397793292999268,
"learning_rate": 0.0005161489476524554,
"loss": 3.6967,
"step": 13050
},
{
"epoch": 1.412398921832884,
"grad_norm": 0.6502065062522888,
"learning_rate": 0.000515825148407987,
"loss": 3.7131,
"step": 13100
},
{
"epoch": 1.417789757412399,
"grad_norm": 0.5458613038063049,
"learning_rate": 0.0005155013491635185,
"loss": 3.7147,
"step": 13150
},
{
"epoch": 1.4231805929919137,
"grad_norm": 0.5654726028442383,
"learning_rate": 0.0005151775499190501,
"loss": 3.7392,
"step": 13200
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.5382741689682007,
"learning_rate": 0.0005148537506745818,
"loss": 3.6966,
"step": 13250
},
{
"epoch": 1.4339622641509435,
"grad_norm": 0.5260798931121826,
"learning_rate": 0.0005145299514301133,
"loss": 3.703,
"step": 13300
},
{
"epoch": 1.4393530997304582,
"grad_norm": 0.5853448510169983,
"learning_rate": 0.0005142061521856449,
"loss": 3.684,
"step": 13350
},
{
"epoch": 1.444743935309973,
"grad_norm": 0.538550615310669,
"learning_rate": 0.0005138823529411764,
"loss": 3.7219,
"step": 13400
},
{
"epoch": 1.4501347708894878,
"grad_norm": 0.596662700176239,
"learning_rate": 0.0005135585536967081,
"loss": 3.7194,
"step": 13450
},
{
"epoch": 1.4555256064690028,
"grad_norm": 0.6186038851737976,
"learning_rate": 0.0005132347544522396,
"loss": 3.7033,
"step": 13500
},
{
"epoch": 1.4609164420485174,
"grad_norm": 0.542559802532196,
"learning_rate": 0.0005129109552077712,
"loss": 3.7194,
"step": 13550
},
{
"epoch": 1.4663072776280324,
"grad_norm": 0.5352370738983154,
"learning_rate": 0.0005125871559633027,
"loss": 3.7046,
"step": 13600
},
{
"epoch": 1.4716981132075473,
"grad_norm": 0.5471335649490356,
"learning_rate": 0.0005122633567188343,
"loss": 3.7091,
"step": 13650
},
{
"epoch": 1.477088948787062,
"grad_norm": 0.6399145126342773,
"learning_rate": 0.0005119395574743658,
"loss": 3.7208,
"step": 13700
},
{
"epoch": 1.482479784366577,
"grad_norm": 0.5687584280967712,
"learning_rate": 0.0005116222342147868,
"loss": 3.7,
"step": 13750
},
{
"epoch": 1.4878706199460916,
"grad_norm": 0.5827769041061401,
"learning_rate": 0.0005112984349703183,
"loss": 3.7004,
"step": 13800
},
{
"epoch": 1.4932614555256065,
"grad_norm": 0.5126985907554626,
"learning_rate": 0.00051097463572585,
"loss": 3.6955,
"step": 13850
},
{
"epoch": 1.4986522911051212,
"grad_norm": 0.6048992872238159,
"learning_rate": 0.0005106508364813815,
"loss": 3.6853,
"step": 13900
},
{
"epoch": 1.5040431266846361,
"grad_norm": 0.5209577679634094,
"learning_rate": 0.0005103270372369131,
"loss": 3.7061,
"step": 13950
},
{
"epoch": 1.509433962264151,
"grad_norm": 0.558080792427063,
"learning_rate": 0.0005100032379924446,
"loss": 3.6982,
"step": 14000
},
{
"epoch": 1.509433962264151,
"eval_accuracy": 0.35338667248466665,
"eval_loss": 3.6556079387664795,
"eval_runtime": 181.0746,
"eval_samples_per_second": 99.467,
"eval_steps_per_second": 6.218,
"step": 14000
},
{
"epoch": 1.5148247978436657,
"grad_norm": 0.5223732590675354,
"learning_rate": 0.0005096794387479762,
"loss": 3.6743,
"step": 14050
},
{
"epoch": 1.5202156334231804,
"grad_norm": 0.5266443490982056,
"learning_rate": 0.0005093556395035078,
"loss": 3.7049,
"step": 14100
},
{
"epoch": 1.5256064690026954,
"grad_norm": 0.5410740971565247,
"learning_rate": 0.0005090318402590394,
"loss": 3.6871,
"step": 14150
},
{
"epoch": 1.5309973045822103,
"grad_norm": 0.5198672413825989,
"learning_rate": 0.0005087080410145709,
"loss": 3.6848,
"step": 14200
},
{
"epoch": 1.536388140161725,
"grad_norm": 0.5388085246086121,
"learning_rate": 0.0005083842417701025,
"loss": 3.6925,
"step": 14250
},
{
"epoch": 1.54177897574124,
"grad_norm": 0.5680968761444092,
"learning_rate": 0.000508060442525634,
"loss": 3.7015,
"step": 14300
},
{
"epoch": 1.5471698113207548,
"grad_norm": 0.529816746711731,
"learning_rate": 0.0005077366432811656,
"loss": 3.7089,
"step": 14350
},
{
"epoch": 1.5525606469002695,
"grad_norm": 0.588886022567749,
"learning_rate": 0.0005074128440366971,
"loss": 3.6898,
"step": 14400
},
{
"epoch": 1.5579514824797842,
"grad_norm": 0.53922438621521,
"learning_rate": 0.0005070890447922288,
"loss": 3.7101,
"step": 14450
},
{
"epoch": 1.5633423180592994,
"grad_norm": 0.5846419930458069,
"learning_rate": 0.0005067652455477604,
"loss": 3.6752,
"step": 14500
},
{
"epoch": 1.568733153638814,
"grad_norm": 0.5448037981987,
"learning_rate": 0.0005064414463032919,
"loss": 3.6868,
"step": 14550
},
{
"epoch": 1.5741239892183287,
"grad_norm": 0.5100300908088684,
"learning_rate": 0.0005061176470588235,
"loss": 3.6773,
"step": 14600
},
{
"epoch": 1.5795148247978437,
"grad_norm": 0.5662325620651245,
"learning_rate": 0.0005057938478143551,
"loss": 3.693,
"step": 14650
},
{
"epoch": 1.5849056603773586,
"grad_norm": 0.5295085906982422,
"learning_rate": 0.0005054700485698867,
"loss": 3.6999,
"step": 14700
},
{
"epoch": 1.5902964959568733,
"grad_norm": 0.6152496337890625,
"learning_rate": 0.0005051462493254182,
"loss": 3.6639,
"step": 14750
},
{
"epoch": 1.595687331536388,
"grad_norm": 0.5873838067054749,
"learning_rate": 0.0005048224500809498,
"loss": 3.6879,
"step": 14800
},
{
"epoch": 1.6010781671159031,
"grad_norm": 0.5197713375091553,
"learning_rate": 0.0005044986508364813,
"loss": 3.6784,
"step": 14850
},
{
"epoch": 1.6064690026954178,
"grad_norm": 0.5193688869476318,
"learning_rate": 0.0005041748515920129,
"loss": 3.6756,
"step": 14900
},
{
"epoch": 1.6118598382749325,
"grad_norm": 0.5456000566482544,
"learning_rate": 0.0005038510523475444,
"loss": 3.6745,
"step": 14950
},
{
"epoch": 1.6172506738544474,
"grad_norm": 0.565951943397522,
"learning_rate": 0.0005035272531030761,
"loss": 3.6829,
"step": 15000
},
{
"epoch": 1.6172506738544474,
"eval_accuracy": 0.3553477484568302,
"eval_loss": 3.6376187801361084,
"eval_runtime": 181.6027,
"eval_samples_per_second": 99.178,
"eval_steps_per_second": 6.2,
"step": 15000
},
{
"epoch": 1.6226415094339623,
"grad_norm": 0.5930358171463013,
"learning_rate": 0.0005032034538586076,
"loss": 3.6899,
"step": 15050
},
{
"epoch": 1.628032345013477,
"grad_norm": 0.5067706108093262,
"learning_rate": 0.0005028796546141392,
"loss": 3.6659,
"step": 15100
},
{
"epoch": 1.633423180592992,
"grad_norm": 0.5201679468154907,
"learning_rate": 0.0005025558553696707,
"loss": 3.6864,
"step": 15150
},
{
"epoch": 1.6388140161725069,
"grad_norm": 0.5754110217094421,
"learning_rate": 0.0005022320561252023,
"loss": 3.6606,
"step": 15200
},
{
"epoch": 1.6442048517520216,
"grad_norm": 0.592934250831604,
"learning_rate": 0.0005019082568807339,
"loss": 3.7035,
"step": 15250
},
{
"epoch": 1.6495956873315363,
"grad_norm": 0.5429046154022217,
"learning_rate": 0.0005015844576362655,
"loss": 3.687,
"step": 15300
},
{
"epoch": 1.6549865229110512,
"grad_norm": 0.5521410703659058,
"learning_rate": 0.000501260658391797,
"loss": 3.6814,
"step": 15350
},
{
"epoch": 1.6603773584905661,
"grad_norm": 0.5523940324783325,
"learning_rate": 0.0005009368591473286,
"loss": 3.671,
"step": 15400
},
{
"epoch": 1.6657681940700808,
"grad_norm": 0.5145202875137329,
"learning_rate": 0.0005006130599028602,
"loss": 3.6823,
"step": 15450
},
{
"epoch": 1.6711590296495957,
"grad_norm": 0.541299045085907,
"learning_rate": 0.0005002892606583918,
"loss": 3.6941,
"step": 15500
},
{
"epoch": 1.6765498652291106,
"grad_norm": 0.578633189201355,
"learning_rate": 0.0004999654614139233,
"loss": 3.6671,
"step": 15550
},
{
"epoch": 1.6819407008086253,
"grad_norm": 0.5630112290382385,
"learning_rate": 0.0004996416621694549,
"loss": 3.6661,
"step": 15600
},
{
"epoch": 1.68733153638814,
"grad_norm": 0.546072244644165,
"learning_rate": 0.0004993178629249864,
"loss": 3.663,
"step": 15650
},
{
"epoch": 1.692722371967655,
"grad_norm": 0.6002097725868225,
"learning_rate": 0.000498994063680518,
"loss": 3.6783,
"step": 15700
},
{
"epoch": 1.6981132075471699,
"grad_norm": 0.6249855756759644,
"learning_rate": 0.000498676740420939,
"loss": 3.6534,
"step": 15750
},
{
"epoch": 1.7035040431266846,
"grad_norm": 0.5264080166816711,
"learning_rate": 0.0004983529411764705,
"loss": 3.6742,
"step": 15800
},
{
"epoch": 1.7088948787061995,
"grad_norm": 0.5488578081130981,
"learning_rate": 0.0004980291419320022,
"loss": 3.6645,
"step": 15850
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.5747875571250916,
"learning_rate": 0.0004977053426875337,
"loss": 3.6661,
"step": 15900
},
{
"epoch": 1.719676549865229,
"grad_norm": 0.5004916191101074,
"learning_rate": 0.0004973815434430653,
"loss": 3.6556,
"step": 15950
},
{
"epoch": 1.7250673854447438,
"grad_norm": 0.5628464221954346,
"learning_rate": 0.0004970577441985968,
"loss": 3.6554,
"step": 16000
},
{
"epoch": 1.7250673854447438,
"eval_accuracy": 0.3569113721653684,
"eval_loss": 3.6237761974334717,
"eval_runtime": 181.1991,
"eval_samples_per_second": 99.399,
"eval_steps_per_second": 6.214,
"step": 16000
},
{
"epoch": 1.7304582210242587,
"grad_norm": 0.5369266271591187,
"learning_rate": 0.0004967339449541284,
"loss": 3.6758,
"step": 16050
},
{
"epoch": 1.7358490566037736,
"grad_norm": 0.575958788394928,
"learning_rate": 0.00049641014570966,
"loss": 3.6766,
"step": 16100
},
{
"epoch": 1.7412398921832883,
"grad_norm": 0.6171749234199524,
"learning_rate": 0.0004960863464651916,
"loss": 3.658,
"step": 16150
},
{
"epoch": 1.7466307277628033,
"grad_norm": 0.5248631238937378,
"learning_rate": 0.0004957625472207231,
"loss": 3.6652,
"step": 16200
},
{
"epoch": 1.7520215633423182,
"grad_norm": 0.5663668513298035,
"learning_rate": 0.0004954387479762547,
"loss": 3.6645,
"step": 16250
},
{
"epoch": 1.7574123989218329,
"grad_norm": 0.5405056476593018,
"learning_rate": 0.0004951149487317862,
"loss": 3.6633,
"step": 16300
},
{
"epoch": 1.7628032345013476,
"grad_norm": 0.5469680428504944,
"learning_rate": 0.0004947911494873178,
"loss": 3.6642,
"step": 16350
},
{
"epoch": 1.7681940700808625,
"grad_norm": 0.5379207134246826,
"learning_rate": 0.0004944673502428493,
"loss": 3.6666,
"step": 16400
},
{
"epoch": 1.7735849056603774,
"grad_norm": 0.5439175963401794,
"learning_rate": 0.000494143550998381,
"loss": 3.6703,
"step": 16450
},
{
"epoch": 1.778975741239892,
"grad_norm": 0.5585680603981018,
"learning_rate": 0.0004938197517539125,
"loss": 3.666,
"step": 16500
},
{
"epoch": 1.784366576819407,
"grad_norm": 0.5458288788795471,
"learning_rate": 0.0004934959525094441,
"loss": 3.6453,
"step": 16550
},
{
"epoch": 1.789757412398922,
"grad_norm": 0.5485697388648987,
"learning_rate": 0.0004931721532649756,
"loss": 3.6609,
"step": 16600
},
{
"epoch": 1.7951482479784366,
"grad_norm": 0.5749149918556213,
"learning_rate": 0.0004928483540205073,
"loss": 3.6645,
"step": 16650
},
{
"epoch": 1.8005390835579513,
"grad_norm": 0.5598737001419067,
"learning_rate": 0.0004925245547760388,
"loss": 3.6582,
"step": 16700
},
{
"epoch": 1.8059299191374663,
"grad_norm": 0.5720735788345337,
"learning_rate": 0.0004922007555315704,
"loss": 3.6479,
"step": 16750
},
{
"epoch": 1.8113207547169812,
"grad_norm": 0.544169008731842,
"learning_rate": 0.000491876956287102,
"loss": 3.6539,
"step": 16800
},
{
"epoch": 1.8167115902964959,
"grad_norm": 0.587104082107544,
"learning_rate": 0.0004915531570426335,
"loss": 3.6497,
"step": 16850
},
{
"epoch": 1.8221024258760108,
"grad_norm": 0.556251585483551,
"learning_rate": 0.0004912293577981652,
"loss": 3.6587,
"step": 16900
},
{
"epoch": 1.8274932614555257,
"grad_norm": 0.5477184057235718,
"learning_rate": 0.0004909055585536966,
"loss": 3.6421,
"step": 16950
},
{
"epoch": 1.8328840970350404,
"grad_norm": 0.5440017580986023,
"learning_rate": 0.0004905817593092283,
"loss": 3.6345,
"step": 17000
},
{
"epoch": 1.8328840970350404,
"eval_accuracy": 0.3582564949165117,
"eval_loss": 3.606245994567871,
"eval_runtime": 181.6324,
"eval_samples_per_second": 99.162,
"eval_steps_per_second": 6.199,
"step": 17000
},
{
"epoch": 1.838274932614555,
"grad_norm": 0.5022566318511963,
"learning_rate": 0.0004902579600647598,
"loss": 3.6459,
"step": 17050
},
{
"epoch": 1.8436657681940702,
"grad_norm": 0.572801947593689,
"learning_rate": 0.0004899341608202914,
"loss": 3.646,
"step": 17100
},
{
"epoch": 1.849056603773585,
"grad_norm": 0.5857950448989868,
"learning_rate": 0.0004896103615758229,
"loss": 3.6343,
"step": 17150
},
{
"epoch": 1.8544474393530996,
"grad_norm": 0.529326856136322,
"learning_rate": 0.0004892865623313546,
"loss": 3.6621,
"step": 17200
},
{
"epoch": 1.8598382749326146,
"grad_norm": 0.5415377616882324,
"learning_rate": 0.0004889627630868861,
"loss": 3.6692,
"step": 17250
},
{
"epoch": 1.8652291105121295,
"grad_norm": 0.5492255091667175,
"learning_rate": 0.0004886389638424177,
"loss": 3.6534,
"step": 17300
},
{
"epoch": 1.8706199460916442,
"grad_norm": 0.5427126884460449,
"learning_rate": 0.0004883151645979492,
"loss": 3.6425,
"step": 17350
},
{
"epoch": 1.8760107816711589,
"grad_norm": 0.561755359172821,
"learning_rate": 0.00048799136535348077,
"loss": 3.6583,
"step": 17400
},
{
"epoch": 1.881401617250674,
"grad_norm": 0.5410698056221008,
"learning_rate": 0.0004876675661090124,
"loss": 3.6306,
"step": 17450
},
{
"epoch": 1.8867924528301887,
"grad_norm": 0.542617678642273,
"learning_rate": 0.0004873437668645439,
"loss": 3.6534,
"step": 17500
},
{
"epoch": 1.8921832884097034,
"grad_norm": 0.5104948878288269,
"learning_rate": 0.00048701996762007553,
"loss": 3.6543,
"step": 17550
},
{
"epoch": 1.8975741239892183,
"grad_norm": 0.6261042356491089,
"learning_rate": 0.0004866961683756071,
"loss": 3.6425,
"step": 17600
},
{
"epoch": 1.9029649595687332,
"grad_norm": 0.5958898067474365,
"learning_rate": 0.0004863723691311387,
"loss": 3.6418,
"step": 17650
},
{
"epoch": 1.908355795148248,
"grad_norm": 0.5822218656539917,
"learning_rate": 0.00048604856988667024,
"loss": 3.6586,
"step": 17700
},
{
"epoch": 1.9137466307277629,
"grad_norm": 0.5634023547172546,
"learning_rate": 0.0004857247706422018,
"loss": 3.6335,
"step": 17750
},
{
"epoch": 1.9191374663072778,
"grad_norm": 0.5490560531616211,
"learning_rate": 0.00048540744738262274,
"loss": 3.6368,
"step": 17800
},
{
"epoch": 1.9245283018867925,
"grad_norm": 0.5121088027954102,
"learning_rate": 0.00048508364813815434,
"loss": 3.6266,
"step": 17850
},
{
"epoch": 1.9299191374663072,
"grad_norm": 0.5225231647491455,
"learning_rate": 0.00048475984889368584,
"loss": 3.6324,
"step": 17900
},
{
"epoch": 1.935309973045822,
"grad_norm": 0.5291708111763,
"learning_rate": 0.0004844360496492175,
"loss": 3.6424,
"step": 17950
},
{
"epoch": 1.940700808625337,
"grad_norm": 0.5437564849853516,
"learning_rate": 0.000484112250404749,
"loss": 3.6469,
"step": 18000
},
{
"epoch": 1.940700808625337,
"eval_accuracy": 0.3598998698447057,
"eval_loss": 3.5913586616516113,
"eval_runtime": 181.4123,
"eval_samples_per_second": 99.282,
"eval_steps_per_second": 6.207,
"step": 18000
},
{
"epoch": 1.9460916442048517,
"grad_norm": 0.5686632394790649,
"learning_rate": 0.00048378845116028055,
"loss": 3.6235,
"step": 18050
},
{
"epoch": 1.9514824797843666,
"grad_norm": 0.5239707231521606,
"learning_rate": 0.00048346465191581215,
"loss": 3.6487,
"step": 18100
},
{
"epoch": 1.9568733153638815,
"grad_norm": 0.5632966756820679,
"learning_rate": 0.0004831408526713437,
"loss": 3.642,
"step": 18150
},
{
"epoch": 1.9622641509433962,
"grad_norm": 0.5901452898979187,
"learning_rate": 0.0004828170534268753,
"loss": 3.6363,
"step": 18200
},
{
"epoch": 1.967654986522911,
"grad_norm": 0.5481223464012146,
"learning_rate": 0.00048249325418240686,
"loss": 3.6498,
"step": 18250
},
{
"epoch": 1.9730458221024259,
"grad_norm": 0.5510320067405701,
"learning_rate": 0.00048216945493793846,
"loss": 3.6471,
"step": 18300
},
{
"epoch": 1.9784366576819408,
"grad_norm": 0.5957186222076416,
"learning_rate": 0.00048184565569347,
"loss": 3.6295,
"step": 18350
},
{
"epoch": 1.9838274932614555,
"grad_norm": 0.5391411781311035,
"learning_rate": 0.0004815218564490016,
"loss": 3.6625,
"step": 18400
},
{
"epoch": 1.9892183288409704,
"grad_norm": 0.5556368231773376,
"learning_rate": 0.00048119805720453317,
"loss": 3.6166,
"step": 18450
},
{
"epoch": 1.9946091644204853,
"grad_norm": 0.6190194487571716,
"learning_rate": 0.0004808742579600647,
"loss": 3.6231,
"step": 18500
},
{
"epoch": 2.0,
"grad_norm": 1.197678804397583,
"learning_rate": 0.0004805504587155963,
"loss": 3.6412,
"step": 18550
},
{
"epoch": 2.0053908355795147,
"grad_norm": 0.5590324401855469,
"learning_rate": 0.0004802266594711278,
"loss": 3.5499,
"step": 18600
},
{
"epoch": 2.01078167115903,
"grad_norm": 0.5855118632316589,
"learning_rate": 0.0004799028602266594,
"loss": 3.5515,
"step": 18650
},
{
"epoch": 2.0161725067385445,
"grad_norm": 0.5683425664901733,
"learning_rate": 0.000479579060982191,
"loss": 3.5434,
"step": 18700
},
{
"epoch": 2.0215633423180592,
"grad_norm": 0.5408341884613037,
"learning_rate": 0.0004792552617377226,
"loss": 3.5613,
"step": 18750
},
{
"epoch": 2.026954177897574,
"grad_norm": 0.5905522108078003,
"learning_rate": 0.00047893146249325413,
"loss": 3.5492,
"step": 18800
},
{
"epoch": 2.032345013477089,
"grad_norm": 0.5483571887016296,
"learning_rate": 0.0004786076632487857,
"loss": 3.5309,
"step": 18850
},
{
"epoch": 2.0377358490566038,
"grad_norm": 0.5604623556137085,
"learning_rate": 0.0004782838640043173,
"loss": 3.5583,
"step": 18900
},
{
"epoch": 2.0431266846361185,
"grad_norm": 0.5344943404197693,
"learning_rate": 0.00047796006475984883,
"loss": 3.5482,
"step": 18950
},
{
"epoch": 2.0485175202156336,
"grad_norm": 0.5930701494216919,
"learning_rate": 0.00047763626551538044,
"loss": 3.5639,
"step": 19000
},
{
"epoch": 2.0485175202156336,
"eval_accuracy": 0.36086101329031256,
"eval_loss": 3.584812641143799,
"eval_runtime": 181.1798,
"eval_samples_per_second": 99.41,
"eval_steps_per_second": 6.215,
"step": 19000
},
{
"epoch": 2.0539083557951483,
"grad_norm": 0.597104012966156,
"learning_rate": 0.000477312466270912,
"loss": 3.5241,
"step": 19050
},
{
"epoch": 2.059299191374663,
"grad_norm": 0.5604715943336487,
"learning_rate": 0.0004769886670264436,
"loss": 3.5646,
"step": 19100
},
{
"epoch": 2.0646900269541777,
"grad_norm": 0.5276287198066711,
"learning_rate": 0.00047666486778197515,
"loss": 3.549,
"step": 19150
},
{
"epoch": 2.070080862533693,
"grad_norm": 0.5561823844909668,
"learning_rate": 0.00047634106853750675,
"loss": 3.5711,
"step": 19200
},
{
"epoch": 2.0754716981132075,
"grad_norm": 0.6093335151672363,
"learning_rate": 0.00047601726929303825,
"loss": 3.5684,
"step": 19250
},
{
"epoch": 2.0808625336927222,
"grad_norm": 0.588876485824585,
"learning_rate": 0.0004756934700485698,
"loss": 3.5646,
"step": 19300
},
{
"epoch": 2.0862533692722374,
"grad_norm": 0.5568276643753052,
"learning_rate": 0.0004753696708041014,
"loss": 3.5551,
"step": 19350
},
{
"epoch": 2.091644204851752,
"grad_norm": 0.5360884666442871,
"learning_rate": 0.00047504587155963295,
"loss": 3.5605,
"step": 19400
},
{
"epoch": 2.0970350404312668,
"grad_norm": 0.5348809361457825,
"learning_rate": 0.00047472207231516456,
"loss": 3.5486,
"step": 19450
},
{
"epoch": 2.1024258760107815,
"grad_norm": 0.6352576017379761,
"learning_rate": 0.0004743982730706961,
"loss": 3.5433,
"step": 19500
},
{
"epoch": 2.1078167115902966,
"grad_norm": 0.570564329624176,
"learning_rate": 0.0004740744738262277,
"loss": 3.5608,
"step": 19550
},
{
"epoch": 2.1132075471698113,
"grad_norm": 0.5465729832649231,
"learning_rate": 0.00047375067458175926,
"loss": 3.5578,
"step": 19600
},
{
"epoch": 2.118598382749326,
"grad_norm": 0.5874576568603516,
"learning_rate": 0.00047342687533729087,
"loss": 3.5579,
"step": 19650
},
{
"epoch": 2.123989218328841,
"grad_norm": 0.59136563539505,
"learning_rate": 0.0004731030760928224,
"loss": 3.5486,
"step": 19700
},
{
"epoch": 2.129380053908356,
"grad_norm": 0.6302415728569031,
"learning_rate": 0.00047277927684835397,
"loss": 3.5422,
"step": 19750
},
{
"epoch": 2.1347708894878705,
"grad_norm": 0.544438898563385,
"learning_rate": 0.0004724619535887749,
"loss": 3.5742,
"step": 19800
},
{
"epoch": 2.1401617250673857,
"grad_norm": 0.6034201383590698,
"learning_rate": 0.0004721381543443065,
"loss": 3.5728,
"step": 19850
},
{
"epoch": 2.1455525606469004,
"grad_norm": 0.6693742871284485,
"learning_rate": 0.000471814355099838,
"loss": 3.5634,
"step": 19900
},
{
"epoch": 2.150943396226415,
"grad_norm": 0.5606766939163208,
"learning_rate": 0.0004714905558553697,
"loss": 3.5611,
"step": 19950
},
{
"epoch": 2.1563342318059298,
"grad_norm": 0.600206196308136,
"learning_rate": 0.0004711667566109012,
"loss": 3.5606,
"step": 20000
},
{
"epoch": 2.1563342318059298,
"eval_accuracy": 0.36217603919153124,
"eval_loss": 3.5728728771209717,
"eval_runtime": 181.6212,
"eval_samples_per_second": 99.168,
"eval_steps_per_second": 6.2,
"step": 20000
},
{
"epoch": 2.161725067385445,
"grad_norm": 0.5841259956359863,
"learning_rate": 0.00047084295736643273,
"loss": 3.5593,
"step": 20050
},
{
"epoch": 2.1671159029649596,
"grad_norm": 0.6095578670501709,
"learning_rate": 0.00047051915812196433,
"loss": 3.5696,
"step": 20100
},
{
"epoch": 2.1725067385444743,
"grad_norm": 0.5807099938392639,
"learning_rate": 0.0004701953588774959,
"loss": 3.546,
"step": 20150
},
{
"epoch": 2.177897574123989,
"grad_norm": 0.5789870023727417,
"learning_rate": 0.0004698715596330275,
"loss": 3.5722,
"step": 20200
},
{
"epoch": 2.183288409703504,
"grad_norm": 0.5594269037246704,
"learning_rate": 0.00046954776038855904,
"loss": 3.5715,
"step": 20250
},
{
"epoch": 2.188679245283019,
"grad_norm": 0.643621027469635,
"learning_rate": 0.00046922396114409064,
"loss": 3.5454,
"step": 20300
},
{
"epoch": 2.1940700808625335,
"grad_norm": 0.5453854203224182,
"learning_rate": 0.0004689001618996222,
"loss": 3.5642,
"step": 20350
},
{
"epoch": 2.1994609164420487,
"grad_norm": 0.5129548907279968,
"learning_rate": 0.0004685763626551538,
"loss": 3.5621,
"step": 20400
},
{
"epoch": 2.2048517520215634,
"grad_norm": 0.6147149205207825,
"learning_rate": 0.00046825256341068535,
"loss": 3.5631,
"step": 20450
},
{
"epoch": 2.210242587601078,
"grad_norm": 0.5020909905433655,
"learning_rate": 0.0004679287641662169,
"loss": 3.5656,
"step": 20500
},
{
"epoch": 2.215633423180593,
"grad_norm": 0.6170671582221985,
"learning_rate": 0.0004676049649217485,
"loss": 3.5674,
"step": 20550
},
{
"epoch": 2.221024258760108,
"grad_norm": 0.5955941081047058,
"learning_rate": 0.00046728764166216946,
"loss": 3.5746,
"step": 20600
},
{
"epoch": 2.2264150943396226,
"grad_norm": 0.5670434832572937,
"learning_rate": 0.00046696384241770095,
"loss": 3.5567,
"step": 20650
},
{
"epoch": 2.2318059299191373,
"grad_norm": 0.570024311542511,
"learning_rate": 0.00046664004317323256,
"loss": 3.5609,
"step": 20700
},
{
"epoch": 2.2371967654986524,
"grad_norm": 0.5753904581069946,
"learning_rate": 0.0004663162439287641,
"loss": 3.563,
"step": 20750
},
{
"epoch": 2.242587601078167,
"grad_norm": 0.5676912665367126,
"learning_rate": 0.00046599244468429566,
"loss": 3.5655,
"step": 20800
},
{
"epoch": 2.247978436657682,
"grad_norm": 0.5626962780952454,
"learning_rate": 0.00046566864543982726,
"loss": 3.5484,
"step": 20850
},
{
"epoch": 2.2533692722371965,
"grad_norm": 0.6296677589416504,
"learning_rate": 0.0004653448461953588,
"loss": 3.5597,
"step": 20900
},
{
"epoch": 2.2587601078167117,
"grad_norm": 0.59935063123703,
"learning_rate": 0.0004650210469508904,
"loss": 3.5682,
"step": 20950
},
{
"epoch": 2.2641509433962264,
"grad_norm": 0.5628940463066101,
"learning_rate": 0.00046469724770642197,
"loss": 3.5497,
"step": 21000
},
{
"epoch": 2.2641509433962264,
"eval_accuracy": 0.3636019996912085,
"eval_loss": 3.5614914894104004,
"eval_runtime": 180.9956,
"eval_samples_per_second": 99.511,
"eval_steps_per_second": 6.221,
"step": 21000
},
{
"epoch": 2.269541778975741,
"grad_norm": 0.5957950353622437,
"learning_rate": 0.0004643734484619536,
"loss": 3.5417,
"step": 21050
},
{
"epoch": 2.274932614555256,
"grad_norm": 0.5501925349235535,
"learning_rate": 0.0004640496492174851,
"loss": 3.5728,
"step": 21100
},
{
"epoch": 2.280323450134771,
"grad_norm": 0.5697695016860962,
"learning_rate": 0.00046372584997301673,
"loss": 3.5687,
"step": 21150
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.5633260607719421,
"learning_rate": 0.0004634020507285483,
"loss": 3.5589,
"step": 21200
},
{
"epoch": 2.2911051212938007,
"grad_norm": 0.5334222316741943,
"learning_rate": 0.0004630782514840798,
"loss": 3.5327,
"step": 21250
},
{
"epoch": 2.2964959568733154,
"grad_norm": 0.5983673334121704,
"learning_rate": 0.0004627544522396114,
"loss": 3.5524,
"step": 21300
},
{
"epoch": 2.30188679245283,
"grad_norm": 0.5501682758331299,
"learning_rate": 0.00046243065299514293,
"loss": 3.5701,
"step": 21350
},
{
"epoch": 2.3072776280323453,
"grad_norm": 0.5882470011711121,
"learning_rate": 0.00046210685375067454,
"loss": 3.546,
"step": 21400
},
{
"epoch": 2.31266846361186,
"grad_norm": 0.5191405415534973,
"learning_rate": 0.0004617830545062061,
"loss": 3.5579,
"step": 21450
},
{
"epoch": 2.3180592991913747,
"grad_norm": 0.5627714395523071,
"learning_rate": 0.0004614592552617377,
"loss": 3.5624,
"step": 21500
},
{
"epoch": 2.3234501347708894,
"grad_norm": 0.578151285648346,
"learning_rate": 0.00046113545601726924,
"loss": 3.5502,
"step": 21550
},
{
"epoch": 2.3288409703504045,
"grad_norm": 0.5719261169433594,
"learning_rate": 0.00046081165677280085,
"loss": 3.5608,
"step": 21600
},
{
"epoch": 2.334231805929919,
"grad_norm": 0.5572363138198853,
"learning_rate": 0.0004604878575283324,
"loss": 3.5599,
"step": 21650
},
{
"epoch": 2.339622641509434,
"grad_norm": 0.6042853593826294,
"learning_rate": 0.00046016405828386395,
"loss": 3.561,
"step": 21700
},
{
"epoch": 2.3450134770889486,
"grad_norm": 0.5274094343185425,
"learning_rate": 0.00045984025903939555,
"loss": 3.5605,
"step": 21750
},
{
"epoch": 2.3504043126684637,
"grad_norm": 0.5416309237480164,
"learning_rate": 0.0004595164597949271,
"loss": 3.5678,
"step": 21800
},
{
"epoch": 2.3557951482479784,
"grad_norm": 0.5653731822967529,
"learning_rate": 0.0004591926605504587,
"loss": 3.5622,
"step": 21850
},
{
"epoch": 2.361185983827493,
"grad_norm": 0.5564022064208984,
"learning_rate": 0.0004588688613059902,
"loss": 3.5496,
"step": 21900
},
{
"epoch": 2.3665768194070083,
"grad_norm": 0.5706598162651062,
"learning_rate": 0.00045854506206152186,
"loss": 3.5628,
"step": 21950
},
{
"epoch": 2.371967654986523,
"grad_norm": 0.5681759715080261,
"learning_rate": 0.00045822126281705336,
"loss": 3.5624,
"step": 22000
},
{
"epoch": 2.371967654986523,
"eval_accuracy": 0.36438560431812655,
"eval_loss": 3.550971269607544,
"eval_runtime": 181.5333,
"eval_samples_per_second": 99.216,
"eval_steps_per_second": 6.203,
"step": 22000
},
{
"epoch": 2.3773584905660377,
"grad_norm": 0.6144738793373108,
"learning_rate": 0.00045789746357258497,
"loss": 3.538,
"step": 22050
},
{
"epoch": 2.382749326145553,
"grad_norm": 0.5621081590652466,
"learning_rate": 0.0004575736643281165,
"loss": 3.5622,
"step": 22100
},
{
"epoch": 2.3881401617250675,
"grad_norm": 0.5389125943183899,
"learning_rate": 0.00045724986508364807,
"loss": 3.5571,
"step": 22150
},
{
"epoch": 2.393530997304582,
"grad_norm": 0.6281595230102539,
"learning_rate": 0.00045692606583917967,
"loss": 3.5816,
"step": 22200
},
{
"epoch": 2.398921832884097,
"grad_norm": 0.573566734790802,
"learning_rate": 0.0004566022665947112,
"loss": 3.551,
"step": 22250
},
{
"epoch": 2.404312668463612,
"grad_norm": 0.6374450922012329,
"learning_rate": 0.0004562784673502428,
"loss": 3.5344,
"step": 22300
},
{
"epoch": 2.4097035040431267,
"grad_norm": 0.5555562376976013,
"learning_rate": 0.0004559546681057744,
"loss": 3.5563,
"step": 22350
},
{
"epoch": 2.4150943396226414,
"grad_norm": 0.5741384029388428,
"learning_rate": 0.000455630868861306,
"loss": 3.5455,
"step": 22400
},
{
"epoch": 2.420485175202156,
"grad_norm": 0.6011936664581299,
"learning_rate": 0.00045530706961683753,
"loss": 3.5751,
"step": 22450
},
{
"epoch": 2.4258760107816713,
"grad_norm": 0.6304056644439697,
"learning_rate": 0.00045498327037236914,
"loss": 3.5478,
"step": 22500
},
{
"epoch": 2.431266846361186,
"grad_norm": 0.5553408861160278,
"learning_rate": 0.0004546594711279007,
"loss": 3.5519,
"step": 22550
},
{
"epoch": 2.4366576819407006,
"grad_norm": 0.514491081237793,
"learning_rate": 0.0004543356718834322,
"loss": 3.5529,
"step": 22600
},
{
"epoch": 2.442048517520216,
"grad_norm": 0.5898559093475342,
"learning_rate": 0.0004540118726389638,
"loss": 3.5497,
"step": 22650
},
{
"epoch": 2.4474393530997305,
"grad_norm": 0.5183122158050537,
"learning_rate": 0.00045368807339449534,
"loss": 3.539,
"step": 22700
},
{
"epoch": 2.452830188679245,
"grad_norm": 0.588056206703186,
"learning_rate": 0.00045336427415002694,
"loss": 3.5642,
"step": 22750
},
{
"epoch": 2.4582210242587603,
"grad_norm": 0.5579840540885925,
"learning_rate": 0.0004530404749055585,
"loss": 3.5741,
"step": 22800
},
{
"epoch": 2.463611859838275,
"grad_norm": 0.6022590398788452,
"learning_rate": 0.0004527166756610901,
"loss": 3.5405,
"step": 22850
},
{
"epoch": 2.4690026954177897,
"grad_norm": 0.5468297600746155,
"learning_rate": 0.00045239287641662165,
"loss": 3.5504,
"step": 22900
},
{
"epoch": 2.4743935309973044,
"grad_norm": 0.576793372631073,
"learning_rate": 0.0004520690771721532,
"loss": 3.5537,
"step": 22950
},
{
"epoch": 2.4797843665768196,
"grad_norm": 0.5995750427246094,
"learning_rate": 0.0004517452779276848,
"loss": 3.5459,
"step": 23000
},
{
"epoch": 2.4797843665768196,
"eval_accuracy": 0.3654815859975112,
"eval_loss": 3.5396082401275635,
"eval_runtime": 180.9737,
"eval_samples_per_second": 99.523,
"eval_steps_per_second": 6.222,
"step": 23000
},
{
"epoch": 2.4851752021563343,
"grad_norm": 0.6596251130104065,
"learning_rate": 0.00045142147868321636,
"loss": 3.5446,
"step": 23050
},
{
"epoch": 2.490566037735849,
"grad_norm": 0.5252313017845154,
"learning_rate": 0.00045109767943874796,
"loss": 3.5398,
"step": 23100
},
{
"epoch": 2.4959568733153636,
"grad_norm": 0.6100819706916809,
"learning_rate": 0.0004507738801942795,
"loss": 3.5374,
"step": 23150
},
{
"epoch": 2.501347708894879,
"grad_norm": 0.5994240045547485,
"learning_rate": 0.0004504500809498111,
"loss": 3.5518,
"step": 23200
},
{
"epoch": 2.5067385444743935,
"grad_norm": 0.5911756753921509,
"learning_rate": 0.0004501262817053426,
"loss": 3.5651,
"step": 23250
},
{
"epoch": 2.512129380053908,
"grad_norm": 0.5529770851135254,
"learning_rate": 0.00044980248246087427,
"loss": 3.5623,
"step": 23300
},
{
"epoch": 2.5175202156334233,
"grad_norm": 0.5851297378540039,
"learning_rate": 0.00044947868321640577,
"loss": 3.5372,
"step": 23350
},
{
"epoch": 2.522911051212938,
"grad_norm": 0.5575948357582092,
"learning_rate": 0.0004491548839719373,
"loss": 3.5416,
"step": 23400
},
{
"epoch": 2.5283018867924527,
"grad_norm": 0.5790318250656128,
"learning_rate": 0.0004488310847274689,
"loss": 3.5464,
"step": 23450
},
{
"epoch": 2.533692722371968,
"grad_norm": 0.5353867411613464,
"learning_rate": 0.0004485072854830005,
"loss": 3.5237,
"step": 23500
},
{
"epoch": 2.5390835579514826,
"grad_norm": 0.6083977818489075,
"learning_rate": 0.0004481834862385321,
"loss": 3.5601,
"step": 23550
},
{
"epoch": 2.5444743935309972,
"grad_norm": 0.5814011096954346,
"learning_rate": 0.00044785968699406363,
"loss": 3.5533,
"step": 23600
},
{
"epoch": 2.5498652291105124,
"grad_norm": 0.5686614513397217,
"learning_rate": 0.00044753588774959523,
"loss": 3.5471,
"step": 23650
},
{
"epoch": 2.555256064690027,
"grad_norm": 0.6276265978813171,
"learning_rate": 0.0004472120885051268,
"loss": 3.5638,
"step": 23700
},
{
"epoch": 2.560646900269542,
"grad_norm": 0.5724161863327026,
"learning_rate": 0.0004468882892606584,
"loss": 3.5684,
"step": 23750
},
{
"epoch": 2.5660377358490565,
"grad_norm": 0.6355558633804321,
"learning_rate": 0.00044656449001618994,
"loss": 3.5441,
"step": 23800
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.6126329898834229,
"learning_rate": 0.0004462406907717215,
"loss": 3.5433,
"step": 23850
},
{
"epoch": 2.5768194070080863,
"grad_norm": 0.6300326585769653,
"learning_rate": 0.0004459168915272531,
"loss": 3.5409,
"step": 23900
},
{
"epoch": 2.582210242587601,
"grad_norm": 0.5199249386787415,
"learning_rate": 0.0004455930922827846,
"loss": 3.5678,
"step": 23950
},
{
"epoch": 2.5876010781671157,
"grad_norm": 0.5622717142105103,
"learning_rate": 0.0004452692930383162,
"loss": 3.5482,
"step": 24000
},
{
"epoch": 2.5876010781671157,
"eval_accuracy": 0.36659712519670246,
"eval_loss": 3.5317554473876953,
"eval_runtime": 181.2936,
"eval_samples_per_second": 99.347,
"eval_steps_per_second": 6.211,
"step": 24000
},
{
"epoch": 2.592991913746631,
"grad_norm": 0.6593669056892395,
"learning_rate": 0.00044494549379384775,
"loss": 3.5524,
"step": 24050
},
{
"epoch": 2.5983827493261455,
"grad_norm": 0.6096122860908508,
"learning_rate": 0.00044462169454937935,
"loss": 3.5285,
"step": 24100
},
{
"epoch": 2.6037735849056602,
"grad_norm": 0.5589276552200317,
"learning_rate": 0.0004442978953049109,
"loss": 3.5507,
"step": 24150
},
{
"epoch": 2.6091644204851754,
"grad_norm": 0.593991219997406,
"learning_rate": 0.0004439740960604425,
"loss": 3.5445,
"step": 24200
},
{
"epoch": 2.61455525606469,
"grad_norm": 0.5771310329437256,
"learning_rate": 0.00044365029681597406,
"loss": 3.5493,
"step": 24250
},
{
"epoch": 2.6199460916442048,
"grad_norm": 0.6104580760002136,
"learning_rate": 0.0004433264975715056,
"loss": 3.5519,
"step": 24300
},
{
"epoch": 2.62533692722372,
"grad_norm": 0.5800246000289917,
"learning_rate": 0.0004430026983270372,
"loss": 3.5551,
"step": 24350
},
{
"epoch": 2.6307277628032346,
"grad_norm": 0.5594913363456726,
"learning_rate": 0.00044267889908256876,
"loss": 3.5318,
"step": 24400
},
{
"epoch": 2.6361185983827493,
"grad_norm": 0.570894181728363,
"learning_rate": 0.00044235509983810037,
"loss": 3.5575,
"step": 24450
},
{
"epoch": 2.641509433962264,
"grad_norm": 0.5746343731880188,
"learning_rate": 0.0004420313005936319,
"loss": 3.5216,
"step": 24500
},
{
"epoch": 2.6469002695417787,
"grad_norm": 0.5852980613708496,
"learning_rate": 0.0004417075013491635,
"loss": 3.527,
"step": 24550
},
{
"epoch": 2.652291105121294,
"grad_norm": 0.6170436143875122,
"learning_rate": 0.0004413837021046951,
"loss": 3.5347,
"step": 24600
},
{
"epoch": 2.6576819407008085,
"grad_norm": 0.5268665552139282,
"learning_rate": 0.00044106637884511597,
"loss": 3.5423,
"step": 24650
},
{
"epoch": 2.6630727762803232,
"grad_norm": 0.5274102687835693,
"learning_rate": 0.0004407425796006475,
"loss": 3.542,
"step": 24700
},
{
"epoch": 2.6684636118598384,
"grad_norm": 0.562574028968811,
"learning_rate": 0.00044041878035617913,
"loss": 3.5585,
"step": 24750
},
{
"epoch": 2.673854447439353,
"grad_norm": 0.6228138208389282,
"learning_rate": 0.0004400949811117107,
"loss": 3.5354,
"step": 24800
},
{
"epoch": 2.6792452830188678,
"grad_norm": 0.5509768724441528,
"learning_rate": 0.0004397711818672423,
"loss": 3.534,
"step": 24850
},
{
"epoch": 2.684636118598383,
"grad_norm": 0.5812339186668396,
"learning_rate": 0.00043944738262277383,
"loss": 3.5431,
"step": 24900
},
{
"epoch": 2.6900269541778976,
"grad_norm": 0.5285933017730713,
"learning_rate": 0.00043912358337830544,
"loss": 3.5385,
"step": 24950
},
{
"epoch": 2.6954177897574123,
"grad_norm": 0.5929449200630188,
"learning_rate": 0.000438799784133837,
"loss": 3.5542,
"step": 25000
},
{
"epoch": 2.6954177897574123,
"eval_accuracy": 0.3673601944278236,
"eval_loss": 3.5220768451690674,
"eval_runtime": 181.2911,
"eval_samples_per_second": 99.349,
"eval_steps_per_second": 6.211,
"step": 25000
},
{
"epoch": 2.7008086253369274,
"grad_norm": 0.6103118658065796,
"learning_rate": 0.00043847598488936854,
"loss": 3.5537,
"step": 25050
},
{
"epoch": 2.706199460916442,
"grad_norm": 0.6209373474121094,
"learning_rate": 0.00043815218564490014,
"loss": 3.5647,
"step": 25100
},
{
"epoch": 2.711590296495957,
"grad_norm": 0.6222750544548035,
"learning_rate": 0.0004378283864004317,
"loss": 3.5444,
"step": 25150
},
{
"epoch": 2.7169811320754715,
"grad_norm": 0.5424869060516357,
"learning_rate": 0.0004375045871559633,
"loss": 3.5295,
"step": 25200
},
{
"epoch": 2.7223719676549867,
"grad_norm": 0.5619545578956604,
"learning_rate": 0.00043718078791149485,
"loss": 3.5323,
"step": 25250
},
{
"epoch": 2.7277628032345014,
"grad_norm": 0.5697439312934875,
"learning_rate": 0.00043685698866702645,
"loss": 3.5397,
"step": 25300
},
{
"epoch": 2.733153638814016,
"grad_norm": 0.5689240097999573,
"learning_rate": 0.00043653318942255795,
"loss": 3.5332,
"step": 25350
},
{
"epoch": 2.7385444743935308,
"grad_norm": 0.5533002018928528,
"learning_rate": 0.00043620939017808956,
"loss": 3.5223,
"step": 25400
},
{
"epoch": 2.743935309973046,
"grad_norm": 0.5482644438743591,
"learning_rate": 0.0004358855909336211,
"loss": 3.5409,
"step": 25450
},
{
"epoch": 2.7493261455525606,
"grad_norm": 0.5769422650337219,
"learning_rate": 0.00043556179168915266,
"loss": 3.5584,
"step": 25500
},
{
"epoch": 2.7547169811320753,
"grad_norm": 0.6086459159851074,
"learning_rate": 0.00043523799244468426,
"loss": 3.5519,
"step": 25550
},
{
"epoch": 2.7601078167115904,
"grad_norm": 0.5854797959327698,
"learning_rate": 0.0004349141932002158,
"loss": 3.5419,
"step": 25600
},
{
"epoch": 2.765498652291105,
"grad_norm": 0.5399619340896606,
"learning_rate": 0.0004345903939557474,
"loss": 3.5286,
"step": 25650
},
{
"epoch": 2.77088948787062,
"grad_norm": 0.5609824061393738,
"learning_rate": 0.00043426659471127897,
"loss": 3.5512,
"step": 25700
},
{
"epoch": 2.776280323450135,
"grad_norm": 0.5754132866859436,
"learning_rate": 0.00043394279546681057,
"loss": 3.5341,
"step": 25750
},
{
"epoch": 2.7816711590296497,
"grad_norm": 0.5857130289077759,
"learning_rate": 0.0004336189962223421,
"loss": 3.5377,
"step": 25800
},
{
"epoch": 2.7870619946091644,
"grad_norm": 0.5504029393196106,
"learning_rate": 0.0004332951969778737,
"loss": 3.5221,
"step": 25850
},
{
"epoch": 2.7924528301886795,
"grad_norm": 0.56480473279953,
"learning_rate": 0.0004329713977334053,
"loss": 3.5429,
"step": 25900
},
{
"epoch": 2.797843665768194,
"grad_norm": 0.5525421500205994,
"learning_rate": 0.0004326475984889368,
"loss": 3.5136,
"step": 25950
},
{
"epoch": 2.803234501347709,
"grad_norm": 0.6140046715736389,
"learning_rate": 0.0004323237992444684,
"loss": 3.5342,
"step": 26000
},
{
"epoch": 2.803234501347709,
"eval_accuracy": 0.3681909544080531,
"eval_loss": 3.5146098136901855,
"eval_runtime": 181.2808,
"eval_samples_per_second": 99.354,
"eval_steps_per_second": 6.211,
"step": 26000
},
{
"epoch": 2.8086253369272236,
"grad_norm": 0.5526379346847534,
"learning_rate": 0.00043199999999999993,
"loss": 3.5372,
"step": 26050
},
{
"epoch": 2.8140161725067383,
"grad_norm": 0.5372548699378967,
"learning_rate": 0.00043167620075553153,
"loss": 3.5251,
"step": 26100
},
{
"epoch": 2.8194070080862534,
"grad_norm": 0.5878198742866516,
"learning_rate": 0.0004313524015110631,
"loss": 3.536,
"step": 26150
},
{
"epoch": 2.824797843665768,
"grad_norm": 0.6414244174957275,
"learning_rate": 0.0004310286022665947,
"loss": 3.5469,
"step": 26200
},
{
"epoch": 2.830188679245283,
"grad_norm": 0.5342559218406677,
"learning_rate": 0.00043070480302212624,
"loss": 3.533,
"step": 26250
},
{
"epoch": 2.835579514824798,
"grad_norm": 0.5910995602607727,
"learning_rate": 0.0004303810037776578,
"loss": 3.532,
"step": 26300
},
{
"epoch": 2.8409703504043127,
"grad_norm": 0.4914022982120514,
"learning_rate": 0.0004300572045331894,
"loss": 3.5451,
"step": 26350
},
{
"epoch": 2.8463611859838274,
"grad_norm": 0.5698440074920654,
"learning_rate": 0.00042973340528872095,
"loss": 3.528,
"step": 26400
},
{
"epoch": 2.8517520215633425,
"grad_norm": 0.5266543030738831,
"learning_rate": 0.00042940960604425255,
"loss": 3.5381,
"step": 26450
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5502971410751343,
"learning_rate": 0.0004290858067997841,
"loss": 3.5261,
"step": 26500
},
{
"epoch": 2.862533692722372,
"grad_norm": 0.5623043775558472,
"learning_rate": 0.0004287620075553157,
"loss": 3.5085,
"step": 26550
},
{
"epoch": 2.867924528301887,
"grad_norm": 0.5682037472724915,
"learning_rate": 0.00042843820831084726,
"loss": 3.5304,
"step": 26600
},
{
"epoch": 2.8733153638814017,
"grad_norm": 0.5245981216430664,
"learning_rate": 0.00042811440906637886,
"loss": 3.5259,
"step": 26650
},
{
"epoch": 2.8787061994609164,
"grad_norm": 0.558342456817627,
"learning_rate": 0.00042779060982191036,
"loss": 3.5226,
"step": 26700
},
{
"epoch": 2.884097035040431,
"grad_norm": 0.5356988310813904,
"learning_rate": 0.0004274668105774419,
"loss": 3.5257,
"step": 26750
},
{
"epoch": 2.889487870619946,
"grad_norm": 0.5896562933921814,
"learning_rate": 0.0004271430113329735,
"loss": 3.5067,
"step": 26800
},
{
"epoch": 2.894878706199461,
"grad_norm": 0.5992358922958374,
"learning_rate": 0.00042682568807339447,
"loss": 3.5249,
"step": 26850
},
{
"epoch": 2.9002695417789757,
"grad_norm": 0.5337636470794678,
"learning_rate": 0.000426501888828926,
"loss": 3.5128,
"step": 26900
},
{
"epoch": 2.9056603773584904,
"grad_norm": 0.6004590392112732,
"learning_rate": 0.0004261780895844576,
"loss": 3.5133,
"step": 26950
},
{
"epoch": 2.9110512129380055,
"grad_norm": 0.5698912739753723,
"learning_rate": 0.00042585429033998917,
"loss": 3.5381,
"step": 27000
},
{
"epoch": 2.9110512129380055,
"eval_accuracy": 0.3692894351038575,
"eval_loss": 3.504966974258423,
"eval_runtime": 181.0594,
"eval_samples_per_second": 99.476,
"eval_steps_per_second": 6.219,
"step": 27000
},
{
"epoch": 2.91644204851752,
"grad_norm": 0.6112388372421265,
"learning_rate": 0.0004255304910955207,
"loss": 3.5501,
"step": 27050
},
{
"epoch": 2.921832884097035,
"grad_norm": 0.5333527326583862,
"learning_rate": 0.0004252066918510523,
"loss": 3.5325,
"step": 27100
},
{
"epoch": 2.92722371967655,
"grad_norm": 0.5607829689979553,
"learning_rate": 0.0004248828926065839,
"loss": 3.5464,
"step": 27150
},
{
"epoch": 2.9326145552560647,
"grad_norm": 0.5657780766487122,
"learning_rate": 0.0004245590933621155,
"loss": 3.5333,
"step": 27200
},
{
"epoch": 2.9380053908355794,
"grad_norm": 0.5746095180511475,
"learning_rate": 0.00042423529411764703,
"loss": 3.5474,
"step": 27250
},
{
"epoch": 2.9433962264150946,
"grad_norm": 0.604708194732666,
"learning_rate": 0.00042391149487317864,
"loss": 3.5284,
"step": 27300
},
{
"epoch": 2.9487870619946093,
"grad_norm": 0.6213203072547913,
"learning_rate": 0.00042358769562871013,
"loss": 3.5173,
"step": 27350
},
{
"epoch": 2.954177897574124,
"grad_norm": 0.5948765277862549,
"learning_rate": 0.00042326389638424174,
"loss": 3.5149,
"step": 27400
},
{
"epoch": 2.9595687331536387,
"grad_norm": 0.5501255989074707,
"learning_rate": 0.0004229400971397733,
"loss": 3.5339,
"step": 27450
},
{
"epoch": 2.964959568733154,
"grad_norm": 0.5301875472068787,
"learning_rate": 0.00042261629789530484,
"loss": 3.5331,
"step": 27500
},
{
"epoch": 2.9703504043126685,
"grad_norm": 0.5574668049812317,
"learning_rate": 0.00042229249865083644,
"loss": 3.522,
"step": 27550
},
{
"epoch": 2.975741239892183,
"grad_norm": 0.5755630135536194,
"learning_rate": 0.000421968699406368,
"loss": 3.5198,
"step": 27600
},
{
"epoch": 2.981132075471698,
"grad_norm": 0.54078608751297,
"learning_rate": 0.0004216449001618996,
"loss": 3.5438,
"step": 27650
},
{
"epoch": 2.986522911051213,
"grad_norm": 0.5769183039665222,
"learning_rate": 0.00042132110091743115,
"loss": 3.5085,
"step": 27700
},
{
"epoch": 2.9919137466307277,
"grad_norm": 0.5652152895927429,
"learning_rate": 0.00042099730167296275,
"loss": 3.5019,
"step": 27750
},
{
"epoch": 2.9973045822102424,
"grad_norm": 0.5847728848457336,
"learning_rate": 0.0004206735024284943,
"loss": 3.5239,
"step": 27800
},
{
"epoch": 3.0026954177897576,
"grad_norm": 0.5741530656814575,
"learning_rate": 0.0004203497031840259,
"loss": 3.4812,
"step": 27850
},
{
"epoch": 3.0080862533692723,
"grad_norm": 0.5797563195228577,
"learning_rate": 0.00042002590393955746,
"loss": 3.4285,
"step": 27900
},
{
"epoch": 3.013477088948787,
"grad_norm": 0.634753406047821,
"learning_rate": 0.00041970210469508896,
"loss": 3.4059,
"step": 27950
},
{
"epoch": 3.018867924528302,
"grad_norm": 0.5791683197021484,
"learning_rate": 0.00041937830545062056,
"loss": 3.4278,
"step": 28000
},
{
"epoch": 3.018867924528302,
"eval_accuracy": 0.37001849598109265,
"eval_loss": 3.502011299133301,
"eval_runtime": 181.3383,
"eval_samples_per_second": 99.323,
"eval_steps_per_second": 6.209,
"step": 28000
},
{
"epoch": 3.024258760107817,
"grad_norm": 0.603844165802002,
"learning_rate": 0.0004190545062061521,
"loss": 3.4184,
"step": 28050
},
{
"epoch": 3.0296495956873315,
"grad_norm": 0.5584900975227356,
"learning_rate": 0.0004187307069616837,
"loss": 3.435,
"step": 28100
},
{
"epoch": 3.035040431266846,
"grad_norm": 0.5561067461967468,
"learning_rate": 0.00041840690771721527,
"loss": 3.4382,
"step": 28150
},
{
"epoch": 3.0404312668463613,
"grad_norm": 0.6057212948799133,
"learning_rate": 0.00041808310847274687,
"loss": 3.435,
"step": 28200
},
{
"epoch": 3.045822102425876,
"grad_norm": 0.5753740668296814,
"learning_rate": 0.0004177593092282784,
"loss": 3.4403,
"step": 28250
},
{
"epoch": 3.0512129380053907,
"grad_norm": 0.5986172556877136,
"learning_rate": 0.00041743550998381,
"loss": 3.4394,
"step": 28300
},
{
"epoch": 3.056603773584906,
"grad_norm": 0.5863624811172485,
"learning_rate": 0.0004171117107393416,
"loss": 3.4487,
"step": 28350
},
{
"epoch": 3.0619946091644206,
"grad_norm": 0.6170995831489563,
"learning_rate": 0.00041678791149487313,
"loss": 3.4238,
"step": 28400
},
{
"epoch": 3.0673854447439353,
"grad_norm": 0.5697750449180603,
"learning_rate": 0.00041646411225040473,
"loss": 3.4469,
"step": 28450
},
{
"epoch": 3.07277628032345,
"grad_norm": 0.5869051814079285,
"learning_rate": 0.0004161403130059363,
"loss": 3.4314,
"step": 28500
},
{
"epoch": 3.078167115902965,
"grad_norm": 0.6245196461677551,
"learning_rate": 0.0004158165137614679,
"loss": 3.4267,
"step": 28550
},
{
"epoch": 3.08355795148248,
"grad_norm": 0.5763593912124634,
"learning_rate": 0.00041549271451699944,
"loss": 3.4496,
"step": 28600
},
{
"epoch": 3.0889487870619945,
"grad_norm": 0.6235705018043518,
"learning_rate": 0.00041516891527253104,
"loss": 3.4518,
"step": 28650
},
{
"epoch": 3.0943396226415096,
"grad_norm": 0.5487541556358337,
"learning_rate": 0.00041484511602806254,
"loss": 3.4529,
"step": 28700
},
{
"epoch": 3.0997304582210243,
"grad_norm": 0.5920215845108032,
"learning_rate": 0.0004145213167835941,
"loss": 3.4479,
"step": 28750
},
{
"epoch": 3.105121293800539,
"grad_norm": 0.6436508893966675,
"learning_rate": 0.0004141975175391257,
"loss": 3.4435,
"step": 28800
},
{
"epoch": 3.1105121293800537,
"grad_norm": 0.6300854682922363,
"learning_rate": 0.00041387371829465725,
"loss": 3.4504,
"step": 28850
},
{
"epoch": 3.115902964959569,
"grad_norm": 0.6098328828811646,
"learning_rate": 0.00041354991905018885,
"loss": 3.4489,
"step": 28900
},
{
"epoch": 3.1212938005390836,
"grad_norm": 0.5779024958610535,
"learning_rate": 0.0004132261198057204,
"loss": 3.4428,
"step": 28950
},
{
"epoch": 3.1266846361185983,
"grad_norm": 0.6418079137802124,
"learning_rate": 0.00041290879654614135,
"loss": 3.4513,
"step": 29000
},
{
"epoch": 3.1266846361185983,
"eval_accuracy": 0.37081785527674377,
"eval_loss": 3.4941792488098145,
"eval_runtime": 181.2333,
"eval_samples_per_second": 99.38,
"eval_steps_per_second": 6.213,
"step": 29000
},
{
"epoch": 3.1320754716981134,
"grad_norm": 0.5885114073753357,
"learning_rate": 0.00041258499730167296,
"loss": 3.431,
"step": 29050
},
{
"epoch": 3.137466307277628,
"grad_norm": 0.5644581913948059,
"learning_rate": 0.0004122611980572045,
"loss": 3.4565,
"step": 29100
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.5936179161071777,
"learning_rate": 0.00041193739881273606,
"loss": 3.451,
"step": 29150
},
{
"epoch": 3.1482479784366575,
"grad_norm": 0.5671179294586182,
"learning_rate": 0.00041161359956826766,
"loss": 3.4676,
"step": 29200
},
{
"epoch": 3.1536388140161726,
"grad_norm": 0.5778055191040039,
"learning_rate": 0.0004112898003237992,
"loss": 3.4678,
"step": 29250
},
{
"epoch": 3.1590296495956873,
"grad_norm": 0.6444634795188904,
"learning_rate": 0.0004109660010793308,
"loss": 3.4602,
"step": 29300
},
{
"epoch": 3.164420485175202,
"grad_norm": 0.5693305730819702,
"learning_rate": 0.0004106422018348623,
"loss": 3.4477,
"step": 29350
},
{
"epoch": 3.169811320754717,
"grad_norm": 0.6001082062721252,
"learning_rate": 0.0004103184025903939,
"loss": 3.4583,
"step": 29400
},
{
"epoch": 3.175202156334232,
"grad_norm": 0.598551332950592,
"learning_rate": 0.00040999460334592547,
"loss": 3.4711,
"step": 29450
},
{
"epoch": 3.1805929919137466,
"grad_norm": 0.6061559319496155,
"learning_rate": 0.000409670804101457,
"loss": 3.4438,
"step": 29500
},
{
"epoch": 3.1859838274932613,
"grad_norm": 0.6079073548316956,
"learning_rate": 0.00040934700485698863,
"loss": 3.4549,
"step": 29550
},
{
"epoch": 3.1913746630727764,
"grad_norm": 0.5677346587181091,
"learning_rate": 0.0004090232056125202,
"loss": 3.4684,
"step": 29600
},
{
"epoch": 3.196765498652291,
"grad_norm": 0.5982911586761475,
"learning_rate": 0.0004086994063680518,
"loss": 3.4518,
"step": 29650
},
{
"epoch": 3.202156334231806,
"grad_norm": 0.5999668836593628,
"learning_rate": 0.00040837560712358333,
"loss": 3.4704,
"step": 29700
},
{
"epoch": 3.207547169811321,
"grad_norm": 0.6423681974411011,
"learning_rate": 0.00040805180787911494,
"loss": 3.4663,
"step": 29750
},
{
"epoch": 3.2129380053908356,
"grad_norm": 0.6018577218055725,
"learning_rate": 0.0004077280086346465,
"loss": 3.4421,
"step": 29800
},
{
"epoch": 3.2183288409703503,
"grad_norm": 0.6331573128700256,
"learning_rate": 0.0004074042093901781,
"loss": 3.4647,
"step": 29850
},
{
"epoch": 3.223719676549865,
"grad_norm": 0.5911356210708618,
"learning_rate": 0.00040708041014570964,
"loss": 3.4587,
"step": 29900
},
{
"epoch": 3.22911051212938,
"grad_norm": 0.6494351625442505,
"learning_rate": 0.00040675661090124114,
"loss": 3.4525,
"step": 29950
},
{
"epoch": 3.234501347708895,
"grad_norm": 0.6582809090614319,
"learning_rate": 0.0004064328116567728,
"loss": 3.4627,
"step": 30000
},
{
"epoch": 3.234501347708895,
"eval_accuracy": 0.37125040242313323,
"eval_loss": 3.4898300170898438,
"eval_runtime": 181.4533,
"eval_samples_per_second": 99.26,
"eval_steps_per_second": 6.205,
"step": 30000
},
{
"epoch": 3.2398921832884096,
"grad_norm": 0.567914605140686,
"learning_rate": 0.0004061090124123043,
"loss": 3.439,
"step": 30050
},
{
"epoch": 3.2452830188679247,
"grad_norm": 0.8371458053588867,
"learning_rate": 0.0004057852131678359,
"loss": 3.457,
"step": 30100
},
{
"epoch": 3.2506738544474394,
"grad_norm": 0.5806177258491516,
"learning_rate": 0.00040546141392336745,
"loss": 3.4592,
"step": 30150
},
{
"epoch": 3.256064690026954,
"grad_norm": 0.5861149430274963,
"learning_rate": 0.00040513761467889906,
"loss": 3.4733,
"step": 30200
},
{
"epoch": 3.2614555256064692,
"grad_norm": 0.5776110291481018,
"learning_rate": 0.0004048138154344306,
"loss": 3.4591,
"step": 30250
},
{
"epoch": 3.266846361185984,
"grad_norm": 0.6323752403259277,
"learning_rate": 0.0004044900161899622,
"loss": 3.4368,
"step": 30300
},
{
"epoch": 3.2722371967654986,
"grad_norm": 0.5726909637451172,
"learning_rate": 0.00040416621694549376,
"loss": 3.4548,
"step": 30350
},
{
"epoch": 3.2776280323450133,
"grad_norm": 0.6342945098876953,
"learning_rate": 0.0004038424177010253,
"loss": 3.4659,
"step": 30400
},
{
"epoch": 3.2830188679245285,
"grad_norm": 0.8313270211219788,
"learning_rate": 0.0004035186184565569,
"loss": 3.4545,
"step": 30450
},
{
"epoch": 3.288409703504043,
"grad_norm": 0.6741660833358765,
"learning_rate": 0.00040319481921208847,
"loss": 3.4664,
"step": 30500
},
{
"epoch": 3.293800539083558,
"grad_norm": 0.6140432357788086,
"learning_rate": 0.00040287101996762007,
"loss": 3.4624,
"step": 30550
},
{
"epoch": 3.2991913746630726,
"grad_norm": 0.5720652937889099,
"learning_rate": 0.0004025472207231516,
"loss": 3.4554,
"step": 30600
},
{
"epoch": 3.3045822102425877,
"grad_norm": 0.597766637802124,
"learning_rate": 0.00040222342147868323,
"loss": 3.4516,
"step": 30650
},
{
"epoch": 3.3099730458221024,
"grad_norm": 0.644696831703186,
"learning_rate": 0.0004018996222342147,
"loss": 3.4671,
"step": 30700
},
{
"epoch": 3.315363881401617,
"grad_norm": 0.5738349556922913,
"learning_rate": 0.00040157582298974633,
"loss": 3.4429,
"step": 30750
},
{
"epoch": 3.3207547169811322,
"grad_norm": 0.6259222030639648,
"learning_rate": 0.0004012520237452779,
"loss": 3.4399,
"step": 30800
},
{
"epoch": 3.326145552560647,
"grad_norm": 0.6019585728645325,
"learning_rate": 0.00040092822450080943,
"loss": 3.4413,
"step": 30850
},
{
"epoch": 3.3315363881401616,
"grad_norm": 0.5780307054519653,
"learning_rate": 0.00040060442525634103,
"loss": 3.4696,
"step": 30900
},
{
"epoch": 3.3369272237196768,
"grad_norm": 0.5876522660255432,
"learning_rate": 0.0004002806260118726,
"loss": 3.4682,
"step": 30950
},
{
"epoch": 3.3423180592991915,
"grad_norm": 0.58372563123703,
"learning_rate": 0.0003999568267674042,
"loss": 3.4463,
"step": 31000
},
{
"epoch": 3.3423180592991915,
"eval_accuracy": 0.37201792642265474,
"eval_loss": 3.4831817150115967,
"eval_runtime": 181.0431,
"eval_samples_per_second": 99.485,
"eval_steps_per_second": 6.22,
"step": 31000
},
{
"epoch": 3.347708894878706,
"grad_norm": 0.5882640480995178,
"learning_rate": 0.00039963950350782514,
"loss": 3.4554,
"step": 31050
},
{
"epoch": 3.353099730458221,
"grad_norm": 0.6506269574165344,
"learning_rate": 0.0003993157042633567,
"loss": 3.4735,
"step": 31100
},
{
"epoch": 3.358490566037736,
"grad_norm": 0.5855028033256531,
"learning_rate": 0.00039899190501888824,
"loss": 3.4553,
"step": 31150
},
{
"epoch": 3.3638814016172507,
"grad_norm": 0.5879990458488464,
"learning_rate": 0.00039866810577441985,
"loss": 3.4785,
"step": 31200
},
{
"epoch": 3.3692722371967654,
"grad_norm": 0.5899655222892761,
"learning_rate": 0.0003983443065299514,
"loss": 3.4739,
"step": 31250
},
{
"epoch": 3.37466307277628,
"grad_norm": 0.5837485790252686,
"learning_rate": 0.000398020507285483,
"loss": 3.4662,
"step": 31300
},
{
"epoch": 3.3800539083557952,
"grad_norm": 0.584699809551239,
"learning_rate": 0.0003976967080410145,
"loss": 3.4639,
"step": 31350
},
{
"epoch": 3.38544474393531,
"grad_norm": 0.5864905118942261,
"learning_rate": 0.0003973729087965461,
"loss": 3.4694,
"step": 31400
},
{
"epoch": 3.3908355795148246,
"grad_norm": 0.6279877424240112,
"learning_rate": 0.00039704910955207765,
"loss": 3.4608,
"step": 31450
},
{
"epoch": 3.3962264150943398,
"grad_norm": 0.7739274501800537,
"learning_rate": 0.00039672531030760926,
"loss": 3.4622,
"step": 31500
},
{
"epoch": 3.4016172506738545,
"grad_norm": 0.6367618441581726,
"learning_rate": 0.0003964015110631408,
"loss": 3.4693,
"step": 31550
},
{
"epoch": 3.407008086253369,
"grad_norm": 0.622832179069519,
"learning_rate": 0.00039607771181867236,
"loss": 3.4576,
"step": 31600
},
{
"epoch": 3.4123989218328843,
"grad_norm": 0.5996997356414795,
"learning_rate": 0.00039575391257420397,
"loss": 3.4453,
"step": 31650
},
{
"epoch": 3.417789757412399,
"grad_norm": 0.5975577235221863,
"learning_rate": 0.0003954301133297355,
"loss": 3.4643,
"step": 31700
},
{
"epoch": 3.4231805929919137,
"grad_norm": 0.6235230565071106,
"learning_rate": 0.0003951063140852671,
"loss": 3.466,
"step": 31750
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.5875454545021057,
"learning_rate": 0.00039478251484079867,
"loss": 3.4483,
"step": 31800
},
{
"epoch": 3.4339622641509435,
"grad_norm": 0.6354802846908569,
"learning_rate": 0.0003944587155963303,
"loss": 3.4622,
"step": 31850
},
{
"epoch": 3.439353099730458,
"grad_norm": 0.6226475834846497,
"learning_rate": 0.0003941349163518618,
"loss": 3.4724,
"step": 31900
},
{
"epoch": 3.444743935309973,
"grad_norm": 0.5510740280151367,
"learning_rate": 0.00039381111710739343,
"loss": 3.4656,
"step": 31950
},
{
"epoch": 3.450134770889488,
"grad_norm": 0.5717306733131409,
"learning_rate": 0.000393487317862925,
"loss": 3.461,
"step": 32000
},
{
"epoch": 3.450134770889488,
"eval_accuracy": 0.3726614774771799,
"eval_loss": 3.4775688648223877,
"eval_runtime": 181.8998,
"eval_samples_per_second": 99.016,
"eval_steps_per_second": 6.19,
"step": 32000
},
{
"epoch": 3.4555256064690028,
"grad_norm": 0.591214120388031,
"learning_rate": 0.0003931635186184565,
"loss": 3.4622,
"step": 32050
},
{
"epoch": 3.4609164420485174,
"grad_norm": 0.6022748351097107,
"learning_rate": 0.0003928397193739881,
"loss": 3.4683,
"step": 32100
},
{
"epoch": 3.466307277628032,
"grad_norm": 0.5917901992797852,
"learning_rate": 0.00039251592012951963,
"loss": 3.4647,
"step": 32150
},
{
"epoch": 3.4716981132075473,
"grad_norm": 0.5644868016242981,
"learning_rate": 0.00039219212088505124,
"loss": 3.4447,
"step": 32200
},
{
"epoch": 3.477088948787062,
"grad_norm": 0.5835516452789307,
"learning_rate": 0.0003918683216405828,
"loss": 3.46,
"step": 32250
},
{
"epoch": 3.4824797843665767,
"grad_norm": 0.6037916541099548,
"learning_rate": 0.0003915445223961144,
"loss": 3.4795,
"step": 32300
},
{
"epoch": 3.487870619946092,
"grad_norm": 0.6386679410934448,
"learning_rate": 0.00039122072315164594,
"loss": 3.4523,
"step": 32350
},
{
"epoch": 3.4932614555256065,
"grad_norm": 0.5861144661903381,
"learning_rate": 0.0003908969239071775,
"loss": 3.4626,
"step": 32400
},
{
"epoch": 3.498652291105121,
"grad_norm": 0.6120802164077759,
"learning_rate": 0.0003905731246627091,
"loss": 3.4513,
"step": 32450
},
{
"epoch": 3.5040431266846364,
"grad_norm": 0.5773576498031616,
"learning_rate": 0.00039024932541824065,
"loss": 3.4613,
"step": 32500
},
{
"epoch": 3.509433962264151,
"grad_norm": 0.6126224994659424,
"learning_rate": 0.00038992552617377225,
"loss": 3.4569,
"step": 32550
},
{
"epoch": 3.5148247978436657,
"grad_norm": 0.567841649055481,
"learning_rate": 0.0003896017269293038,
"loss": 3.4605,
"step": 32600
},
{
"epoch": 3.5202156334231804,
"grad_norm": 0.6432450413703918,
"learning_rate": 0.0003892779276848354,
"loss": 3.4536,
"step": 32650
},
{
"epoch": 3.525606469002695,
"grad_norm": 0.5933105945587158,
"learning_rate": 0.0003889541284403669,
"loss": 3.4661,
"step": 32700
},
{
"epoch": 3.5309973045822103,
"grad_norm": 0.6399059295654297,
"learning_rate": 0.0003886303291958985,
"loss": 3.4539,
"step": 32750
},
{
"epoch": 3.536388140161725,
"grad_norm": 0.5856630802154541,
"learning_rate": 0.00038830652995143006,
"loss": 3.458,
"step": 32800
},
{
"epoch": 3.5417789757412397,
"grad_norm": 0.6280727386474609,
"learning_rate": 0.0003879827307069616,
"loss": 3.4386,
"step": 32850
},
{
"epoch": 3.547169811320755,
"grad_norm": 0.5890994668006897,
"learning_rate": 0.0003876589314624932,
"loss": 3.4536,
"step": 32900
},
{
"epoch": 3.5525606469002695,
"grad_norm": 0.5725958943367004,
"learning_rate": 0.00038733513221802477,
"loss": 3.4458,
"step": 32950
},
{
"epoch": 3.557951482479784,
"grad_norm": 0.5895776748657227,
"learning_rate": 0.00038701133297355637,
"loss": 3.4454,
"step": 33000
},
{
"epoch": 3.557951482479784,
"eval_accuracy": 0.3733202399359991,
"eval_loss": 3.471012830734253,
"eval_runtime": 181.5491,
"eval_samples_per_second": 99.207,
"eval_steps_per_second": 6.202,
"step": 33000
},
{
"epoch": 3.5633423180592994,
"grad_norm": 0.6420202851295471,
"learning_rate": 0.0003866940097139773,
"loss": 3.4538,
"step": 33050
},
{
"epoch": 3.568733153638814,
"grad_norm": 0.6431132555007935,
"learning_rate": 0.0003863702104695089,
"loss": 3.465,
"step": 33100
},
{
"epoch": 3.5741239892183287,
"grad_norm": 0.5854994058609009,
"learning_rate": 0.0003860464112250404,
"loss": 3.4379,
"step": 33150
},
{
"epoch": 3.579514824797844,
"grad_norm": 0.5908634066581726,
"learning_rate": 0.00038572261198057203,
"loss": 3.4458,
"step": 33200
},
{
"epoch": 3.5849056603773586,
"grad_norm": 0.6732088327407837,
"learning_rate": 0.0003853988127361036,
"loss": 3.4777,
"step": 33250
},
{
"epoch": 3.5902964959568733,
"grad_norm": 0.6526401042938232,
"learning_rate": 0.0003850750134916352,
"loss": 3.466,
"step": 33300
},
{
"epoch": 3.595687331536388,
"grad_norm": 0.5792251229286194,
"learning_rate": 0.0003847512142471667,
"loss": 3.4519,
"step": 33350
},
{
"epoch": 3.601078167115903,
"grad_norm": 0.6356695890426636,
"learning_rate": 0.0003844274150026983,
"loss": 3.4533,
"step": 33400
},
{
"epoch": 3.606469002695418,
"grad_norm": 0.6451331973075867,
"learning_rate": 0.00038410361575822984,
"loss": 3.4628,
"step": 33450
},
{
"epoch": 3.6118598382749325,
"grad_norm": 0.6042508482933044,
"learning_rate": 0.00038377981651376144,
"loss": 3.4444,
"step": 33500
},
{
"epoch": 3.617250673854447,
"grad_norm": 0.6613562703132629,
"learning_rate": 0.000383456017269293,
"loss": 3.4521,
"step": 33550
},
{
"epoch": 3.6226415094339623,
"grad_norm": 0.630096971988678,
"learning_rate": 0.00038313221802482454,
"loss": 3.4584,
"step": 33600
},
{
"epoch": 3.628032345013477,
"grad_norm": 0.6092630624771118,
"learning_rate": 0.00038280841878035615,
"loss": 3.4683,
"step": 33650
},
{
"epoch": 3.6334231805929917,
"grad_norm": 0.5494804978370667,
"learning_rate": 0.0003824910955207771,
"loss": 3.4633,
"step": 33700
},
{
"epoch": 3.638814016172507,
"grad_norm": 0.6219018697738647,
"learning_rate": 0.00038216729627630865,
"loss": 3.4468,
"step": 33750
},
{
"epoch": 3.6442048517520216,
"grad_norm": 0.5919956564903259,
"learning_rate": 0.00038184349703184026,
"loss": 3.4635,
"step": 33800
},
{
"epoch": 3.6495956873315363,
"grad_norm": 0.6003981828689575,
"learning_rate": 0.0003815196977873718,
"loss": 3.4612,
"step": 33850
},
{
"epoch": 3.6549865229110514,
"grad_norm": 0.6288914680480957,
"learning_rate": 0.0003811958985429034,
"loss": 3.451,
"step": 33900
},
{
"epoch": 3.660377358490566,
"grad_norm": 0.6367020010948181,
"learning_rate": 0.00038087209929843496,
"loss": 3.4472,
"step": 33950
},
{
"epoch": 3.665768194070081,
"grad_norm": 0.6496438980102539,
"learning_rate": 0.00038054830005396646,
"loss": 3.4627,
"step": 34000
},
{
"epoch": 3.665768194070081,
"eval_accuracy": 0.3740616872424451,
"eval_loss": 3.463249444961548,
"eval_runtime": 181.8587,
"eval_samples_per_second": 99.038,
"eval_steps_per_second": 6.192,
"step": 34000
},
{
"epoch": 3.671159029649596,
"grad_norm": 0.653170645236969,
"learning_rate": 0.00038022450080949806,
"loss": 3.4457,
"step": 34050
},
{
"epoch": 3.6765498652291106,
"grad_norm": 0.6262568831443787,
"learning_rate": 0.0003799007015650296,
"loss": 3.4609,
"step": 34100
},
{
"epoch": 3.6819407008086253,
"grad_norm": 0.6341161131858826,
"learning_rate": 0.0003795769023205612,
"loss": 3.4609,
"step": 34150
},
{
"epoch": 3.68733153638814,
"grad_norm": 0.6720100045204163,
"learning_rate": 0.00037925310307609277,
"loss": 3.4422,
"step": 34200
},
{
"epoch": 3.6927223719676547,
"grad_norm": 0.6978814601898193,
"learning_rate": 0.0003789293038316244,
"loss": 3.4553,
"step": 34250
},
{
"epoch": 3.69811320754717,
"grad_norm": 0.6090667843818665,
"learning_rate": 0.0003786055045871559,
"loss": 3.4458,
"step": 34300
},
{
"epoch": 3.7035040431266846,
"grad_norm": 0.5765736103057861,
"learning_rate": 0.0003782817053426875,
"loss": 3.4688,
"step": 34350
},
{
"epoch": 3.7088948787061993,
"grad_norm": 0.6406888365745544,
"learning_rate": 0.0003779579060982191,
"loss": 3.4534,
"step": 34400
},
{
"epoch": 3.7142857142857144,
"grad_norm": 0.6476412415504456,
"learning_rate": 0.00037763410685375063,
"loss": 3.4412,
"step": 34450
},
{
"epoch": 3.719676549865229,
"grad_norm": 0.81647789478302,
"learning_rate": 0.00037731030760928223,
"loss": 3.4558,
"step": 34500
},
{
"epoch": 3.725067385444744,
"grad_norm": 0.6053717732429504,
"learning_rate": 0.0003769865083648138,
"loss": 3.4615,
"step": 34550
},
{
"epoch": 3.730458221024259,
"grad_norm": 0.7041976451873779,
"learning_rate": 0.0003766627091203454,
"loss": 3.4616,
"step": 34600
},
{
"epoch": 3.7358490566037736,
"grad_norm": 0.6277915239334106,
"learning_rate": 0.00037633890987587694,
"loss": 3.453,
"step": 34650
},
{
"epoch": 3.7412398921832883,
"grad_norm": 0.5918686985969543,
"learning_rate": 0.00037601511063140855,
"loss": 3.4564,
"step": 34700
},
{
"epoch": 3.7466307277628035,
"grad_norm": 0.6289958357810974,
"learning_rate": 0.00037569131138694004,
"loss": 3.4394,
"step": 34750
},
{
"epoch": 3.752021563342318,
"grad_norm": 0.6078783869743347,
"learning_rate": 0.0003753675121424716,
"loss": 3.4725,
"step": 34800
},
{
"epoch": 3.757412398921833,
"grad_norm": 0.6587103009223938,
"learning_rate": 0.0003750437128980032,
"loss": 3.461,
"step": 34850
},
{
"epoch": 3.7628032345013476,
"grad_norm": 0.6301190853118896,
"learning_rate": 0.00037471991365353475,
"loss": 3.4595,
"step": 34900
},
{
"epoch": 3.7681940700808623,
"grad_norm": 0.6365683078765869,
"learning_rate": 0.00037439611440906635,
"loss": 3.4571,
"step": 34950
},
{
"epoch": 3.7735849056603774,
"grad_norm": 0.6449572443962097,
"learning_rate": 0.0003740723151645979,
"loss": 3.444,
"step": 35000
},
{
"epoch": 3.7735849056603774,
"eval_accuracy": 0.37428866312508957,
"eval_loss": 3.458812952041626,
"eval_runtime": 181.8189,
"eval_samples_per_second": 99.06,
"eval_steps_per_second": 6.193,
"step": 35000
},
{
"epoch": 3.778975741239892,
"grad_norm": 0.6829414963722229,
"learning_rate": 0.0003737485159201295,
"loss": 3.4582,
"step": 35050
},
{
"epoch": 3.784366576819407,
"grad_norm": 0.6117042303085327,
"learning_rate": 0.00037342471667566106,
"loss": 3.4517,
"step": 35100
},
{
"epoch": 3.789757412398922,
"grad_norm": 0.6252544522285461,
"learning_rate": 0.00037310091743119266,
"loss": 3.4467,
"step": 35150
},
{
"epoch": 3.7951482479784366,
"grad_norm": 0.6546263694763184,
"learning_rate": 0.0003727771181867242,
"loss": 3.4705,
"step": 35200
},
{
"epoch": 3.8005390835579513,
"grad_norm": 0.6286924481391907,
"learning_rate": 0.00037245331894225576,
"loss": 3.4452,
"step": 35250
},
{
"epoch": 3.8059299191374665,
"grad_norm": 0.6140318512916565,
"learning_rate": 0.00037212951969778737,
"loss": 3.4407,
"step": 35300
},
{
"epoch": 3.811320754716981,
"grad_norm": 0.6014268398284912,
"learning_rate": 0.00037180572045331887,
"loss": 3.4401,
"step": 35350
},
{
"epoch": 3.816711590296496,
"grad_norm": 0.6223017573356628,
"learning_rate": 0.00037148192120885047,
"loss": 3.4541,
"step": 35400
},
{
"epoch": 3.822102425876011,
"grad_norm": 0.6230337023735046,
"learning_rate": 0.000371158121964382,
"loss": 3.4638,
"step": 35450
},
{
"epoch": 3.8274932614555257,
"grad_norm": 0.6094276905059814,
"learning_rate": 0.0003708343227199136,
"loss": 3.444,
"step": 35500
},
{
"epoch": 3.8328840970350404,
"grad_norm": 0.618259072303772,
"learning_rate": 0.0003705105234754452,
"loss": 3.4502,
"step": 35550
},
{
"epoch": 3.838274932614555,
"grad_norm": 0.6385563015937805,
"learning_rate": 0.0003701867242309768,
"loss": 3.4472,
"step": 35600
},
{
"epoch": 3.8436657681940702,
"grad_norm": 0.6820311546325684,
"learning_rate": 0.00036986292498650833,
"loss": 3.4482,
"step": 35650
},
{
"epoch": 3.849056603773585,
"grad_norm": 0.6169026494026184,
"learning_rate": 0.0003695391257420399,
"loss": 3.4487,
"step": 35700
},
{
"epoch": 3.8544474393530996,
"grad_norm": 0.6038658022880554,
"learning_rate": 0.0003692153264975715,
"loss": 3.4566,
"step": 35750
},
{
"epoch": 3.8598382749326143,
"grad_norm": 0.6171316504478455,
"learning_rate": 0.00036889152725310304,
"loss": 3.4453,
"step": 35800
},
{
"epoch": 3.8652291105121295,
"grad_norm": 0.6223262548446655,
"learning_rate": 0.00036856772800863464,
"loss": 3.4441,
"step": 35850
},
{
"epoch": 3.870619946091644,
"grad_norm": 0.6220224499702454,
"learning_rate": 0.0003682439287641662,
"loss": 3.4563,
"step": 35900
},
{
"epoch": 3.876010781671159,
"grad_norm": 0.5950342416763306,
"learning_rate": 0.0003679201295196978,
"loss": 3.4536,
"step": 35950
},
{
"epoch": 3.881401617250674,
"grad_norm": 0.6365296244621277,
"learning_rate": 0.00036759633027522935,
"loss": 3.4513,
"step": 36000
},
{
"epoch": 3.881401617250674,
"eval_accuracy": 0.37520667137423414,
"eval_loss": 3.452235221862793,
"eval_runtime": 181.5679,
"eval_samples_per_second": 99.197,
"eval_steps_per_second": 6.202,
"step": 36000
},
{
"epoch": 3.8867924528301887,
"grad_norm": 0.6023239493370056,
"learning_rate": 0.00036727253103076084,
"loss": 3.4493,
"step": 36050
},
{
"epoch": 3.8921832884097034,
"grad_norm": 0.5765787959098816,
"learning_rate": 0.00036694873178629245,
"loss": 3.458,
"step": 36100
},
{
"epoch": 3.8975741239892185,
"grad_norm": 0.6466318368911743,
"learning_rate": 0.000366624932541824,
"loss": 3.4327,
"step": 36150
},
{
"epoch": 3.9029649595687332,
"grad_norm": 0.6745396852493286,
"learning_rate": 0.0003663011332973556,
"loss": 3.4447,
"step": 36200
},
{
"epoch": 3.908355795148248,
"grad_norm": 0.6407946348190308,
"learning_rate": 0.00036597733405288715,
"loss": 3.4635,
"step": 36250
},
{
"epoch": 3.913746630727763,
"grad_norm": 0.6124605536460876,
"learning_rate": 0.00036565353480841876,
"loss": 3.4481,
"step": 36300
},
{
"epoch": 3.9191374663072778,
"grad_norm": 0.6159313321113586,
"learning_rate": 0.0003653297355639503,
"loss": 3.4557,
"step": 36350
},
{
"epoch": 3.9245283018867925,
"grad_norm": 0.6387877464294434,
"learning_rate": 0.0003650059363194819,
"loss": 3.4407,
"step": 36400
},
{
"epoch": 3.929919137466307,
"grad_norm": 0.6212127804756165,
"learning_rate": 0.00036468213707501347,
"loss": 3.4654,
"step": 36450
},
{
"epoch": 3.935309973045822,
"grad_norm": 0.6384363174438477,
"learning_rate": 0.000364358337830545,
"loss": 3.4575,
"step": 36500
},
{
"epoch": 3.940700808625337,
"grad_norm": 0.5889016389846802,
"learning_rate": 0.0003640345385860766,
"loss": 3.4574,
"step": 36550
},
{
"epoch": 3.9460916442048517,
"grad_norm": 0.6244920492172241,
"learning_rate": 0.00036371073934160817,
"loss": 3.4584,
"step": 36600
},
{
"epoch": 3.9514824797843664,
"grad_norm": 0.6703576445579529,
"learning_rate": 0.0003633869400971398,
"loss": 3.4541,
"step": 36650
},
{
"epoch": 3.9568733153638815,
"grad_norm": 0.720070481300354,
"learning_rate": 0.00036306961683756073,
"loss": 3.4476,
"step": 36700
},
{
"epoch": 3.9622641509433962,
"grad_norm": 0.6241286993026733,
"learning_rate": 0.0003627458175930922,
"loss": 3.4797,
"step": 36750
},
{
"epoch": 3.967654986522911,
"grad_norm": 0.637942373752594,
"learning_rate": 0.0003624220183486238,
"loss": 3.4629,
"step": 36800
},
{
"epoch": 3.973045822102426,
"grad_norm": 0.6324536800384521,
"learning_rate": 0.0003620982191041554,
"loss": 3.4389,
"step": 36850
},
{
"epoch": 3.9784366576819408,
"grad_norm": 0.6562104821205139,
"learning_rate": 0.00036177441985968693,
"loss": 3.4525,
"step": 36900
},
{
"epoch": 3.9838274932614555,
"grad_norm": 0.634568452835083,
"learning_rate": 0.00036145062061521854,
"loss": 3.4295,
"step": 36950
},
{
"epoch": 3.9892183288409706,
"grad_norm": 0.6241843104362488,
"learning_rate": 0.0003611268213707501,
"loss": 3.4411,
"step": 37000
},
{
"epoch": 3.9892183288409706,
"eval_accuracy": 0.3758031757283356,
"eval_loss": 3.4459524154663086,
"eval_runtime": 181.806,
"eval_samples_per_second": 99.067,
"eval_steps_per_second": 6.193,
"step": 37000
},
{
"epoch": 3.9946091644204853,
"grad_norm": 0.5959682464599609,
"learning_rate": 0.0003608030221262817,
"loss": 3.4464,
"step": 37050
},
{
"epoch": 4.0,
"grad_norm": 1.1611329317092896,
"learning_rate": 0.00036047922288181324,
"loss": 3.4311,
"step": 37100
},
{
"epoch": 4.005390835579515,
"grad_norm": 0.6404488682746887,
"learning_rate": 0.00036015542363734485,
"loss": 3.3566,
"step": 37150
},
{
"epoch": 4.010781671159029,
"grad_norm": 0.6125085949897766,
"learning_rate": 0.0003598316243928764,
"loss": 3.3577,
"step": 37200
},
{
"epoch": 4.0161725067385445,
"grad_norm": 0.6174116730690002,
"learning_rate": 0.00035950782514840795,
"loss": 3.3508,
"step": 37250
},
{
"epoch": 4.02156334231806,
"grad_norm": 0.6580213308334351,
"learning_rate": 0.00035918402590393955,
"loss": 3.3726,
"step": 37300
},
{
"epoch": 4.026954177897574,
"grad_norm": 0.6208986043930054,
"learning_rate": 0.00035886022665947105,
"loss": 3.3577,
"step": 37350
},
{
"epoch": 4.032345013477089,
"grad_norm": 0.6044255495071411,
"learning_rate": 0.0003585364274150027,
"loss": 3.353,
"step": 37400
},
{
"epoch": 4.037735849056604,
"grad_norm": 0.660776674747467,
"learning_rate": 0.0003582126281705342,
"loss": 3.3675,
"step": 37450
},
{
"epoch": 4.0431266846361185,
"grad_norm": 0.6297988891601562,
"learning_rate": 0.0003578888289260658,
"loss": 3.3677,
"step": 37500
},
{
"epoch": 4.048517520215634,
"grad_norm": 0.6227061748504639,
"learning_rate": 0.00035756502968159736,
"loss": 3.3648,
"step": 37550
},
{
"epoch": 4.053908355795148,
"grad_norm": 0.6688127517700195,
"learning_rate": 0.00035724123043712896,
"loss": 3.3517,
"step": 37600
},
{
"epoch": 4.059299191374663,
"grad_norm": 0.6485785841941833,
"learning_rate": 0.0003569174311926605,
"loss": 3.3742,
"step": 37650
},
{
"epoch": 4.064690026954178,
"grad_norm": 0.6131174564361572,
"learning_rate": 0.00035659363194819206,
"loss": 3.3793,
"step": 37700
},
{
"epoch": 4.070080862533692,
"grad_norm": 0.665742814540863,
"learning_rate": 0.00035626983270372367,
"loss": 3.3788,
"step": 37750
},
{
"epoch": 4.0754716981132075,
"grad_norm": 0.6330723166465759,
"learning_rate": 0.0003559460334592552,
"loss": 3.3495,
"step": 37800
},
{
"epoch": 4.080862533692723,
"grad_norm": 0.6350688934326172,
"learning_rate": 0.0003556222342147868,
"loss": 3.3652,
"step": 37850
},
{
"epoch": 4.086253369272237,
"grad_norm": 0.6854297518730164,
"learning_rate": 0.0003552984349703184,
"loss": 3.3649,
"step": 37900
},
{
"epoch": 4.091644204851752,
"grad_norm": 0.618898868560791,
"learning_rate": 0.00035497463572585,
"loss": 3.3665,
"step": 37950
},
{
"epoch": 4.097035040431267,
"grad_norm": 0.6617404818534851,
"learning_rate": 0.00035465083648138153,
"loss": 3.3781,
"step": 38000
},
{
"epoch": 4.097035040431267,
"eval_accuracy": 0.37639392147938294,
"eval_loss": 3.4480299949645996,
"eval_runtime": 181.7693,
"eval_samples_per_second": 99.087,
"eval_steps_per_second": 6.195,
"step": 38000
},
{
"epoch": 4.1024258760107815,
"grad_norm": 0.6297301054000854,
"learning_rate": 0.00035432703723691314,
"loss": 3.373,
"step": 38050
},
{
"epoch": 4.107816711590297,
"grad_norm": 0.6363517045974731,
"learning_rate": 0.00035400323799244463,
"loss": 3.3811,
"step": 38100
},
{
"epoch": 4.113207547169812,
"grad_norm": 0.6690065860748291,
"learning_rate": 0.0003536794387479762,
"loss": 3.3791,
"step": 38150
},
{
"epoch": 4.118598382749326,
"grad_norm": 0.5962274074554443,
"learning_rate": 0.0003533556395035078,
"loss": 3.3629,
"step": 38200
},
{
"epoch": 4.123989218328841,
"grad_norm": 0.6696664690971375,
"learning_rate": 0.00035303184025903934,
"loss": 3.3706,
"step": 38250
},
{
"epoch": 4.129380053908355,
"grad_norm": 0.6310851573944092,
"learning_rate": 0.00035270804101457094,
"loss": 3.3778,
"step": 38300
},
{
"epoch": 4.1347708894878705,
"grad_norm": 0.6481786370277405,
"learning_rate": 0.0003523842417701025,
"loss": 3.3564,
"step": 38350
},
{
"epoch": 4.140161725067386,
"grad_norm": 0.5998034477233887,
"learning_rate": 0.0003520604425256341,
"loss": 3.3726,
"step": 38400
},
{
"epoch": 4.1455525606469,
"grad_norm": 0.642882227897644,
"learning_rate": 0.00035173664328116565,
"loss": 3.3581,
"step": 38450
},
{
"epoch": 4.150943396226415,
"grad_norm": 0.5714357495307922,
"learning_rate": 0.00035141284403669725,
"loss": 3.3907,
"step": 38500
},
{
"epoch": 4.15633423180593,
"grad_norm": 0.6234452128410339,
"learning_rate": 0.0003510890447922288,
"loss": 3.3647,
"step": 38550
},
{
"epoch": 4.1617250673854445,
"grad_norm": 0.6253894567489624,
"learning_rate": 0.00035076524554776035,
"loss": 3.3738,
"step": 38600
},
{
"epoch": 4.16711590296496,
"grad_norm": 0.6415000557899475,
"learning_rate": 0.00035044144630329196,
"loss": 3.3739,
"step": 38650
},
{
"epoch": 4.172506738544475,
"grad_norm": 0.6307880282402039,
"learning_rate": 0.00035011764705882346,
"loss": 3.3956,
"step": 38700
},
{
"epoch": 4.177897574123989,
"grad_norm": 0.6561170816421509,
"learning_rate": 0.0003497938478143551,
"loss": 3.3671,
"step": 38750
},
{
"epoch": 4.183288409703504,
"grad_norm": 0.6375702619552612,
"learning_rate": 0.0003494700485698866,
"loss": 3.3902,
"step": 38800
},
{
"epoch": 4.188679245283019,
"grad_norm": 0.6860988140106201,
"learning_rate": 0.0003491462493254182,
"loss": 3.3791,
"step": 38850
},
{
"epoch": 4.1940700808625335,
"grad_norm": 0.6401588320732117,
"learning_rate": 0.00034882245008094977,
"loss": 3.3893,
"step": 38900
},
{
"epoch": 4.199460916442049,
"grad_norm": 0.6353194713592529,
"learning_rate": 0.0003484986508364813,
"loss": 3.3833,
"step": 38950
},
{
"epoch": 4.204851752021563,
"grad_norm": 0.6985985040664673,
"learning_rate": 0.0003481748515920129,
"loss": 3.3767,
"step": 39000
},
{
"epoch": 4.204851752021563,
"eval_accuracy": 0.3767065158376252,
"eval_loss": 3.442295789718628,
"eval_runtime": 181.7129,
"eval_samples_per_second": 99.118,
"eval_steps_per_second": 6.197,
"step": 39000
},
{
"epoch": 4.210242587601078,
"grad_norm": 0.6564880609512329,
"learning_rate": 0.00034785105234754447,
"loss": 3.3834,
"step": 39050
},
{
"epoch": 4.215633423180593,
"grad_norm": 0.6590373516082764,
"learning_rate": 0.0003475272531030761,
"loss": 3.3852,
"step": 39100
},
{
"epoch": 4.2210242587601075,
"grad_norm": 0.63273686170578,
"learning_rate": 0.00034720345385860763,
"loss": 3.3931,
"step": 39150
},
{
"epoch": 4.226415094339623,
"grad_norm": 0.6495418548583984,
"learning_rate": 0.00034687965461413923,
"loss": 3.389,
"step": 39200
},
{
"epoch": 4.231805929919138,
"grad_norm": 0.6506909728050232,
"learning_rate": 0.0003465558553696708,
"loss": 3.3861,
"step": 39250
},
{
"epoch": 4.237196765498652,
"grad_norm": 0.6353509426116943,
"learning_rate": 0.0003462320561252024,
"loss": 3.3829,
"step": 39300
},
{
"epoch": 4.242587601078167,
"grad_norm": 0.6566866040229797,
"learning_rate": 0.00034590825688073394,
"loss": 3.3971,
"step": 39350
},
{
"epoch": 4.247978436657682,
"grad_norm": 0.6250466108322144,
"learning_rate": 0.00034558445763626543,
"loss": 3.3818,
"step": 39400
},
{
"epoch": 4.2533692722371965,
"grad_norm": 0.6622294783592224,
"learning_rate": 0.00034526065839179704,
"loss": 3.3849,
"step": 39450
},
{
"epoch": 4.258760107816712,
"grad_norm": 0.6352642774581909,
"learning_rate": 0.0003449368591473286,
"loss": 3.3847,
"step": 39500
},
{
"epoch": 4.264150943396227,
"grad_norm": 0.6231552958488464,
"learning_rate": 0.0003446130599028602,
"loss": 3.3864,
"step": 39550
},
{
"epoch": 4.269541778975741,
"grad_norm": 0.6041277647018433,
"learning_rate": 0.00034428926065839174,
"loss": 3.3811,
"step": 39600
},
{
"epoch": 4.274932614555256,
"grad_norm": 0.6078187227249146,
"learning_rate": 0.00034396546141392335,
"loss": 3.3788,
"step": 39650
},
{
"epoch": 4.280323450134771,
"grad_norm": 0.6268119812011719,
"learning_rate": 0.0003436416621694549,
"loss": 3.3947,
"step": 39700
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.6630831956863403,
"learning_rate": 0.0003433178629249865,
"loss": 3.3834,
"step": 39750
},
{
"epoch": 4.291105121293801,
"grad_norm": 0.6812276244163513,
"learning_rate": 0.00034299406368051806,
"loss": 3.3911,
"step": 39800
},
{
"epoch": 4.296495956873315,
"grad_norm": 0.6586547493934631,
"learning_rate": 0.0003426702644360496,
"loss": 3.3586,
"step": 39850
},
{
"epoch": 4.30188679245283,
"grad_norm": 0.6651487350463867,
"learning_rate": 0.0003423464651915812,
"loss": 3.3987,
"step": 39900
},
{
"epoch": 4.307277628032345,
"grad_norm": 0.6707507371902466,
"learning_rate": 0.00034202266594711276,
"loss": 3.3748,
"step": 39950
},
{
"epoch": 4.3126684636118595,
"grad_norm": 0.6894720792770386,
"learning_rate": 0.00034169886670264437,
"loss": 3.3947,
"step": 40000
},
{
"epoch": 4.3126684636118595,
"eval_accuracy": 0.3773171450671426,
"eval_loss": 3.438546895980835,
"eval_runtime": 181.5176,
"eval_samples_per_second": 99.225,
"eval_steps_per_second": 6.203,
"step": 40000
},
{
"epoch": 4.318059299191375,
"grad_norm": 0.5998165607452393,
"learning_rate": 0.0003413815434430653,
"loss": 3.4067,
"step": 40050
},
{
"epoch": 4.32345013477089,
"grad_norm": 0.6333771347999573,
"learning_rate": 0.0003410577441985968,
"loss": 3.3767,
"step": 40100
},
{
"epoch": 4.328840970350404,
"grad_norm": 0.6432942152023315,
"learning_rate": 0.00034073394495412837,
"loss": 3.3926,
"step": 40150
},
{
"epoch": 4.334231805929919,
"grad_norm": 0.6244867444038391,
"learning_rate": 0.00034041014570965997,
"loss": 3.4092,
"step": 40200
},
{
"epoch": 4.339622641509434,
"grad_norm": 0.624860405921936,
"learning_rate": 0.0003400863464651915,
"loss": 3.3898,
"step": 40250
},
{
"epoch": 4.345013477088949,
"grad_norm": 0.6423871517181396,
"learning_rate": 0.0003397625472207231,
"loss": 3.4,
"step": 40300
},
{
"epoch": 4.350404312668464,
"grad_norm": 0.6460752487182617,
"learning_rate": 0.0003394387479762547,
"loss": 3.4063,
"step": 40350
},
{
"epoch": 4.355795148247978,
"grad_norm": 0.6665635108947754,
"learning_rate": 0.0003391149487317863,
"loss": 3.3966,
"step": 40400
},
{
"epoch": 4.361185983827493,
"grad_norm": 0.6376999020576477,
"learning_rate": 0.00033879114948731783,
"loss": 3.4036,
"step": 40450
},
{
"epoch": 4.366576819407008,
"grad_norm": 0.6386997103691101,
"learning_rate": 0.00033846735024284944,
"loss": 3.3693,
"step": 40500
},
{
"epoch": 4.3719676549865225,
"grad_norm": 0.6432493329048157,
"learning_rate": 0.000338143550998381,
"loss": 3.3786,
"step": 40550
},
{
"epoch": 4.377358490566038,
"grad_norm": 0.6118904948234558,
"learning_rate": 0.00033781975175391254,
"loss": 3.3857,
"step": 40600
},
{
"epoch": 4.382749326145553,
"grad_norm": 0.6847822666168213,
"learning_rate": 0.00033749595250944414,
"loss": 3.3869,
"step": 40650
},
{
"epoch": 4.388140161725067,
"grad_norm": 0.6140970587730408,
"learning_rate": 0.00033717215326497564,
"loss": 3.3911,
"step": 40700
},
{
"epoch": 4.393530997304582,
"grad_norm": 0.6901987195014954,
"learning_rate": 0.0003368483540205073,
"loss": 3.3914,
"step": 40750
},
{
"epoch": 4.398921832884097,
"grad_norm": 0.5972651243209839,
"learning_rate": 0.0003365245547760388,
"loss": 3.3906,
"step": 40800
},
{
"epoch": 4.404312668463612,
"grad_norm": 0.7040302157402039,
"learning_rate": 0.0003362007555315704,
"loss": 3.3822,
"step": 40850
},
{
"epoch": 4.409703504043127,
"grad_norm": 0.6535053849220276,
"learning_rate": 0.00033587695628710195,
"loss": 3.3886,
"step": 40900
},
{
"epoch": 4.415094339622642,
"grad_norm": 0.6086871027946472,
"learning_rate": 0.00033555315704263355,
"loss": 3.3965,
"step": 40950
},
{
"epoch": 4.420485175202156,
"grad_norm": 0.6123448610305786,
"learning_rate": 0.0003352293577981651,
"loss": 3.389,
"step": 41000
},
{
"epoch": 4.420485175202156,
"eval_accuracy": 0.37761822221927654,
"eval_loss": 3.433894395828247,
"eval_runtime": 181.957,
"eval_samples_per_second": 98.985,
"eval_steps_per_second": 6.188,
"step": 41000
},
{
"epoch": 4.425876010781671,
"grad_norm": 0.6953914761543274,
"learning_rate": 0.00033490555855369665,
"loss": 3.3761,
"step": 41050
},
{
"epoch": 4.431266846361186,
"grad_norm": 0.6522581577301025,
"learning_rate": 0.00033458175930922826,
"loss": 3.3945,
"step": 41100
},
{
"epoch": 4.436657681940701,
"grad_norm": 0.6128993630409241,
"learning_rate": 0.0003342579600647598,
"loss": 3.3911,
"step": 41150
},
{
"epoch": 4.442048517520216,
"grad_norm": 0.6275089979171753,
"learning_rate": 0.0003339341608202914,
"loss": 3.3761,
"step": 41200
},
{
"epoch": 4.44743935309973,
"grad_norm": 0.6717378497123718,
"learning_rate": 0.00033361036157582297,
"loss": 3.3872,
"step": 41250
},
{
"epoch": 4.452830188679245,
"grad_norm": 0.6755267381668091,
"learning_rate": 0.00033328656233135457,
"loss": 3.3795,
"step": 41300
},
{
"epoch": 4.45822102425876,
"grad_norm": 0.6669474840164185,
"learning_rate": 0.0003329627630868861,
"loss": 3.4041,
"step": 41350
},
{
"epoch": 4.463611859838275,
"grad_norm": 0.6538794040679932,
"learning_rate": 0.0003326389638424177,
"loss": 3.3974,
"step": 41400
},
{
"epoch": 4.46900269541779,
"grad_norm": 0.6036146283149719,
"learning_rate": 0.0003323151645979492,
"loss": 3.387,
"step": 41450
},
{
"epoch": 4.474393530997305,
"grad_norm": 0.6463048458099365,
"learning_rate": 0.00033199136535348077,
"loss": 3.3739,
"step": 41500
},
{
"epoch": 4.479784366576819,
"grad_norm": 0.6582531332969666,
"learning_rate": 0.0003316675661090124,
"loss": 3.3918,
"step": 41550
},
{
"epoch": 4.485175202156334,
"grad_norm": 0.6958242654800415,
"learning_rate": 0.00033134376686454393,
"loss": 3.3972,
"step": 41600
},
{
"epoch": 4.490566037735849,
"grad_norm": 0.6295124292373657,
"learning_rate": 0.00033101996762007553,
"loss": 3.3811,
"step": 41650
},
{
"epoch": 4.495956873315364,
"grad_norm": 0.627613365650177,
"learning_rate": 0.0003306961683756071,
"loss": 3.4056,
"step": 41700
},
{
"epoch": 4.501347708894879,
"grad_norm": 0.6739616990089417,
"learning_rate": 0.0003303723691311387,
"loss": 3.3856,
"step": 41750
},
{
"epoch": 4.506738544474393,
"grad_norm": 0.6398614645004272,
"learning_rate": 0.00033004856988667024,
"loss": 3.3847,
"step": 41800
},
{
"epoch": 4.512129380053908,
"grad_norm": 0.6853240728378296,
"learning_rate": 0.0003297247706422018,
"loss": 3.3945,
"step": 41850
},
{
"epoch": 4.517520215633423,
"grad_norm": 0.6887792348861694,
"learning_rate": 0.0003294009713977334,
"loss": 3.3924,
"step": 41900
},
{
"epoch": 4.5229110512129385,
"grad_norm": 0.6502972841262817,
"learning_rate": 0.00032907717215326494,
"loss": 3.3939,
"step": 41950
},
{
"epoch": 4.528301886792453,
"grad_norm": 0.6029436588287354,
"learning_rate": 0.00032875337290879655,
"loss": 3.3849,
"step": 42000
},
{
"epoch": 4.528301886792453,
"eval_accuracy": 0.37859935779624126,
"eval_loss": 3.428929090499878,
"eval_runtime": 181.7172,
"eval_samples_per_second": 99.116,
"eval_steps_per_second": 6.196,
"step": 42000
},
{
"epoch": 4.533692722371968,
"grad_norm": 0.62586510181427,
"learning_rate": 0.00032842957366432805,
"loss": 3.382,
"step": 42050
},
{
"epoch": 4.539083557951482,
"grad_norm": 0.6761049628257751,
"learning_rate": 0.0003281057744198597,
"loss": 3.4097,
"step": 42100
},
{
"epoch": 4.544474393530997,
"grad_norm": 0.6782863140106201,
"learning_rate": 0.0003277819751753912,
"loss": 3.3976,
"step": 42150
},
{
"epoch": 4.549865229110512,
"grad_norm": 0.6204219460487366,
"learning_rate": 0.0003274581759309228,
"loss": 3.3791,
"step": 42200
},
{
"epoch": 4.555256064690027,
"grad_norm": 0.8040519952774048,
"learning_rate": 0.00032713437668645436,
"loss": 3.3998,
"step": 42250
},
{
"epoch": 4.560646900269542,
"grad_norm": 0.699002742767334,
"learning_rate": 0.0003268105774419859,
"loss": 3.3999,
"step": 42300
},
{
"epoch": 4.566037735849057,
"grad_norm": 0.6923718452453613,
"learning_rate": 0.0003264867781975175,
"loss": 3.382,
"step": 42350
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.6704239845275879,
"learning_rate": 0.00032616297895304906,
"loss": 3.4073,
"step": 42400
},
{
"epoch": 4.576819407008086,
"grad_norm": 0.6082397103309631,
"learning_rate": 0.00032583917970858067,
"loss": 3.3739,
"step": 42450
},
{
"epoch": 4.5822102425876015,
"grad_norm": 0.6278915405273438,
"learning_rate": 0.0003255153804641122,
"loss": 3.3761,
"step": 42500
},
{
"epoch": 4.587601078167116,
"grad_norm": 0.628350555896759,
"learning_rate": 0.0003251915812196438,
"loss": 3.3917,
"step": 42550
},
{
"epoch": 4.592991913746631,
"grad_norm": 0.6095200181007385,
"learning_rate": 0.00032486778197517537,
"loss": 3.387,
"step": 42600
},
{
"epoch": 4.598382749326145,
"grad_norm": 0.6962038278579712,
"learning_rate": 0.000324543982730707,
"loss": 3.4012,
"step": 42650
},
{
"epoch": 4.60377358490566,
"grad_norm": 0.6633439660072327,
"learning_rate": 0.00032422018348623853,
"loss": 3.3961,
"step": 42700
},
{
"epoch": 4.609164420485175,
"grad_norm": 0.6363673210144043,
"learning_rate": 0.0003239028602266595,
"loss": 3.382,
"step": 42750
},
{
"epoch": 4.6145552560646905,
"grad_norm": 0.6578346490859985,
"learning_rate": 0.000323579060982191,
"loss": 3.4043,
"step": 42800
},
{
"epoch": 4.619946091644205,
"grad_norm": 0.6377918124198914,
"learning_rate": 0.0003232552617377226,
"loss": 3.3988,
"step": 42850
},
{
"epoch": 4.62533692722372,
"grad_norm": 0.5883036851882935,
"learning_rate": 0.00032293146249325413,
"loss": 3.3863,
"step": 42900
},
{
"epoch": 4.630727762803234,
"grad_norm": 0.6473262906074524,
"learning_rate": 0.00032260766324878574,
"loss": 3.3877,
"step": 42950
},
{
"epoch": 4.636118598382749,
"grad_norm": 0.6402585506439209,
"learning_rate": 0.0003222838640043173,
"loss": 3.4041,
"step": 43000
},
{
"epoch": 4.636118598382749,
"eval_accuracy": 0.37907547475064435,
"eval_loss": 3.424429178237915,
"eval_runtime": 182.0489,
"eval_samples_per_second": 98.935,
"eval_steps_per_second": 6.185,
"step": 43000
},
{
"epoch": 4.6415094339622645,
"grad_norm": 0.6390340924263,
"learning_rate": 0.00032196006475984884,
"loss": 3.3832,
"step": 43050
},
{
"epoch": 4.646900269541779,
"grad_norm": 0.6697818636894226,
"learning_rate": 0.00032163626551538044,
"loss": 3.3847,
"step": 43100
},
{
"epoch": 4.652291105121294,
"grad_norm": 0.6102224588394165,
"learning_rate": 0.000321312466270912,
"loss": 3.3822,
"step": 43150
},
{
"epoch": 4.657681940700809,
"grad_norm": 0.6941066384315491,
"learning_rate": 0.0003209886670264436,
"loss": 3.3905,
"step": 43200
},
{
"epoch": 4.663072776280323,
"grad_norm": 0.6344305276870728,
"learning_rate": 0.00032066486778197515,
"loss": 3.3888,
"step": 43250
},
{
"epoch": 4.668463611859838,
"grad_norm": 0.6397703886032104,
"learning_rate": 0.00032034106853750675,
"loss": 3.3968,
"step": 43300
},
{
"epoch": 4.6738544474393535,
"grad_norm": 0.615652322769165,
"learning_rate": 0.0003200172692930383,
"loss": 3.3742,
"step": 43350
},
{
"epoch": 4.679245283018868,
"grad_norm": 0.6831540465354919,
"learning_rate": 0.0003196934700485699,
"loss": 3.383,
"step": 43400
},
{
"epoch": 4.684636118598383,
"grad_norm": 0.670258104801178,
"learning_rate": 0.0003193696708041014,
"loss": 3.391,
"step": 43450
},
{
"epoch": 4.690026954177897,
"grad_norm": 0.6558921337127686,
"learning_rate": 0.00031904587155963296,
"loss": 3.3886,
"step": 43500
},
{
"epoch": 4.695417789757412,
"grad_norm": 0.6475565433502197,
"learning_rate": 0.00031872207231516456,
"loss": 3.3769,
"step": 43550
},
{
"epoch": 4.7008086253369274,
"grad_norm": 0.7060928344726562,
"learning_rate": 0.0003183982730706961,
"loss": 3.3797,
"step": 43600
},
{
"epoch": 4.706199460916442,
"grad_norm": 0.6488223671913147,
"learning_rate": 0.0003180744738262277,
"loss": 3.3987,
"step": 43650
},
{
"epoch": 4.711590296495957,
"grad_norm": 0.651401937007904,
"learning_rate": 0.00031775067458175927,
"loss": 3.389,
"step": 43700
},
{
"epoch": 4.716981132075472,
"grad_norm": 0.6610608100891113,
"learning_rate": 0.00031742687533729087,
"loss": 3.3873,
"step": 43750
},
{
"epoch": 4.722371967654986,
"grad_norm": 0.6515519618988037,
"learning_rate": 0.0003171030760928224,
"loss": 3.3807,
"step": 43800
},
{
"epoch": 4.727762803234501,
"grad_norm": 0.6313045620918274,
"learning_rate": 0.000316779276848354,
"loss": 3.3998,
"step": 43850
},
{
"epoch": 4.7331536388140165,
"grad_norm": 0.6147524118423462,
"learning_rate": 0.0003164554776038856,
"loss": 3.4078,
"step": 43900
},
{
"epoch": 4.738544474393531,
"grad_norm": 0.6559792160987854,
"learning_rate": 0.00031613167835941713,
"loss": 3.4056,
"step": 43950
},
{
"epoch": 4.743935309973046,
"grad_norm": 0.6663808822631836,
"learning_rate": 0.00031580787911494873,
"loss": 3.3693,
"step": 44000
},
{
"epoch": 4.743935309973046,
"eval_accuracy": 0.3795856000589333,
"eval_loss": 3.4179606437683105,
"eval_runtime": 181.5494,
"eval_samples_per_second": 99.207,
"eval_steps_per_second": 6.202,
"step": 44000
},
{
"epoch": 4.74932614555256,
"grad_norm": 0.6471132636070251,
"learning_rate": 0.00031548407987048023,
"loss": 3.4089,
"step": 44050
},
{
"epoch": 4.754716981132075,
"grad_norm": 0.6646540760993958,
"learning_rate": 0.0003151602806260119,
"loss": 3.3837,
"step": 44100
},
{
"epoch": 4.7601078167115904,
"grad_norm": 0.6546226143836975,
"learning_rate": 0.0003148364813815434,
"loss": 3.3773,
"step": 44150
},
{
"epoch": 4.765498652291106,
"grad_norm": 0.5949311852455139,
"learning_rate": 0.000314512682137075,
"loss": 3.3991,
"step": 44200
},
{
"epoch": 4.77088948787062,
"grad_norm": 0.6574331521987915,
"learning_rate": 0.00031418888289260654,
"loss": 3.3919,
"step": 44250
},
{
"epoch": 4.776280323450135,
"grad_norm": 0.6199044585227966,
"learning_rate": 0.0003138650836481381,
"loss": 3.3865,
"step": 44300
},
{
"epoch": 4.781671159029649,
"grad_norm": 0.6571835875511169,
"learning_rate": 0.0003135412844036697,
"loss": 3.3986,
"step": 44350
},
{
"epoch": 4.787061994609164,
"grad_norm": 0.6753513813018799,
"learning_rate": 0.00031321748515920124,
"loss": 3.3776,
"step": 44400
},
{
"epoch": 4.7924528301886795,
"grad_norm": 0.6330493092536926,
"learning_rate": 0.00031289368591473285,
"loss": 3.3942,
"step": 44450
},
{
"epoch": 4.797843665768194,
"grad_norm": 0.6691889762878418,
"learning_rate": 0.0003125698866702644,
"loss": 3.3777,
"step": 44500
},
{
"epoch": 4.803234501347709,
"grad_norm": 0.6553536653518677,
"learning_rate": 0.000312246087425796,
"loss": 3.3769,
"step": 44550
},
{
"epoch": 4.808625336927224,
"grad_norm": 0.648166298866272,
"learning_rate": 0.00031192228818132756,
"loss": 3.3905,
"step": 44600
},
{
"epoch": 4.814016172506738,
"grad_norm": 0.6351345777511597,
"learning_rate": 0.00031159848893685916,
"loss": 3.3821,
"step": 44650
},
{
"epoch": 4.819407008086253,
"grad_norm": 0.591360330581665,
"learning_rate": 0.0003112746896923907,
"loss": 3.3744,
"step": 44700
},
{
"epoch": 4.824797843665769,
"grad_norm": 0.7283137440681458,
"learning_rate": 0.0003109508904479222,
"loss": 3.3773,
"step": 44750
},
{
"epoch": 4.830188679245283,
"grad_norm": 0.6123622059822083,
"learning_rate": 0.0003106270912034538,
"loss": 3.391,
"step": 44800
},
{
"epoch": 4.835579514824798,
"grad_norm": 0.6461575031280518,
"learning_rate": 0.00031030329195898536,
"loss": 3.3865,
"step": 44850
},
{
"epoch": 4.840970350404312,
"grad_norm": 0.6624020934104919,
"learning_rate": 0.00030997949271451697,
"loss": 3.3887,
"step": 44900
},
{
"epoch": 4.846361185983827,
"grad_norm": 0.6682701110839844,
"learning_rate": 0.0003096556934700485,
"loss": 3.3842,
"step": 44950
},
{
"epoch": 4.8517520215633425,
"grad_norm": 0.6075751781463623,
"learning_rate": 0.0003093318942255801,
"loss": 3.3953,
"step": 45000
},
{
"epoch": 4.8517520215633425,
"eval_accuracy": 0.379875703269398,
"eval_loss": 3.41325306892395,
"eval_runtime": 182.0922,
"eval_samples_per_second": 98.911,
"eval_steps_per_second": 6.184,
"step": 45000
},
{
"epoch": 4.857142857142857,
"grad_norm": 0.618019163608551,
"learning_rate": 0.0003090080949811117,
"loss": 3.3711,
"step": 45050
},
{
"epoch": 4.862533692722372,
"grad_norm": 0.6713760495185852,
"learning_rate": 0.0003086842957366433,
"loss": 3.3965,
"step": 45100
},
{
"epoch": 4.867924528301887,
"grad_norm": 0.7070072889328003,
"learning_rate": 0.00030836049649217483,
"loss": 3.3691,
"step": 45150
},
{
"epoch": 4.873315363881401,
"grad_norm": 0.6715174913406372,
"learning_rate": 0.0003080431732325958,
"loss": 3.3933,
"step": 45200
},
{
"epoch": 4.878706199460916,
"grad_norm": 0.6704628467559814,
"learning_rate": 0.00030771937398812733,
"loss": 3.3891,
"step": 45250
},
{
"epoch": 4.884097035040432,
"grad_norm": 0.6364473700523376,
"learning_rate": 0.00030739557474365894,
"loss": 3.394,
"step": 45300
},
{
"epoch": 4.889487870619946,
"grad_norm": 0.6634293794631958,
"learning_rate": 0.0003070717754991905,
"loss": 3.3871,
"step": 45350
},
{
"epoch": 4.894878706199461,
"grad_norm": 0.6384555101394653,
"learning_rate": 0.0003067479762547221,
"loss": 3.3828,
"step": 45400
},
{
"epoch": 4.900269541778976,
"grad_norm": 0.6213516592979431,
"learning_rate": 0.0003064241770102536,
"loss": 3.3905,
"step": 45450
},
{
"epoch": 4.90566037735849,
"grad_norm": 0.6964032649993896,
"learning_rate": 0.00030610037776578514,
"loss": 3.3905,
"step": 45500
},
{
"epoch": 4.9110512129380055,
"grad_norm": 0.6544898152351379,
"learning_rate": 0.00030577657852131674,
"loss": 3.3783,
"step": 45550
},
{
"epoch": 4.916442048517521,
"grad_norm": 0.6489967107772827,
"learning_rate": 0.0003054527792768483,
"loss": 3.4039,
"step": 45600
},
{
"epoch": 4.921832884097035,
"grad_norm": 0.6411947011947632,
"learning_rate": 0.0003051289800323799,
"loss": 3.376,
"step": 45650
},
{
"epoch": 4.92722371967655,
"grad_norm": 0.624286949634552,
"learning_rate": 0.00030480518078791145,
"loss": 3.3895,
"step": 45700
},
{
"epoch": 4.932614555256064,
"grad_norm": 0.6586293578147888,
"learning_rate": 0.00030448138154344305,
"loss": 3.3934,
"step": 45750
},
{
"epoch": 4.938005390835579,
"grad_norm": 0.683337390422821,
"learning_rate": 0.0003041575822989746,
"loss": 3.3861,
"step": 45800
},
{
"epoch": 4.943396226415095,
"grad_norm": 0.6735714673995972,
"learning_rate": 0.0003038337830545062,
"loss": 3.3883,
"step": 45850
},
{
"epoch": 4.948787061994609,
"grad_norm": 0.687961995601654,
"learning_rate": 0.00030350998381003776,
"loss": 3.3902,
"step": 45900
},
{
"epoch": 4.954177897574124,
"grad_norm": 0.6364692449569702,
"learning_rate": 0.0003031861845655693,
"loss": 3.3726,
"step": 45950
},
{
"epoch": 4.959568733153639,
"grad_norm": 0.6474451422691345,
"learning_rate": 0.0003028623853211009,
"loss": 3.3841,
"step": 46000
},
{
"epoch": 4.959568733153639,
"eval_accuracy": 0.3804872017220179,
"eval_loss": 3.409752368927002,
"eval_runtime": 181.5189,
"eval_samples_per_second": 99.224,
"eval_steps_per_second": 6.203,
"step": 46000
},
{
"epoch": 4.964959568733153,
"grad_norm": 0.630465030670166,
"learning_rate": 0.00030253858607663247,
"loss": 3.3909,
"step": 46050
},
{
"epoch": 4.9703504043126685,
"grad_norm": 0.655409038066864,
"learning_rate": 0.00030221478683216407,
"loss": 3.3874,
"step": 46100
},
{
"epoch": 4.975741239892184,
"grad_norm": 0.6540499925613403,
"learning_rate": 0.00030189098758769557,
"loss": 3.3845,
"step": 46150
},
{
"epoch": 4.981132075471698,
"grad_norm": 0.6544473171234131,
"learning_rate": 0.00030156718834322717,
"loss": 3.3843,
"step": 46200
},
{
"epoch": 4.986522911051213,
"grad_norm": 0.6690456867218018,
"learning_rate": 0.0003012433890987587,
"loss": 3.4047,
"step": 46250
},
{
"epoch": 4.991913746630727,
"grad_norm": 0.6413595080375671,
"learning_rate": 0.0003009195898542903,
"loss": 3.3936,
"step": 46300
},
{
"epoch": 4.997304582210242,
"grad_norm": 0.6787312030792236,
"learning_rate": 0.0003005957906098219,
"loss": 3.382,
"step": 46350
},
{
"epoch": 5.002695417789758,
"grad_norm": 0.6582103371620178,
"learning_rate": 0.00030027199136535343,
"loss": 3.3388,
"step": 46400
},
{
"epoch": 5.008086253369272,
"grad_norm": 0.6663526296615601,
"learning_rate": 0.00029994819212088503,
"loss": 3.288,
"step": 46450
},
{
"epoch": 5.013477088948787,
"grad_norm": 0.6610379219055176,
"learning_rate": 0.0002996243928764166,
"loss": 3.2923,
"step": 46500
},
{
"epoch": 5.018867924528302,
"grad_norm": 0.652783989906311,
"learning_rate": 0.0002993005936319482,
"loss": 3.3012,
"step": 46550
},
{
"epoch": 5.024258760107816,
"grad_norm": 0.6830565333366394,
"learning_rate": 0.00029897679438747974,
"loss": 3.299,
"step": 46600
},
{
"epoch": 5.0296495956873315,
"grad_norm": 0.7056826949119568,
"learning_rate": 0.0002986529951430113,
"loss": 3.3031,
"step": 46650
},
{
"epoch": 5.035040431266847,
"grad_norm": 0.6342096924781799,
"learning_rate": 0.0002983291958985429,
"loss": 3.2964,
"step": 46700
},
{
"epoch": 5.040431266846361,
"grad_norm": 0.6923931241035461,
"learning_rate": 0.00029800539665407444,
"loss": 3.3102,
"step": 46750
},
{
"epoch": 5.045822102425876,
"grad_norm": 0.677852988243103,
"learning_rate": 0.000297681597409606,
"loss": 3.2961,
"step": 46800
},
{
"epoch": 5.051212938005391,
"grad_norm": 0.6728231310844421,
"learning_rate": 0.0002973577981651376,
"loss": 3.314,
"step": 46850
},
{
"epoch": 5.056603773584905,
"grad_norm": 0.6960410475730896,
"learning_rate": 0.00029703399892066915,
"loss": 3.3012,
"step": 46900
},
{
"epoch": 5.061994609164421,
"grad_norm": 0.6692420244216919,
"learning_rate": 0.00029671019967620076,
"loss": 3.2907,
"step": 46950
},
{
"epoch": 5.067385444743936,
"grad_norm": 0.672247052192688,
"learning_rate": 0.0002963864004317323,
"loss": 3.3078,
"step": 47000
},
{
"epoch": 5.067385444743936,
"eval_accuracy": 0.3803283512000331,
"eval_loss": 3.413437843322754,
"eval_runtime": 181.7151,
"eval_samples_per_second": 99.117,
"eval_steps_per_second": 6.197,
"step": 47000
},
{
"epoch": 5.07277628032345,
"grad_norm": 0.6593713164329529,
"learning_rate": 0.00029606260118726386,
"loss": 3.3107,
"step": 47050
},
{
"epoch": 5.078167115902965,
"grad_norm": 0.6897931694984436,
"learning_rate": 0.0002957388019427954,
"loss": 3.3144,
"step": 47100
},
{
"epoch": 5.083557951482479,
"grad_norm": 0.682111918926239,
"learning_rate": 0.000295415002698327,
"loss": 3.3084,
"step": 47150
},
{
"epoch": 5.0889487870619945,
"grad_norm": 0.6645491719245911,
"learning_rate": 0.00029509120345385856,
"loss": 3.3045,
"step": 47200
},
{
"epoch": 5.09433962264151,
"grad_norm": 0.7227234840393066,
"learning_rate": 0.00029476740420939017,
"loss": 3.3093,
"step": 47250
},
{
"epoch": 5.099730458221024,
"grad_norm": 0.6667834520339966,
"learning_rate": 0.0002944436049649217,
"loss": 3.3061,
"step": 47300
},
{
"epoch": 5.105121293800539,
"grad_norm": 0.6737233996391296,
"learning_rate": 0.0002941198057204533,
"loss": 3.2953,
"step": 47350
},
{
"epoch": 5.110512129380054,
"grad_norm": 0.6560413837432861,
"learning_rate": 0.00029379600647598487,
"loss": 3.3114,
"step": 47400
},
{
"epoch": 5.115902964959568,
"grad_norm": 0.669833242893219,
"learning_rate": 0.0002934722072315164,
"loss": 3.3264,
"step": 47450
},
{
"epoch": 5.121293800539084,
"grad_norm": 0.6529123187065125,
"learning_rate": 0.000293148407987048,
"loss": 3.3197,
"step": 47500
},
{
"epoch": 5.126684636118599,
"grad_norm": 0.6688790917396545,
"learning_rate": 0.0002928246087425796,
"loss": 3.3277,
"step": 47550
},
{
"epoch": 5.132075471698113,
"grad_norm": 0.7134573459625244,
"learning_rate": 0.00029250080949811113,
"loss": 3.3201,
"step": 47600
},
{
"epoch": 5.137466307277628,
"grad_norm": 0.6406720280647278,
"learning_rate": 0.00029217701025364273,
"loss": 3.3089,
"step": 47650
},
{
"epoch": 5.142857142857143,
"grad_norm": 0.6913286447525024,
"learning_rate": 0.0002918532110091743,
"loss": 3.3229,
"step": 47700
},
{
"epoch": 5.1482479784366575,
"grad_norm": 0.628642201423645,
"learning_rate": 0.0002915294117647059,
"loss": 3.3174,
"step": 47750
},
{
"epoch": 5.153638814016173,
"grad_norm": 0.6638661026954651,
"learning_rate": 0.00029120561252023744,
"loss": 3.3054,
"step": 47800
},
{
"epoch": 5.159029649595688,
"grad_norm": 0.7017157673835754,
"learning_rate": 0.000290881813275769,
"loss": 3.3304,
"step": 47850
},
{
"epoch": 5.164420485175202,
"grad_norm": 0.6650938987731934,
"learning_rate": 0.00029055801403130054,
"loss": 3.3221,
"step": 47900
},
{
"epoch": 5.169811320754717,
"grad_norm": 0.6689581871032715,
"learning_rate": 0.00029023421478683215,
"loss": 3.3087,
"step": 47950
},
{
"epoch": 5.175202156334231,
"grad_norm": 0.6488136649131775,
"learning_rate": 0.0002899104155423637,
"loss": 3.315,
"step": 48000
},
{
"epoch": 5.175202156334231,
"eval_accuracy": 0.3809863530886376,
"eval_loss": 3.410261631011963,
"eval_runtime": 181.8994,
"eval_samples_per_second": 99.016,
"eval_steps_per_second": 6.19,
"step": 48000
},
{
"epoch": 5.180592991913747,
"grad_norm": 0.6930067539215088,
"learning_rate": 0.0002895866162978953,
"loss": 3.3195,
"step": 48050
},
{
"epoch": 5.185983827493262,
"grad_norm": 0.6869164109230042,
"learning_rate": 0.00028926281705342685,
"loss": 3.3301,
"step": 48100
},
{
"epoch": 5.191374663072776,
"grad_norm": 0.6907550096511841,
"learning_rate": 0.0002889390178089584,
"loss": 3.3239,
"step": 48150
},
{
"epoch": 5.196765498652291,
"grad_norm": 0.7010151147842407,
"learning_rate": 0.00028861521856449,
"loss": 3.3341,
"step": 48200
},
{
"epoch": 5.202156334231806,
"grad_norm": 0.6742527484893799,
"learning_rate": 0.00028829141932002156,
"loss": 3.3272,
"step": 48250
},
{
"epoch": 5.2075471698113205,
"grad_norm": 0.6344496011734009,
"learning_rate": 0.0002879676200755531,
"loss": 3.3019,
"step": 48300
},
{
"epoch": 5.212938005390836,
"grad_norm": 0.6495826840400696,
"learning_rate": 0.0002876438208310847,
"loss": 3.3375,
"step": 48350
},
{
"epoch": 5.218328840970351,
"grad_norm": 0.6612682342529297,
"learning_rate": 0.00028732002158661626,
"loss": 3.3277,
"step": 48400
},
{
"epoch": 5.223719676549865,
"grad_norm": 0.7070847749710083,
"learning_rate": 0.00028699622234214787,
"loss": 3.3326,
"step": 48450
},
{
"epoch": 5.22911051212938,
"grad_norm": 0.6680747270584106,
"learning_rate": 0.0002866724230976794,
"loss": 3.3387,
"step": 48500
},
{
"epoch": 5.234501347708895,
"grad_norm": 0.7149842977523804,
"learning_rate": 0.00028634862385321097,
"loss": 3.3139,
"step": 48550
},
{
"epoch": 5.2398921832884096,
"grad_norm": 0.6795401573181152,
"learning_rate": 0.0002860248246087426,
"loss": 3.3161,
"step": 48600
},
{
"epoch": 5.245283018867925,
"grad_norm": 0.6816480755805969,
"learning_rate": 0.0002857010253642741,
"loss": 3.339,
"step": 48650
},
{
"epoch": 5.250673854447439,
"grad_norm": 0.7223415970802307,
"learning_rate": 0.0002853772261198057,
"loss": 3.3277,
"step": 48700
},
{
"epoch": 5.256064690026954,
"grad_norm": 0.715975284576416,
"learning_rate": 0.0002850534268753373,
"loss": 3.3273,
"step": 48750
},
{
"epoch": 5.261455525606469,
"grad_norm": 0.6848128437995911,
"learning_rate": 0.00028472962763086883,
"loss": 3.3383,
"step": 48800
},
{
"epoch": 5.2668463611859835,
"grad_norm": 0.6746169924736023,
"learning_rate": 0.0002844058283864004,
"loss": 3.3299,
"step": 48850
},
{
"epoch": 5.272237196765499,
"grad_norm": 0.6700478792190552,
"learning_rate": 0.000284082029141932,
"loss": 3.3196,
"step": 48900
},
{
"epoch": 5.277628032345014,
"grad_norm": 0.7243698835372925,
"learning_rate": 0.00028375822989746354,
"loss": 3.3327,
"step": 48950
},
{
"epoch": 5.283018867924528,
"grad_norm": 0.6648624539375305,
"learning_rate": 0.00028343443065299514,
"loss": 3.3108,
"step": 49000
},
{
"epoch": 5.283018867924528,
"eval_accuracy": 0.38090779705074773,
"eval_loss": 3.408730983734131,
"eval_runtime": 181.6804,
"eval_samples_per_second": 99.136,
"eval_steps_per_second": 6.198,
"step": 49000
},
{
"epoch": 5.288409703504043,
"grad_norm": 0.7246460914611816,
"learning_rate": 0.0002831106314085267,
"loss": 3.3337,
"step": 49050
},
{
"epoch": 5.293800539083558,
"grad_norm": 0.6399522423744202,
"learning_rate": 0.0002827868321640583,
"loss": 3.3354,
"step": 49100
},
{
"epoch": 5.2991913746630726,
"grad_norm": 0.6992799043655396,
"learning_rate": 0.0002824630329195898,
"loss": 3.3293,
"step": 49150
},
{
"epoch": 5.304582210242588,
"grad_norm": 0.6970546841621399,
"learning_rate": 0.0002821392336751214,
"loss": 3.3261,
"step": 49200
},
{
"epoch": 5.309973045822103,
"grad_norm": 0.6449671387672424,
"learning_rate": 0.00028181543443065295,
"loss": 3.3052,
"step": 49250
},
{
"epoch": 5.315363881401617,
"grad_norm": 0.6572911143302917,
"learning_rate": 0.00028149163518618455,
"loss": 3.3106,
"step": 49300
},
{
"epoch": 5.320754716981132,
"grad_norm": 0.6900778412818909,
"learning_rate": 0.0002811743119266055,
"loss": 3.341,
"step": 49350
},
{
"epoch": 5.3261455525606465,
"grad_norm": 0.6336671710014343,
"learning_rate": 0.00028085051268213706,
"loss": 3.3116,
"step": 49400
},
{
"epoch": 5.331536388140162,
"grad_norm": 0.698238730430603,
"learning_rate": 0.0002805267134376686,
"loss": 3.3287,
"step": 49450
},
{
"epoch": 5.336927223719677,
"grad_norm": 0.6817668080329895,
"learning_rate": 0.00028020291419320016,
"loss": 3.3373,
"step": 49500
},
{
"epoch": 5.342318059299191,
"grad_norm": 0.6314173340797424,
"learning_rate": 0.00027987911494873176,
"loss": 3.3249,
"step": 49550
},
{
"epoch": 5.347708894878706,
"grad_norm": 0.6567521691322327,
"learning_rate": 0.0002795553157042633,
"loss": 3.3204,
"step": 49600
},
{
"epoch": 5.353099730458221,
"grad_norm": 0.6802349090576172,
"learning_rate": 0.0002792315164597949,
"loss": 3.3259,
"step": 49650
},
{
"epoch": 5.3584905660377355,
"grad_norm": 0.6689594388008118,
"learning_rate": 0.00027890771721532647,
"loss": 3.3023,
"step": 49700
},
{
"epoch": 5.363881401617251,
"grad_norm": 0.6894006729125977,
"learning_rate": 0.00027858391797085807,
"loss": 3.3348,
"step": 49750
},
{
"epoch": 5.369272237196766,
"grad_norm": 0.673187255859375,
"learning_rate": 0.0002782601187263896,
"loss": 3.3259,
"step": 49800
},
{
"epoch": 5.37466307277628,
"grad_norm": 0.6820120215415955,
"learning_rate": 0.0002779363194819212,
"loss": 3.3265,
"step": 49850
},
{
"epoch": 5.380053908355795,
"grad_norm": 0.6760383248329163,
"learning_rate": 0.0002776125202374527,
"loss": 3.3287,
"step": 49900
},
{
"epoch": 5.38544474393531,
"grad_norm": 0.6421095132827759,
"learning_rate": 0.00027728872099298433,
"loss": 3.3256,
"step": 49950
},
{
"epoch": 5.390835579514825,
"grad_norm": 0.630399763584137,
"learning_rate": 0.0002769649217485159,
"loss": 3.3356,
"step": 50000
},
{
"epoch": 5.390835579514825,
"eval_accuracy": 0.3818138534822327,
"eval_loss": 3.403038263320923,
"eval_runtime": 181.7655,
"eval_samples_per_second": 99.089,
"eval_steps_per_second": 6.195,
"step": 50000
},
{
"epoch": 5.39622641509434,
"grad_norm": 0.6736732125282288,
"learning_rate": 0.0002766411225040475,
"loss": 3.3285,
"step": 50050
},
{
"epoch": 5.401617250673855,
"grad_norm": 0.6692689657211304,
"learning_rate": 0.00027631732325957903,
"loss": 3.3388,
"step": 50100
},
{
"epoch": 5.407008086253369,
"grad_norm": 0.6497191190719604,
"learning_rate": 0.0002759935240151106,
"loss": 3.3124,
"step": 50150
},
{
"epoch": 5.412398921832884,
"grad_norm": 0.7475671768188477,
"learning_rate": 0.0002756697247706422,
"loss": 3.3098,
"step": 50200
},
{
"epoch": 5.4177897574123985,
"grad_norm": 0.6727519035339355,
"learning_rate": 0.00027534592552617374,
"loss": 3.3339,
"step": 50250
},
{
"epoch": 5.423180592991914,
"grad_norm": 0.6927394270896912,
"learning_rate": 0.00027502212628170535,
"loss": 3.3189,
"step": 50300
},
{
"epoch": 5.428571428571429,
"grad_norm": 0.6445196270942688,
"learning_rate": 0.0002746983270372369,
"loss": 3.3331,
"step": 50350
},
{
"epoch": 5.433962264150943,
"grad_norm": 0.6594277620315552,
"learning_rate": 0.00027437452779276845,
"loss": 3.3205,
"step": 50400
},
{
"epoch": 5.439353099730458,
"grad_norm": 0.665379524230957,
"learning_rate": 0.00027405072854830005,
"loss": 3.334,
"step": 50450
},
{
"epoch": 5.444743935309973,
"grad_norm": 0.6767345070838928,
"learning_rate": 0.0002737269293038316,
"loss": 3.3317,
"step": 50500
},
{
"epoch": 5.450134770889488,
"grad_norm": 0.7038382887840271,
"learning_rate": 0.00027340313005936315,
"loss": 3.3615,
"step": 50550
},
{
"epoch": 5.455525606469003,
"grad_norm": 0.6953480839729309,
"learning_rate": 0.00027307933081489476,
"loss": 3.3188,
"step": 50600
},
{
"epoch": 5.460916442048518,
"grad_norm": 0.6953732967376709,
"learning_rate": 0.0002727555315704263,
"loss": 3.3384,
"step": 50650
},
{
"epoch": 5.466307277628032,
"grad_norm": 0.6641663312911987,
"learning_rate": 0.0002724317323259579,
"loss": 3.3364,
"step": 50700
},
{
"epoch": 5.471698113207547,
"grad_norm": 0.6947981119155884,
"learning_rate": 0.00027210793308148946,
"loss": 3.3245,
"step": 50750
},
{
"epoch": 5.4770889487870615,
"grad_norm": 0.6703706979751587,
"learning_rate": 0.000271784133837021,
"loss": 3.3403,
"step": 50800
},
{
"epoch": 5.482479784366577,
"grad_norm": 0.6884980797767639,
"learning_rate": 0.00027146033459255256,
"loss": 3.3402,
"step": 50850
},
{
"epoch": 5.487870619946092,
"grad_norm": 0.6224678158760071,
"learning_rate": 0.00027113653534808417,
"loss": 3.334,
"step": 50900
},
{
"epoch": 5.493261455525606,
"grad_norm": 0.7388006448745728,
"learning_rate": 0.0002708127361036157,
"loss": 3.3408,
"step": 50950
},
{
"epoch": 5.498652291105121,
"grad_norm": 0.6677238941192627,
"learning_rate": 0.0002704889368591473,
"loss": 3.3193,
"step": 51000
},
{
"epoch": 5.498652291105121,
"eval_accuracy": 0.3824023175226359,
"eval_loss": 3.39656138420105,
"eval_runtime": 181.295,
"eval_samples_per_second": 99.346,
"eval_steps_per_second": 6.211,
"step": 51000
},
{
"epoch": 5.504043126684636,
"grad_norm": 0.6957075595855713,
"learning_rate": 0.0002701651376146789,
"loss": 3.3396,
"step": 51050
},
{
"epoch": 5.509433962264151,
"grad_norm": 0.6859670877456665,
"learning_rate": 0.0002698413383702105,
"loss": 3.3122,
"step": 51100
},
{
"epoch": 5.514824797843666,
"grad_norm": 0.6613989472389221,
"learning_rate": 0.00026951753912574203,
"loss": 3.3243,
"step": 51150
},
{
"epoch": 5.520215633423181,
"grad_norm": 0.6999632716178894,
"learning_rate": 0.0002691937398812736,
"loss": 3.3359,
"step": 51200
},
{
"epoch": 5.525606469002695,
"grad_norm": 0.6450424194335938,
"learning_rate": 0.00026886994063680513,
"loss": 3.3342,
"step": 51250
},
{
"epoch": 5.53099730458221,
"grad_norm": 0.7276694178581238,
"learning_rate": 0.00026854614139233674,
"loss": 3.3446,
"step": 51300
},
{
"epoch": 5.536388140161725,
"grad_norm": 0.6517132520675659,
"learning_rate": 0.0002682288181327577,
"loss": 3.3142,
"step": 51350
},
{
"epoch": 5.54177897574124,
"grad_norm": 0.7173911929130554,
"learning_rate": 0.00026790501888828924,
"loss": 3.3359,
"step": 51400
},
{
"epoch": 5.547169811320755,
"grad_norm": 0.7053157091140747,
"learning_rate": 0.00026758121964382084,
"loss": 3.3334,
"step": 51450
},
{
"epoch": 5.55256064690027,
"grad_norm": 0.6601673364639282,
"learning_rate": 0.00026725742039935234,
"loss": 3.3188,
"step": 51500
},
{
"epoch": 5.557951482479784,
"grad_norm": 0.6707030534744263,
"learning_rate": 0.00026693362115488394,
"loss": 3.3456,
"step": 51550
},
{
"epoch": 5.563342318059299,
"grad_norm": 0.691186249256134,
"learning_rate": 0.0002666098219104155,
"loss": 3.3303,
"step": 51600
},
{
"epoch": 5.568733153638814,
"grad_norm": 0.7065075635910034,
"learning_rate": 0.0002662860226659471,
"loss": 3.3251,
"step": 51650
},
{
"epoch": 5.574123989218329,
"grad_norm": 0.6833894848823547,
"learning_rate": 0.00026596222342147865,
"loss": 3.3445,
"step": 51700
},
{
"epoch": 5.579514824797844,
"grad_norm": 0.69661945104599,
"learning_rate": 0.00026563842417701026,
"loss": 3.3334,
"step": 51750
},
{
"epoch": 5.584905660377358,
"grad_norm": 0.680160641670227,
"learning_rate": 0.0002653146249325418,
"loss": 3.3365,
"step": 51800
},
{
"epoch": 5.590296495956873,
"grad_norm": 0.644248902797699,
"learning_rate": 0.00026499082568807336,
"loss": 3.343,
"step": 51850
},
{
"epoch": 5.595687331536388,
"grad_norm": 0.6939667463302612,
"learning_rate": 0.00026466702644360496,
"loss": 3.3404,
"step": 51900
},
{
"epoch": 5.601078167115903,
"grad_norm": 0.7017162442207336,
"learning_rate": 0.0002643432271991365,
"loss": 3.3512,
"step": 51950
},
{
"epoch": 5.606469002695418,
"grad_norm": 0.6775293350219727,
"learning_rate": 0.00026401942795466806,
"loss": 3.3407,
"step": 52000
},
{
"epoch": 5.606469002695418,
"eval_accuracy": 0.3826056070757368,
"eval_loss": 3.394618034362793,
"eval_runtime": 181.3237,
"eval_samples_per_second": 99.331,
"eval_steps_per_second": 6.21,
"step": 52000
},
{
"epoch": 5.611859838274933,
"grad_norm": 0.7509863972663879,
"learning_rate": 0.00026369562871019967,
"loss": 3.3429,
"step": 52050
},
{
"epoch": 5.617250673854447,
"grad_norm": 0.6323455572128296,
"learning_rate": 0.0002633718294657312,
"loss": 3.3371,
"step": 52100
},
{
"epoch": 5.622641509433962,
"grad_norm": 0.6781734824180603,
"learning_rate": 0.0002630480302212628,
"loss": 3.3374,
"step": 52150
},
{
"epoch": 5.628032345013477,
"grad_norm": 0.6843175292015076,
"learning_rate": 0.00026272423097679437,
"loss": 3.3266,
"step": 52200
},
{
"epoch": 5.633423180592992,
"grad_norm": 0.7037350535392761,
"learning_rate": 0.0002624004317323259,
"loss": 3.3269,
"step": 52250
},
{
"epoch": 5.638814016172507,
"grad_norm": 0.6668564677238464,
"learning_rate": 0.00026207663248785753,
"loss": 3.3469,
"step": 52300
},
{
"epoch": 5.644204851752022,
"grad_norm": 0.7546958327293396,
"learning_rate": 0.0002617528332433891,
"loss": 3.3198,
"step": 52350
},
{
"epoch": 5.649595687331536,
"grad_norm": 0.6487285494804382,
"learning_rate": 0.00026142903399892063,
"loss": 3.3224,
"step": 52400
},
{
"epoch": 5.654986522911051,
"grad_norm": 0.6974462866783142,
"learning_rate": 0.00026110523475445223,
"loss": 3.3278,
"step": 52450
},
{
"epoch": 5.660377358490566,
"grad_norm": 0.6957582831382751,
"learning_rate": 0.0002607814355099838,
"loss": 3.3396,
"step": 52500
},
{
"epoch": 5.665768194070081,
"grad_norm": 0.6984000205993652,
"learning_rate": 0.00026045763626551534,
"loss": 3.3464,
"step": 52550
},
{
"epoch": 5.671159029649596,
"grad_norm": 0.7476868033409119,
"learning_rate": 0.00026013383702104694,
"loss": 3.3435,
"step": 52600
},
{
"epoch": 5.67654986522911,
"grad_norm": 0.6622081398963928,
"learning_rate": 0.0002598100377765785,
"loss": 3.3365,
"step": 52650
},
{
"epoch": 5.681940700808625,
"grad_norm": 0.7044835686683655,
"learning_rate": 0.0002594862385321101,
"loss": 3.3351,
"step": 52700
},
{
"epoch": 5.6873315363881405,
"grad_norm": 0.6542053818702698,
"learning_rate": 0.00025916243928764165,
"loss": 3.3309,
"step": 52750
},
{
"epoch": 5.692722371967655,
"grad_norm": 0.6703542470932007,
"learning_rate": 0.0002588386400431732,
"loss": 3.3281,
"step": 52800
},
{
"epoch": 5.69811320754717,
"grad_norm": 0.6725676655769348,
"learning_rate": 0.00025851484079870475,
"loss": 3.3307,
"step": 52850
},
{
"epoch": 5.703504043126685,
"grad_norm": 0.6873592734336853,
"learning_rate": 0.00025819104155423635,
"loss": 3.326,
"step": 52900
},
{
"epoch": 5.708894878706199,
"grad_norm": 0.6863544583320618,
"learning_rate": 0.0002578672423097679,
"loss": 3.3187,
"step": 52950
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.6547959446907043,
"learning_rate": 0.0002575434430652995,
"loss": 3.348,
"step": 53000
},
{
"epoch": 5.714285714285714,
"eval_accuracy": 0.38305782039482067,
"eval_loss": 3.3898489475250244,
"eval_runtime": 181.3407,
"eval_samples_per_second": 99.321,
"eval_steps_per_second": 6.209,
"step": 53000
},
{
"epoch": 5.719676549865229,
"grad_norm": 0.6937452554702759,
"learning_rate": 0.00025721964382083106,
"loss": 3.3271,
"step": 53050
},
{
"epoch": 5.725067385444744,
"grad_norm": 0.7007577419281006,
"learning_rate": 0.00025689584457636266,
"loss": 3.3198,
"step": 53100
},
{
"epoch": 5.730458221024259,
"grad_norm": 0.7198716402053833,
"learning_rate": 0.0002565720453318942,
"loss": 3.3458,
"step": 53150
},
{
"epoch": 5.735849056603773,
"grad_norm": 0.6731821298599243,
"learning_rate": 0.00025624824608742576,
"loss": 3.3108,
"step": 53200
},
{
"epoch": 5.741239892183288,
"grad_norm": 0.6968561410903931,
"learning_rate": 0.0002559244468429573,
"loss": 3.3305,
"step": 53250
},
{
"epoch": 5.7466307277628035,
"grad_norm": 0.6855055093765259,
"learning_rate": 0.0002556006475984889,
"loss": 3.3337,
"step": 53300
},
{
"epoch": 5.752021563342318,
"grad_norm": 0.7210481762886047,
"learning_rate": 0.00025527684835402047,
"loss": 3.3252,
"step": 53350
},
{
"epoch": 5.757412398921833,
"grad_norm": 0.6705754995346069,
"learning_rate": 0.0002549595250944414,
"loss": 3.3515,
"step": 53400
},
{
"epoch": 5.762803234501348,
"grad_norm": 0.6750226020812988,
"learning_rate": 0.000254635725849973,
"loss": 3.3259,
"step": 53450
},
{
"epoch": 5.768194070080862,
"grad_norm": 0.6875684261322021,
"learning_rate": 0.0002543119266055046,
"loss": 3.3218,
"step": 53500
},
{
"epoch": 5.773584905660377,
"grad_norm": 0.7072954773902893,
"learning_rate": 0.00025398812736103613,
"loss": 3.3515,
"step": 53550
},
{
"epoch": 5.7789757412398925,
"grad_norm": 0.6821185350418091,
"learning_rate": 0.0002536643281165677,
"loss": 3.3313,
"step": 53600
},
{
"epoch": 5.784366576819407,
"grad_norm": 0.6996495127677917,
"learning_rate": 0.0002533405288720993,
"loss": 3.3474,
"step": 53650
},
{
"epoch": 5.789757412398922,
"grad_norm": 0.7212629914283752,
"learning_rate": 0.00025301672962763083,
"loss": 3.3504,
"step": 53700
},
{
"epoch": 5.795148247978437,
"grad_norm": 0.7071073055267334,
"learning_rate": 0.00025269293038316244,
"loss": 3.329,
"step": 53750
},
{
"epoch": 5.800539083557951,
"grad_norm": 0.7012442350387573,
"learning_rate": 0.000252369131138694,
"loss": 3.3373,
"step": 53800
},
{
"epoch": 5.8059299191374665,
"grad_norm": 0.678455114364624,
"learning_rate": 0.00025204533189422554,
"loss": 3.3193,
"step": 53850
},
{
"epoch": 5.811320754716981,
"grad_norm": 0.6812479496002197,
"learning_rate": 0.00025172153264975714,
"loss": 3.3532,
"step": 53900
},
{
"epoch": 5.816711590296496,
"grad_norm": 0.6944243311882019,
"learning_rate": 0.0002513977334052887,
"loss": 3.3265,
"step": 53950
},
{
"epoch": 5.822102425876011,
"grad_norm": 0.6746711730957031,
"learning_rate": 0.00025107393416082025,
"loss": 3.3288,
"step": 54000
},
{
"epoch": 5.822102425876011,
"eval_accuracy": 0.3834238719738677,
"eval_loss": 3.3838024139404297,
"eval_runtime": 181.1959,
"eval_samples_per_second": 99.401,
"eval_steps_per_second": 6.214,
"step": 54000
},
{
"epoch": 5.827493261455525,
"grad_norm": 0.7202019095420837,
"learning_rate": 0.00025075013491635185,
"loss": 3.3241,
"step": 54050
},
{
"epoch": 5.83288409703504,
"grad_norm": 0.6693416237831116,
"learning_rate": 0.0002504263356718834,
"loss": 3.3321,
"step": 54100
},
{
"epoch": 5.8382749326145555,
"grad_norm": 0.6850435137748718,
"learning_rate": 0.000250102536427415,
"loss": 3.346,
"step": 54150
},
{
"epoch": 5.84366576819407,
"grad_norm": 0.7145354151725769,
"learning_rate": 0.00024977873718294656,
"loss": 3.337,
"step": 54200
},
{
"epoch": 5.849056603773585,
"grad_norm": 0.6557576656341553,
"learning_rate": 0.0002494549379384781,
"loss": 3.3445,
"step": 54250
},
{
"epoch": 5.8544474393531,
"grad_norm": 0.6981033086776733,
"learning_rate": 0.0002491311386940097,
"loss": 3.3429,
"step": 54300
},
{
"epoch": 5.859838274932614,
"grad_norm": 0.6820398569107056,
"learning_rate": 0.00024880733944954126,
"loss": 3.3319,
"step": 54350
},
{
"epoch": 5.8652291105121295,
"grad_norm": 0.6957273483276367,
"learning_rate": 0.0002484835402050728,
"loss": 3.3197,
"step": 54400
},
{
"epoch": 5.870619946091644,
"grad_norm": 0.6696581244468689,
"learning_rate": 0.0002481597409606044,
"loss": 3.3145,
"step": 54450
},
{
"epoch": 5.876010781671159,
"grad_norm": 0.6897146105766296,
"learning_rate": 0.00024783594171613597,
"loss": 3.3309,
"step": 54500
},
{
"epoch": 5.881401617250674,
"grad_norm": 0.7034358978271484,
"learning_rate": 0.0002475121424716675,
"loss": 3.3257,
"step": 54550
},
{
"epoch": 5.886792452830189,
"grad_norm": 0.6848185062408447,
"learning_rate": 0.0002471883432271991,
"loss": 3.3573,
"step": 54600
},
{
"epoch": 5.892183288409703,
"grad_norm": 0.6904000043869019,
"learning_rate": 0.0002468645439827307,
"loss": 3.3408,
"step": 54650
},
{
"epoch": 5.8975741239892185,
"grad_norm": 0.685601532459259,
"learning_rate": 0.0002465407447382623,
"loss": 3.3402,
"step": 54700
},
{
"epoch": 5.902964959568733,
"grad_norm": 0.6840948462486267,
"learning_rate": 0.00024621694549379383,
"loss": 3.3413,
"step": 54750
},
{
"epoch": 5.908355795148248,
"grad_norm": 0.6701492667198181,
"learning_rate": 0.00024589314624932543,
"loss": 3.3445,
"step": 54800
},
{
"epoch": 5.913746630727763,
"grad_norm": 0.72344571352005,
"learning_rate": 0.00024556934700485693,
"loss": 3.3412,
"step": 54850
},
{
"epoch": 5.919137466307277,
"grad_norm": 0.7367925643920898,
"learning_rate": 0.00024524554776038853,
"loss": 3.3218,
"step": 54900
},
{
"epoch": 5.9245283018867925,
"grad_norm": 0.6771606802940369,
"learning_rate": 0.0002449217485159201,
"loss": 3.3271,
"step": 54950
},
{
"epoch": 5.929919137466308,
"grad_norm": 0.6754522919654846,
"learning_rate": 0.0002445979492714517,
"loss": 3.3283,
"step": 55000
},
{
"epoch": 5.929919137466308,
"eval_accuracy": 0.38378459956141175,
"eval_loss": 3.3799691200256348,
"eval_runtime": 181.3017,
"eval_samples_per_second": 99.343,
"eval_steps_per_second": 6.211,
"step": 55000
},
{
"epoch": 5.935309973045822,
"grad_norm": 0.6597005724906921,
"learning_rate": 0.00024427415002698324,
"loss": 3.3435,
"step": 55050
},
{
"epoch": 5.940700808625337,
"grad_norm": 0.7595816254615784,
"learning_rate": 0.00024395035078251482,
"loss": 3.3319,
"step": 55100
},
{
"epoch": 5.946091644204852,
"grad_norm": 0.7290930151939392,
"learning_rate": 0.0002436265515380464,
"loss": 3.3277,
"step": 55150
},
{
"epoch": 5.951482479784366,
"grad_norm": 0.701313853263855,
"learning_rate": 0.00024330275229357797,
"loss": 3.3499,
"step": 55200
},
{
"epoch": 5.9568733153638815,
"grad_norm": 0.6714861392974854,
"learning_rate": 0.00024297895304910952,
"loss": 3.3216,
"step": 55250
},
{
"epoch": 5.962264150943396,
"grad_norm": 0.6825129389762878,
"learning_rate": 0.0002426551538046411,
"loss": 3.3213,
"step": 55300
},
{
"epoch": 5.967654986522911,
"grad_norm": 0.7039099931716919,
"learning_rate": 0.00024233135456017265,
"loss": 3.3281,
"step": 55350
},
{
"epoch": 5.973045822102426,
"grad_norm": 0.706758439540863,
"learning_rate": 0.0002420140313005936,
"loss": 3.3336,
"step": 55400
},
{
"epoch": 5.97843665768194,
"grad_norm": 0.6805482506752014,
"learning_rate": 0.00024169023205612518,
"loss": 3.3279,
"step": 55450
},
{
"epoch": 5.9838274932614555,
"grad_norm": 0.7011688947677612,
"learning_rate": 0.00024136643281165676,
"loss": 3.3067,
"step": 55500
},
{
"epoch": 5.989218328840971,
"grad_norm": 0.6903470158576965,
"learning_rate": 0.00024104263356718834,
"loss": 3.3365,
"step": 55550
},
{
"epoch": 5.994609164420485,
"grad_norm": 0.7348570227622986,
"learning_rate": 0.0002407188343227199,
"loss": 3.3229,
"step": 55600
},
{
"epoch": 6.0,
"grad_norm": 1.4172661304473877,
"learning_rate": 0.00024039503507825147,
"loss": 3.3551,
"step": 55650
},
{
"epoch": 6.005390835579515,
"grad_norm": 0.6745185852050781,
"learning_rate": 0.00024007123583378302,
"loss": 3.233,
"step": 55700
},
{
"epoch": 6.010781671159029,
"grad_norm": 0.6688816547393799,
"learning_rate": 0.0002397474365893146,
"loss": 3.2366,
"step": 55750
},
{
"epoch": 6.0161725067385445,
"grad_norm": 0.8564075827598572,
"learning_rate": 0.00023942363734484617,
"loss": 3.2445,
"step": 55800
},
{
"epoch": 6.02156334231806,
"grad_norm": 0.686560332775116,
"learning_rate": 0.00023909983810037775,
"loss": 3.2495,
"step": 55850
},
{
"epoch": 6.026954177897574,
"grad_norm": 0.7073839902877808,
"learning_rate": 0.00023877603885590933,
"loss": 3.2498,
"step": 55900
},
{
"epoch": 6.032345013477089,
"grad_norm": 0.7176925539970398,
"learning_rate": 0.0002384522396114409,
"loss": 3.2478,
"step": 55950
},
{
"epoch": 6.037735849056604,
"grad_norm": 0.7545502185821533,
"learning_rate": 0.00023812844036697248,
"loss": 3.2613,
"step": 56000
},
{
"epoch": 6.037735849056604,
"eval_accuracy": 0.3840258089723599,
"eval_loss": 3.3837029933929443,
"eval_runtime": 181.2613,
"eval_samples_per_second": 99.365,
"eval_steps_per_second": 6.212,
"step": 56000
},
{
"epoch": 6.0431266846361185,
"grad_norm": 0.8260035514831543,
"learning_rate": 0.000237804641122504,
"loss": 3.2597,
"step": 56050
},
{
"epoch": 6.048517520215634,
"grad_norm": 0.7226826548576355,
"learning_rate": 0.00023748084187803558,
"loss": 3.248,
"step": 56100
},
{
"epoch": 6.053908355795148,
"grad_norm": 0.7119999527931213,
"learning_rate": 0.00023715704263356716,
"loss": 3.2618,
"step": 56150
},
{
"epoch": 6.059299191374663,
"grad_norm": 0.6939723491668701,
"learning_rate": 0.00023683324338909874,
"loss": 3.2554,
"step": 56200
},
{
"epoch": 6.064690026954178,
"grad_norm": 0.7050407528877258,
"learning_rate": 0.00023650944414463032,
"loss": 3.2404,
"step": 56250
},
{
"epoch": 6.070080862533692,
"grad_norm": 0.7517873644828796,
"learning_rate": 0.0002361856449001619,
"loss": 3.264,
"step": 56300
},
{
"epoch": 6.0754716981132075,
"grad_norm": 0.7282605767250061,
"learning_rate": 0.00023586184565569347,
"loss": 3.2513,
"step": 56350
},
{
"epoch": 6.080862533692723,
"grad_norm": 0.6578624248504639,
"learning_rate": 0.00023553804641122502,
"loss": 3.2545,
"step": 56400
},
{
"epoch": 6.086253369272237,
"grad_norm": 0.7751697301864624,
"learning_rate": 0.00023521424716675657,
"loss": 3.247,
"step": 56450
},
{
"epoch": 6.091644204851752,
"grad_norm": 0.7784311175346375,
"learning_rate": 0.00023489044792228815,
"loss": 3.2682,
"step": 56500
},
{
"epoch": 6.097035040431267,
"grad_norm": 0.7385051846504211,
"learning_rate": 0.00023456664867781973,
"loss": 3.2473,
"step": 56550
},
{
"epoch": 6.1024258760107815,
"grad_norm": 0.7438140511512756,
"learning_rate": 0.0002342428494333513,
"loss": 3.2541,
"step": 56600
},
{
"epoch": 6.107816711590297,
"grad_norm": 0.6857371926307678,
"learning_rate": 0.00023391905018888288,
"loss": 3.2512,
"step": 56650
},
{
"epoch": 6.113207547169812,
"grad_norm": 0.7011164426803589,
"learning_rate": 0.00023359525094441443,
"loss": 3.2732,
"step": 56700
},
{
"epoch": 6.118598382749326,
"grad_norm": 0.732795774936676,
"learning_rate": 0.000233271451699946,
"loss": 3.2637,
"step": 56750
},
{
"epoch": 6.123989218328841,
"grad_norm": 0.6810613870620728,
"learning_rate": 0.0002329476524554776,
"loss": 3.2665,
"step": 56800
},
{
"epoch": 6.129380053908355,
"grad_norm": 0.7227889895439148,
"learning_rate": 0.00023262385321100917,
"loss": 3.2682,
"step": 56850
},
{
"epoch": 6.1347708894878705,
"grad_norm": 0.7153551578521729,
"learning_rate": 0.00023230005396654072,
"loss": 3.2751,
"step": 56900
},
{
"epoch": 6.140161725067386,
"grad_norm": 0.728844940662384,
"learning_rate": 0.0002319762547220723,
"loss": 3.253,
"step": 56950
},
{
"epoch": 6.1455525606469,
"grad_norm": 0.7004696130752563,
"learning_rate": 0.00023165245547760387,
"loss": 3.2509,
"step": 57000
},
{
"epoch": 6.1455525606469,
"eval_accuracy": 0.38396626718983756,
"eval_loss": 3.383383274078369,
"eval_runtime": 181.2593,
"eval_samples_per_second": 99.366,
"eval_steps_per_second": 6.212,
"step": 57000
},
{
"epoch": 6.150943396226415,
"grad_norm": 0.7202807068824768,
"learning_rate": 0.00023132865623313542,
"loss": 3.2795,
"step": 57050
},
{
"epoch": 6.15633423180593,
"grad_norm": 0.705938994884491,
"learning_rate": 0.000231004856988667,
"loss": 3.27,
"step": 57100
},
{
"epoch": 6.1617250673854445,
"grad_norm": 0.6851361393928528,
"learning_rate": 0.00023068105774419858,
"loss": 3.2508,
"step": 57150
},
{
"epoch": 6.16711590296496,
"grad_norm": 0.6873080730438232,
"learning_rate": 0.00023035725849973016,
"loss": 3.2574,
"step": 57200
},
{
"epoch": 6.172506738544475,
"grad_norm": 0.7100987434387207,
"learning_rate": 0.00023003345925526173,
"loss": 3.2618,
"step": 57250
},
{
"epoch": 6.177897574123989,
"grad_norm": 0.7307910919189453,
"learning_rate": 0.00022970966001079328,
"loss": 3.2592,
"step": 57300
},
{
"epoch": 6.183288409703504,
"grad_norm": 0.7418186664581299,
"learning_rate": 0.00022938586076632484,
"loss": 3.2757,
"step": 57350
},
{
"epoch": 6.188679245283019,
"grad_norm": 0.6749211549758911,
"learning_rate": 0.0002290620615218564,
"loss": 3.2676,
"step": 57400
},
{
"epoch": 6.1940700808625335,
"grad_norm": 0.7412350177764893,
"learning_rate": 0.00022874473826227736,
"loss": 3.2718,
"step": 57450
},
{
"epoch": 6.199460916442049,
"grad_norm": 0.7385445237159729,
"learning_rate": 0.00022842093901780894,
"loss": 3.2709,
"step": 57500
},
{
"epoch": 6.204851752021563,
"grad_norm": 0.7048656344413757,
"learning_rate": 0.00022809713977334052,
"loss": 3.2589,
"step": 57550
},
{
"epoch": 6.210242587601078,
"grad_norm": 0.7360633611679077,
"learning_rate": 0.0002277733405288721,
"loss": 3.2467,
"step": 57600
},
{
"epoch": 6.215633423180593,
"grad_norm": 0.7095308899879456,
"learning_rate": 0.00022744954128440365,
"loss": 3.2677,
"step": 57650
},
{
"epoch": 6.2210242587601075,
"grad_norm": 0.7492310404777527,
"learning_rate": 0.0002271257420399352,
"loss": 3.2745,
"step": 57700
},
{
"epoch": 6.226415094339623,
"grad_norm": 0.7403215169906616,
"learning_rate": 0.00022680194279546678,
"loss": 3.2875,
"step": 57750
},
{
"epoch": 6.231805929919138,
"grad_norm": 0.7125136256217957,
"learning_rate": 0.00022647814355099835,
"loss": 3.2851,
"step": 57800
},
{
"epoch": 6.237196765498652,
"grad_norm": 0.707063615322113,
"learning_rate": 0.00022615434430652993,
"loss": 3.2561,
"step": 57850
},
{
"epoch": 6.242587601078167,
"grad_norm": 0.7181234955787659,
"learning_rate": 0.0002258305450620615,
"loss": 3.2781,
"step": 57900
},
{
"epoch": 6.247978436657682,
"grad_norm": 0.7536298632621765,
"learning_rate": 0.0002255067458175931,
"loss": 3.2671,
"step": 57950
},
{
"epoch": 6.2533692722371965,
"grad_norm": 0.7768057584762573,
"learning_rate": 0.00022518942255801399,
"loss": 3.2539,
"step": 58000
},
{
"epoch": 6.2533692722371965,
"eval_accuracy": 0.38457092051052516,
"eval_loss": 3.3802192211151123,
"eval_runtime": 181.2846,
"eval_samples_per_second": 99.352,
"eval_steps_per_second": 6.211,
"step": 58000
},
{
"epoch": 6.258760107816712,
"grad_norm": 0.6842986941337585,
"learning_rate": 0.00022486562331354556,
"loss": 3.2695,
"step": 58050
},
{
"epoch": 6.264150943396227,
"grad_norm": 0.7692577242851257,
"learning_rate": 0.00022454182406907714,
"loss": 3.2815,
"step": 58100
},
{
"epoch": 6.269541778975741,
"grad_norm": 0.7510808706283569,
"learning_rate": 0.00022421802482460872,
"loss": 3.2576,
"step": 58150
},
{
"epoch": 6.274932614555256,
"grad_norm": 0.7288107872009277,
"learning_rate": 0.0002238942255801403,
"loss": 3.2758,
"step": 58200
},
{
"epoch": 6.280323450134771,
"grad_norm": 0.7560855746269226,
"learning_rate": 0.00022357042633567187,
"loss": 3.282,
"step": 58250
},
{
"epoch": 6.285714285714286,
"grad_norm": 0.7523503303527832,
"learning_rate": 0.00022324662709120345,
"loss": 3.2651,
"step": 58300
},
{
"epoch": 6.291105121293801,
"grad_norm": 0.7418544888496399,
"learning_rate": 0.00022292282784673503,
"loss": 3.2573,
"step": 58350
},
{
"epoch": 6.296495956873315,
"grad_norm": 0.7388834357261658,
"learning_rate": 0.00022259902860226655,
"loss": 3.2657,
"step": 58400
},
{
"epoch": 6.30188679245283,
"grad_norm": 0.7132607102394104,
"learning_rate": 0.00022227522935779813,
"loss": 3.2836,
"step": 58450
},
{
"epoch": 6.307277628032345,
"grad_norm": 0.6894075274467468,
"learning_rate": 0.0002219514301133297,
"loss": 3.2544,
"step": 58500
},
{
"epoch": 6.3126684636118595,
"grad_norm": 0.7451420426368713,
"learning_rate": 0.00022162763086886129,
"loss": 3.2773,
"step": 58550
},
{
"epoch": 6.318059299191375,
"grad_norm": 0.7312679886817932,
"learning_rate": 0.00022130383162439286,
"loss": 3.2862,
"step": 58600
},
{
"epoch": 6.32345013477089,
"grad_norm": 0.6841517090797424,
"learning_rate": 0.00022098003237992444,
"loss": 3.2784,
"step": 58650
},
{
"epoch": 6.328840970350404,
"grad_norm": 0.7211723327636719,
"learning_rate": 0.00022065623313545602,
"loss": 3.2946,
"step": 58700
},
{
"epoch": 6.334231805929919,
"grad_norm": 0.7351445555686951,
"learning_rate": 0.00022033243389098757,
"loss": 3.2894,
"step": 58750
},
{
"epoch": 6.339622641509434,
"grad_norm": 0.7605700492858887,
"learning_rate": 0.00022000863464651915,
"loss": 3.276,
"step": 58800
},
{
"epoch": 6.345013477088949,
"grad_norm": 0.7109339237213135,
"learning_rate": 0.0002196848354020507,
"loss": 3.2798,
"step": 58850
},
{
"epoch": 6.350404312668464,
"grad_norm": 0.7678219676017761,
"learning_rate": 0.00021936103615758227,
"loss": 3.2674,
"step": 58900
},
{
"epoch": 6.355795148247978,
"grad_norm": 0.7287068963050842,
"learning_rate": 0.00021903723691311385,
"loss": 3.2872,
"step": 58950
},
{
"epoch": 6.361185983827493,
"grad_norm": 0.7394760251045227,
"learning_rate": 0.00021871343766864543,
"loss": 3.2851,
"step": 59000
},
{
"epoch": 6.361185983827493,
"eval_accuracy": 0.3851109253629631,
"eval_loss": 3.3774285316467285,
"eval_runtime": 181.3071,
"eval_samples_per_second": 99.34,
"eval_steps_per_second": 6.21,
"step": 59000
},
{
"epoch": 6.366576819407008,
"grad_norm": 0.7397855520248413,
"learning_rate": 0.00021838963842417698,
"loss": 3.2902,
"step": 59050
},
{
"epoch": 6.3719676549865225,
"grad_norm": 0.7686603665351868,
"learning_rate": 0.00021806583917970856,
"loss": 3.2809,
"step": 59100
},
{
"epoch": 6.377358490566038,
"grad_norm": 0.7741119861602783,
"learning_rate": 0.00021774203993524014,
"loss": 3.2822,
"step": 59150
},
{
"epoch": 6.382749326145553,
"grad_norm": 0.7288427948951721,
"learning_rate": 0.00021741824069077171,
"loss": 3.2994,
"step": 59200
},
{
"epoch": 6.388140161725067,
"grad_norm": 0.7430805563926697,
"learning_rate": 0.00021709444144630326,
"loss": 3.274,
"step": 59250
},
{
"epoch": 6.393530997304582,
"grad_norm": 0.7147725820541382,
"learning_rate": 0.00021677064220183484,
"loss": 3.2654,
"step": 59300
},
{
"epoch": 6.398921832884097,
"grad_norm": 0.7625100612640381,
"learning_rate": 0.00021644684295736642,
"loss": 3.2797,
"step": 59350
},
{
"epoch": 6.404312668463612,
"grad_norm": 0.7261756658554077,
"learning_rate": 0.00021612304371289797,
"loss": 3.3008,
"step": 59400
},
{
"epoch": 6.409703504043127,
"grad_norm": 0.7453996539115906,
"learning_rate": 0.00021579924446842955,
"loss": 3.2812,
"step": 59450
},
{
"epoch": 6.415094339622642,
"grad_norm": 0.7072122693061829,
"learning_rate": 0.00021547544522396113,
"loss": 3.2688,
"step": 59500
},
{
"epoch": 6.420485175202156,
"grad_norm": 0.7056574821472168,
"learning_rate": 0.0002151516459794927,
"loss": 3.301,
"step": 59550
},
{
"epoch": 6.425876010781671,
"grad_norm": 0.7520676851272583,
"learning_rate": 0.00021482784673502428,
"loss": 3.2713,
"step": 59600
},
{
"epoch": 6.431266846361186,
"grad_norm": 0.6953074336051941,
"learning_rate": 0.00021450404749055586,
"loss": 3.2715,
"step": 59650
},
{
"epoch": 6.436657681940701,
"grad_norm": 0.7296004295349121,
"learning_rate": 0.00021418024824608738,
"loss": 3.3124,
"step": 59700
},
{
"epoch": 6.442048517520216,
"grad_norm": 0.7285916805267334,
"learning_rate": 0.00021385644900161896,
"loss": 3.2901,
"step": 59750
},
{
"epoch": 6.44743935309973,
"grad_norm": 0.7235659956932068,
"learning_rate": 0.00021353264975715054,
"loss": 3.2791,
"step": 59800
},
{
"epoch": 6.452830188679245,
"grad_norm": 0.7378281354904175,
"learning_rate": 0.00021320885051268211,
"loss": 3.2736,
"step": 59850
},
{
"epoch": 6.45822102425876,
"grad_norm": 0.7465603351593018,
"learning_rate": 0.0002128850512682137,
"loss": 3.2699,
"step": 59900
},
{
"epoch": 6.463611859838275,
"grad_norm": 0.6738324761390686,
"learning_rate": 0.00021256125202374527,
"loss": 3.2668,
"step": 59950
},
{
"epoch": 6.46900269541779,
"grad_norm": 0.7212446928024292,
"learning_rate": 0.00021223745277927685,
"loss": 3.2878,
"step": 60000
},
{
"epoch": 6.46900269541779,
"eval_accuracy": 0.38529943812332124,
"eval_loss": 3.372497320175171,
"eval_runtime": 181.6396,
"eval_samples_per_second": 99.158,
"eval_steps_per_second": 6.199,
"step": 60000
},
{
"epoch": 6.474393530997305,
"grad_norm": 0.7071395516395569,
"learning_rate": 0.00021191365353480843,
"loss": 3.2634,
"step": 60050
},
{
"epoch": 6.479784366576819,
"grad_norm": 0.7288137674331665,
"learning_rate": 0.00021158985429033995,
"loss": 3.2855,
"step": 60100
},
{
"epoch": 6.485175202156334,
"grad_norm": 0.7339012026786804,
"learning_rate": 0.00021126605504587153,
"loss": 3.2896,
"step": 60150
},
{
"epoch": 6.490566037735849,
"grad_norm": 0.7525122165679932,
"learning_rate": 0.0002109422558014031,
"loss": 3.2586,
"step": 60200
},
{
"epoch": 6.495956873315364,
"grad_norm": 0.7330468893051147,
"learning_rate": 0.00021061845655693468,
"loss": 3.268,
"step": 60250
},
{
"epoch": 6.501347708894879,
"grad_norm": 0.7264840006828308,
"learning_rate": 0.00021029465731246626,
"loss": 3.302,
"step": 60300
},
{
"epoch": 6.506738544474393,
"grad_norm": 0.7638119459152222,
"learning_rate": 0.00020997085806799784,
"loss": 3.2616,
"step": 60350
},
{
"epoch": 6.512129380053908,
"grad_norm": 0.7544935345649719,
"learning_rate": 0.0002096470588235294,
"loss": 3.275,
"step": 60400
},
{
"epoch": 6.517520215633423,
"grad_norm": 0.7188968658447266,
"learning_rate": 0.00020932325957906097,
"loss": 3.2794,
"step": 60450
},
{
"epoch": 6.5229110512129385,
"grad_norm": 0.7104805707931519,
"learning_rate": 0.00020899946033459254,
"loss": 3.286,
"step": 60500
},
{
"epoch": 6.528301886792453,
"grad_norm": 0.7175164818763733,
"learning_rate": 0.0002086756610901241,
"loss": 3.2838,
"step": 60550
},
{
"epoch": 6.533692722371968,
"grad_norm": 0.7335889339447021,
"learning_rate": 0.00020835186184565567,
"loss": 3.2831,
"step": 60600
},
{
"epoch": 6.539083557951482,
"grad_norm": 0.8060126900672913,
"learning_rate": 0.00020802806260118725,
"loss": 3.2862,
"step": 60650
},
{
"epoch": 6.544474393530997,
"grad_norm": 0.7434999346733093,
"learning_rate": 0.00020770426335671883,
"loss": 3.2784,
"step": 60700
},
{
"epoch": 6.549865229110512,
"grad_norm": 0.8102301359176636,
"learning_rate": 0.00020738046411225038,
"loss": 3.2799,
"step": 60750
},
{
"epoch": 6.555256064690027,
"grad_norm": 0.7343177199363708,
"learning_rate": 0.00020705666486778196,
"loss": 3.2752,
"step": 60800
},
{
"epoch": 6.560646900269542,
"grad_norm": 0.7135523557662964,
"learning_rate": 0.00020673286562331353,
"loss": 3.2736,
"step": 60850
},
{
"epoch": 6.566037735849057,
"grad_norm": 0.7504022121429443,
"learning_rate": 0.0002064090663788451,
"loss": 3.2712,
"step": 60900
},
{
"epoch": 6.571428571428571,
"grad_norm": 0.7385920882225037,
"learning_rate": 0.00020608526713437666,
"loss": 3.2808,
"step": 60950
},
{
"epoch": 6.576819407008086,
"grad_norm": 0.6840495467185974,
"learning_rate": 0.00020576146788990824,
"loss": 3.2817,
"step": 61000
},
{
"epoch": 6.576819407008086,
"eval_accuracy": 0.3855401042698303,
"eval_loss": 3.3692896366119385,
"eval_runtime": 181.1669,
"eval_samples_per_second": 99.417,
"eval_steps_per_second": 6.215,
"step": 61000
},
{
"epoch": 6.5822102425876015,
"grad_norm": 0.7556927800178528,
"learning_rate": 0.0002054376686454398,
"loss": 3.2719,
"step": 61050
},
{
"epoch": 6.587601078167116,
"grad_norm": 0.7351938486099243,
"learning_rate": 0.00020511386940097137,
"loss": 3.2962,
"step": 61100
},
{
"epoch": 6.592991913746631,
"grad_norm": 0.7250106930732727,
"learning_rate": 0.00020479007015650294,
"loss": 3.2934,
"step": 61150
},
{
"epoch": 6.598382749326145,
"grad_norm": 0.7262390851974487,
"learning_rate": 0.00020446627091203452,
"loss": 3.2884,
"step": 61200
},
{
"epoch": 6.60377358490566,
"grad_norm": 0.7690116763114929,
"learning_rate": 0.0002041424716675661,
"loss": 3.2865,
"step": 61250
},
{
"epoch": 6.609164420485175,
"grad_norm": 0.7058427929878235,
"learning_rate": 0.00020381867242309768,
"loss": 3.2714,
"step": 61300
},
{
"epoch": 6.6145552560646905,
"grad_norm": 0.7166352272033691,
"learning_rate": 0.00020349487317862926,
"loss": 3.3044,
"step": 61350
},
{
"epoch": 6.619946091644205,
"grad_norm": 0.7602772116661072,
"learning_rate": 0.00020317107393416078,
"loss": 3.2887,
"step": 61400
},
{
"epoch": 6.62533692722372,
"grad_norm": 0.7020034790039062,
"learning_rate": 0.00020284727468969236,
"loss": 3.2996,
"step": 61450
},
{
"epoch": 6.630727762803234,
"grad_norm": 0.7787217497825623,
"learning_rate": 0.00020252347544522393,
"loss": 3.2854,
"step": 61500
},
{
"epoch": 6.636118598382749,
"grad_norm": 0.7142089605331421,
"learning_rate": 0.0002021996762007555,
"loss": 3.2919,
"step": 61550
},
{
"epoch": 6.6415094339622645,
"grad_norm": 0.7472655773162842,
"learning_rate": 0.0002018758769562871,
"loss": 3.2872,
"step": 61600
},
{
"epoch": 6.646900269541779,
"grad_norm": 0.7358732223510742,
"learning_rate": 0.00020155207771181867,
"loss": 3.2853,
"step": 61650
},
{
"epoch": 6.652291105121294,
"grad_norm": 0.7390625476837158,
"learning_rate": 0.00020122827846735024,
"loss": 3.2674,
"step": 61700
},
{
"epoch": 6.657681940700809,
"grad_norm": 0.7571896910667419,
"learning_rate": 0.00020090447922288182,
"loss": 3.292,
"step": 61750
},
{
"epoch": 6.663072776280323,
"grad_norm": 0.7482478022575378,
"learning_rate": 0.00020058067997841335,
"loss": 3.2981,
"step": 61800
},
{
"epoch": 6.668463611859838,
"grad_norm": 0.8325579166412354,
"learning_rate": 0.00020025688073394492,
"loss": 3.2977,
"step": 61850
},
{
"epoch": 6.6738544474393535,
"grad_norm": 0.7084106802940369,
"learning_rate": 0.0001999330814894765,
"loss": 3.2683,
"step": 61900
},
{
"epoch": 6.679245283018868,
"grad_norm": 0.7786864638328552,
"learning_rate": 0.00019960928224500808,
"loss": 3.2716,
"step": 61950
},
{
"epoch": 6.684636118598383,
"grad_norm": 0.8301063179969788,
"learning_rate": 0.00019929195898542903,
"loss": 3.2722,
"step": 62000
},
{
"epoch": 6.684636118598383,
"eval_accuracy": 0.3862813342705007,
"eval_loss": 3.3639204502105713,
"eval_runtime": 181.3062,
"eval_samples_per_second": 99.34,
"eval_steps_per_second": 6.21,
"step": 62000
},
{
"epoch": 6.690026954177897,
"grad_norm": 0.8427117466926575,
"learning_rate": 0.0001989681597409606,
"loss": 3.2963,
"step": 62050
},
{
"epoch": 6.695417789757412,
"grad_norm": 0.7369676232337952,
"learning_rate": 0.00019864436049649216,
"loss": 3.2889,
"step": 62100
},
{
"epoch": 6.7008086253369274,
"grad_norm": 0.7093971371650696,
"learning_rate": 0.0001983205612520237,
"loss": 3.2762,
"step": 62150
},
{
"epoch": 6.706199460916442,
"grad_norm": 0.7282435894012451,
"learning_rate": 0.0001979967620075553,
"loss": 3.2804,
"step": 62200
},
{
"epoch": 6.711590296495957,
"grad_norm": 0.7856954336166382,
"learning_rate": 0.00019767296276308686,
"loss": 3.2908,
"step": 62250
},
{
"epoch": 6.716981132075472,
"grad_norm": 0.8202438354492188,
"learning_rate": 0.00019734916351861844,
"loss": 3.276,
"step": 62300
},
{
"epoch": 6.722371967654986,
"grad_norm": 0.7405151128768921,
"learning_rate": 0.00019702536427415002,
"loss": 3.2945,
"step": 62350
},
{
"epoch": 6.727762803234501,
"grad_norm": 0.8001969456672668,
"learning_rate": 0.0001967015650296816,
"loss": 3.2754,
"step": 62400
},
{
"epoch": 6.7331536388140165,
"grad_norm": 0.7255493402481079,
"learning_rate": 0.00019637776578521315,
"loss": 3.2808,
"step": 62450
},
{
"epoch": 6.738544474393531,
"grad_norm": 0.7461863160133362,
"learning_rate": 0.00019605396654074473,
"loss": 3.2842,
"step": 62500
},
{
"epoch": 6.743935309973046,
"grad_norm": 0.7539776563644409,
"learning_rate": 0.0001957301672962763,
"loss": 3.291,
"step": 62550
},
{
"epoch": 6.74932614555256,
"grad_norm": 0.735890805721283,
"learning_rate": 0.00019540636805180785,
"loss": 3.2752,
"step": 62600
},
{
"epoch": 6.754716981132075,
"grad_norm": 0.7455848455429077,
"learning_rate": 0.00019508256880733943,
"loss": 3.2785,
"step": 62650
},
{
"epoch": 6.7601078167115904,
"grad_norm": 0.739158570766449,
"learning_rate": 0.000194758769562871,
"loss": 3.2694,
"step": 62700
},
{
"epoch": 6.765498652291106,
"grad_norm": 0.8299079537391663,
"learning_rate": 0.00019444144630329193,
"loss": 3.2988,
"step": 62750
},
{
"epoch": 6.77088948787062,
"grad_norm": 0.754202127456665,
"learning_rate": 0.0001941176470588235,
"loss": 3.2826,
"step": 62800
},
{
"epoch": 6.776280323450135,
"grad_norm": 0.7523947954177856,
"learning_rate": 0.0001937938478143551,
"loss": 3.297,
"step": 62850
},
{
"epoch": 6.781671159029649,
"grad_norm": 0.7100173234939575,
"learning_rate": 0.00019347004856988664,
"loss": 3.2971,
"step": 62900
},
{
"epoch": 6.787061994609164,
"grad_norm": 0.7373396158218384,
"learning_rate": 0.00019314624932541822,
"loss": 3.2677,
"step": 62950
},
{
"epoch": 6.7924528301886795,
"grad_norm": 0.7798445224761963,
"learning_rate": 0.0001928224500809498,
"loss": 3.2773,
"step": 63000
},
{
"epoch": 6.7924528301886795,
"eval_accuracy": 0.38666357512983207,
"eval_loss": 3.3608620166778564,
"eval_runtime": 181.2384,
"eval_samples_per_second": 99.377,
"eval_steps_per_second": 6.213,
"step": 63000
},
{
"epoch": 6.797843665768194,
"grad_norm": 0.7339450120925903,
"learning_rate": 0.00019249865083648137,
"loss": 3.2794,
"step": 63050
},
{
"epoch": 6.803234501347709,
"grad_norm": 0.8022555112838745,
"learning_rate": 0.00019217485159201292,
"loss": 3.28,
"step": 63100
},
{
"epoch": 6.808625336927224,
"grad_norm": 0.7713608741760254,
"learning_rate": 0.0001918510523475445,
"loss": 3.2818,
"step": 63150
},
{
"epoch": 6.814016172506738,
"grad_norm": 0.7245850563049316,
"learning_rate": 0.00019152725310307608,
"loss": 3.2846,
"step": 63200
},
{
"epoch": 6.819407008086253,
"grad_norm": 0.7321873903274536,
"learning_rate": 0.00019120345385860766,
"loss": 3.2755,
"step": 63250
},
{
"epoch": 6.824797843665769,
"grad_norm": 0.7811344265937805,
"learning_rate": 0.00019087965461413923,
"loss": 3.264,
"step": 63300
},
{
"epoch": 6.830188679245283,
"grad_norm": 0.7513942718505859,
"learning_rate": 0.00019055585536967079,
"loss": 3.2838,
"step": 63350
},
{
"epoch": 6.835579514824798,
"grad_norm": 0.7570948004722595,
"learning_rate": 0.00019023205612520234,
"loss": 3.2757,
"step": 63400
},
{
"epoch": 6.840970350404312,
"grad_norm": 0.7433807253837585,
"learning_rate": 0.00018990825688073391,
"loss": 3.295,
"step": 63450
},
{
"epoch": 6.846361185983827,
"grad_norm": 0.7448228597640991,
"learning_rate": 0.0001895844576362655,
"loss": 3.2817,
"step": 63500
},
{
"epoch": 6.8517520215633425,
"grad_norm": 0.7417268753051758,
"learning_rate": 0.00018926065839179707,
"loss": 3.2836,
"step": 63550
},
{
"epoch": 6.857142857142857,
"grad_norm": 0.7466510534286499,
"learning_rate": 0.00018893685914732865,
"loss": 3.2757,
"step": 63600
},
{
"epoch": 6.862533692722372,
"grad_norm": 0.7845950722694397,
"learning_rate": 0.00018861305990286022,
"loss": 3.2854,
"step": 63650
},
{
"epoch": 6.867924528301887,
"grad_norm": 0.7400863170623779,
"learning_rate": 0.0001882892606583918,
"loss": 3.2858,
"step": 63700
},
{
"epoch": 6.873315363881401,
"grad_norm": 0.7376729249954224,
"learning_rate": 0.00018796546141392333,
"loss": 3.2852,
"step": 63750
},
{
"epoch": 6.878706199460916,
"grad_norm": 0.7628201246261597,
"learning_rate": 0.0001876416621694549,
"loss": 3.2707,
"step": 63800
},
{
"epoch": 6.884097035040432,
"grad_norm": 0.7419650554656982,
"learning_rate": 0.00018731786292498648,
"loss": 3.2728,
"step": 63850
},
{
"epoch": 6.889487870619946,
"grad_norm": 0.7342135310173035,
"learning_rate": 0.00018699406368051806,
"loss": 3.3005,
"step": 63900
},
{
"epoch": 6.894878706199461,
"grad_norm": 0.7585704326629639,
"learning_rate": 0.00018667026443604964,
"loss": 3.2874,
"step": 63950
},
{
"epoch": 6.900269541778976,
"grad_norm": 0.7235996127128601,
"learning_rate": 0.00018634646519158121,
"loss": 3.2948,
"step": 64000
},
{
"epoch": 6.900269541778976,
"eval_accuracy": 0.3871532736952119,
"eval_loss": 3.3567447662353516,
"eval_runtime": 181.3276,
"eval_samples_per_second": 99.328,
"eval_steps_per_second": 6.21,
"step": 64000
},
{
"epoch": 6.90566037735849,
"grad_norm": 0.7436850070953369,
"learning_rate": 0.0001860226659471128,
"loss": 3.2803,
"step": 64050
},
{
"epoch": 6.9110512129380055,
"grad_norm": 0.7636563181877136,
"learning_rate": 0.00018569886670264434,
"loss": 3.2649,
"step": 64100
},
{
"epoch": 6.916442048517521,
"grad_norm": 0.712037980556488,
"learning_rate": 0.00018537506745817592,
"loss": 3.2751,
"step": 64150
},
{
"epoch": 6.921832884097035,
"grad_norm": 0.7578791379928589,
"learning_rate": 0.00018505126821370747,
"loss": 3.282,
"step": 64200
},
{
"epoch": 6.92722371967655,
"grad_norm": 0.7509757876396179,
"learning_rate": 0.00018472746896923905,
"loss": 3.2789,
"step": 64250
},
{
"epoch": 6.932614555256064,
"grad_norm": 0.81334388256073,
"learning_rate": 0.00018440366972477063,
"loss": 3.2735,
"step": 64300
},
{
"epoch": 6.938005390835579,
"grad_norm": 0.7207751274108887,
"learning_rate": 0.0001840798704803022,
"loss": 3.2851,
"step": 64350
},
{
"epoch": 6.943396226415095,
"grad_norm": 0.753903865814209,
"learning_rate": 0.00018375607123583378,
"loss": 3.2792,
"step": 64400
},
{
"epoch": 6.948787061994609,
"grad_norm": 0.7904179096221924,
"learning_rate": 0.00018343227199136533,
"loss": 3.2741,
"step": 64450
},
{
"epoch": 6.954177897574124,
"grad_norm": 0.7457177639007568,
"learning_rate": 0.0001831084727468969,
"loss": 3.2892,
"step": 64500
},
{
"epoch": 6.959568733153639,
"grad_norm": 0.7281033396720886,
"learning_rate": 0.0001827846735024285,
"loss": 3.2808,
"step": 64550
},
{
"epoch": 6.964959568733153,
"grad_norm": 0.7740973830223083,
"learning_rate": 0.00018246087425796004,
"loss": 3.2841,
"step": 64600
},
{
"epoch": 6.9703504043126685,
"grad_norm": 0.7639821171760559,
"learning_rate": 0.00018213707501349162,
"loss": 3.2908,
"step": 64650
},
{
"epoch": 6.975741239892184,
"grad_norm": 0.743857204914093,
"learning_rate": 0.0001818132757690232,
"loss": 3.2942,
"step": 64700
},
{
"epoch": 6.981132075471698,
"grad_norm": 0.7618937492370605,
"learning_rate": 0.00018148947652455474,
"loss": 3.2884,
"step": 64750
},
{
"epoch": 6.986522911051213,
"grad_norm": 0.8061965107917786,
"learning_rate": 0.00018116567728008632,
"loss": 3.2851,
"step": 64800
},
{
"epoch": 6.991913746630727,
"grad_norm": 0.7297230362892151,
"learning_rate": 0.0001808418780356179,
"loss": 3.2833,
"step": 64850
},
{
"epoch": 6.997304582210242,
"grad_norm": 0.7657062411308289,
"learning_rate": 0.00018051807879114948,
"loss": 3.2883,
"step": 64900
},
{
"epoch": 7.002695417789758,
"grad_norm": 0.7657566070556641,
"learning_rate": 0.00018019427954668105,
"loss": 3.2355,
"step": 64950
},
{
"epoch": 7.008086253369272,
"grad_norm": 0.7892380952835083,
"learning_rate": 0.00017987048030221263,
"loss": 3.1985,
"step": 65000
},
{
"epoch": 7.008086253369272,
"eval_accuracy": 0.38724280367477104,
"eval_loss": 3.3584020137786865,
"eval_runtime": 181.2129,
"eval_samples_per_second": 99.391,
"eval_steps_per_second": 6.214,
"step": 65000
},
{
"epoch": 7.013477088948787,
"grad_norm": 0.7784476280212402,
"learning_rate": 0.00017954668105774416,
"loss": 3.1944,
"step": 65050
},
{
"epoch": 7.018867924528302,
"grad_norm": 0.8016788363456726,
"learning_rate": 0.00017922288181327573,
"loss": 3.193,
"step": 65100
},
{
"epoch": 7.024258760107816,
"grad_norm": 0.7550609111785889,
"learning_rate": 0.0001788990825688073,
"loss": 3.2013,
"step": 65150
},
{
"epoch": 7.0296495956873315,
"grad_norm": 0.785059928894043,
"learning_rate": 0.0001785752833243389,
"loss": 3.1803,
"step": 65200
},
{
"epoch": 7.035040431266847,
"grad_norm": 0.7851862907409668,
"learning_rate": 0.00017825148407987047,
"loss": 3.1942,
"step": 65250
},
{
"epoch": 7.040431266846361,
"grad_norm": 0.8100311160087585,
"learning_rate": 0.00017792768483540204,
"loss": 3.201,
"step": 65300
},
{
"epoch": 7.045822102425876,
"grad_norm": 0.7234334349632263,
"learning_rate": 0.00017760388559093362,
"loss": 3.1893,
"step": 65350
},
{
"epoch": 7.051212938005391,
"grad_norm": 0.8020964860916138,
"learning_rate": 0.00017728656233135452,
"loss": 3.2196,
"step": 65400
},
{
"epoch": 7.056603773584905,
"grad_norm": 0.8220183849334717,
"learning_rate": 0.0001769627630868861,
"loss": 3.2083,
"step": 65450
},
{
"epoch": 7.061994609164421,
"grad_norm": 0.7600988745689392,
"learning_rate": 0.00017663896384241767,
"loss": 3.1986,
"step": 65500
},
{
"epoch": 7.067385444743936,
"grad_norm": 0.7532045841217041,
"learning_rate": 0.00017631516459794925,
"loss": 3.2103,
"step": 65550
},
{
"epoch": 7.07277628032345,
"grad_norm": 0.7940289974212646,
"learning_rate": 0.00017599136535348083,
"loss": 3.1881,
"step": 65600
},
{
"epoch": 7.078167115902965,
"grad_norm": 0.7455863952636719,
"learning_rate": 0.0001756675661090124,
"loss": 3.2116,
"step": 65650
},
{
"epoch": 7.083557951482479,
"grad_norm": 0.7693523168563843,
"learning_rate": 0.00017534376686454398,
"loss": 3.2141,
"step": 65700
},
{
"epoch": 7.0889487870619945,
"grad_norm": 0.8425500392913818,
"learning_rate": 0.00017501996762007556,
"loss": 3.216,
"step": 65750
},
{
"epoch": 7.09433962264151,
"grad_norm": 0.8262922167778015,
"learning_rate": 0.00017469616837560709,
"loss": 3.2106,
"step": 65800
},
{
"epoch": 7.099730458221024,
"grad_norm": 0.7434950470924377,
"learning_rate": 0.00017437236913113866,
"loss": 3.1954,
"step": 65850
},
{
"epoch": 7.105121293800539,
"grad_norm": 0.7773944735527039,
"learning_rate": 0.00017404856988667024,
"loss": 3.2254,
"step": 65900
},
{
"epoch": 7.110512129380054,
"grad_norm": 0.742092490196228,
"learning_rate": 0.00017372477064220182,
"loss": 3.186,
"step": 65950
},
{
"epoch": 7.115902964959568,
"grad_norm": 0.7907103896141052,
"learning_rate": 0.0001734009713977334,
"loss": 3.2183,
"step": 66000
},
{
"epoch": 7.115902964959568,
"eval_accuracy": 0.38738991968487185,
"eval_loss": 3.359558343887329,
"eval_runtime": 181.5307,
"eval_samples_per_second": 99.217,
"eval_steps_per_second": 6.203,
"step": 66000
},
{
"epoch": 7.121293800539084,
"grad_norm": 0.7941024899482727,
"learning_rate": 0.00017307717215326497,
"loss": 3.2102,
"step": 66050
},
{
"epoch": 7.126684636118599,
"grad_norm": 0.7684633135795593,
"learning_rate": 0.00017275337290879655,
"loss": 3.1984,
"step": 66100
},
{
"epoch": 7.132075471698113,
"grad_norm": 0.7823106646537781,
"learning_rate": 0.0001724295736643281,
"loss": 3.2337,
"step": 66150
},
{
"epoch": 7.137466307277628,
"grad_norm": 0.7838732004165649,
"learning_rate": 0.00017210577441985968,
"loss": 3.2317,
"step": 66200
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.7811384201049805,
"learning_rate": 0.00017178197517539123,
"loss": 3.2214,
"step": 66250
},
{
"epoch": 7.1482479784366575,
"grad_norm": 0.782549262046814,
"learning_rate": 0.0001714581759309228,
"loss": 3.2093,
"step": 66300
},
{
"epoch": 7.153638814016173,
"grad_norm": 0.8092780709266663,
"learning_rate": 0.00017113437668645439,
"loss": 3.2139,
"step": 66350
},
{
"epoch": 7.159029649595688,
"grad_norm": 0.7826318740844727,
"learning_rate": 0.00017081057744198596,
"loss": 3.2121,
"step": 66400
},
{
"epoch": 7.164420485175202,
"grad_norm": 0.7995641231536865,
"learning_rate": 0.00017048677819751751,
"loss": 3.212,
"step": 66450
},
{
"epoch": 7.169811320754717,
"grad_norm": 0.770226240158081,
"learning_rate": 0.0001701629789530491,
"loss": 3.2161,
"step": 66500
},
{
"epoch": 7.175202156334231,
"grad_norm": 0.7641876339912415,
"learning_rate": 0.00016983917970858067,
"loss": 3.2127,
"step": 66550
},
{
"epoch": 7.180592991913747,
"grad_norm": 0.753595769405365,
"learning_rate": 0.00016951538046411225,
"loss": 3.2225,
"step": 66600
},
{
"epoch": 7.185983827493262,
"grad_norm": 0.7744004130363464,
"learning_rate": 0.0001691915812196438,
"loss": 3.2216,
"step": 66650
},
{
"epoch": 7.191374663072776,
"grad_norm": 0.7678277492523193,
"learning_rate": 0.00016886778197517538,
"loss": 3.2181,
"step": 66700
},
{
"epoch": 7.196765498652291,
"grad_norm": 0.7669500708580017,
"learning_rate": 0.00016854398273070693,
"loss": 3.2061,
"step": 66750
},
{
"epoch": 7.202156334231806,
"grad_norm": 0.7848168611526489,
"learning_rate": 0.0001682201834862385,
"loss": 3.2107,
"step": 66800
},
{
"epoch": 7.2075471698113205,
"grad_norm": 0.7321543097496033,
"learning_rate": 0.00016789638424177008,
"loss": 3.21,
"step": 66850
},
{
"epoch": 7.212938005390836,
"grad_norm": 0.7780288457870483,
"learning_rate": 0.00016757258499730166,
"loss": 3.2095,
"step": 66900
},
{
"epoch": 7.218328840970351,
"grad_norm": 0.7794478535652161,
"learning_rate": 0.00016724878575283324,
"loss": 3.2126,
"step": 66950
},
{
"epoch": 7.223719676549865,
"grad_norm": 0.7917138338088989,
"learning_rate": 0.00016692498650836481,
"loss": 3.2204,
"step": 67000
},
{
"epoch": 7.223719676549865,
"eval_accuracy": 0.38776651059403683,
"eval_loss": 3.3558034896850586,
"eval_runtime": 181.9981,
"eval_samples_per_second": 98.963,
"eval_steps_per_second": 6.187,
"step": 67000
},
{
"epoch": 7.22911051212938,
"grad_norm": 0.7602754235267639,
"learning_rate": 0.0001666011872638964,
"loss": 3.2317,
"step": 67050
},
{
"epoch": 7.234501347708895,
"grad_norm": 0.7741537690162659,
"learning_rate": 0.00016627738801942792,
"loss": 3.233,
"step": 67100
},
{
"epoch": 7.2398921832884096,
"grad_norm": 0.760444164276123,
"learning_rate": 0.0001659535887749595,
"loss": 3.1905,
"step": 67150
},
{
"epoch": 7.245283018867925,
"grad_norm": 0.7536256313323975,
"learning_rate": 0.00016562978953049107,
"loss": 3.2226,
"step": 67200
},
{
"epoch": 7.250673854447439,
"grad_norm": 0.759914219379425,
"learning_rate": 0.00016530599028602265,
"loss": 3.214,
"step": 67250
},
{
"epoch": 7.256064690026954,
"grad_norm": 0.7805749773979187,
"learning_rate": 0.00016498219104155423,
"loss": 3.2222,
"step": 67300
},
{
"epoch": 7.261455525606469,
"grad_norm": 0.797099769115448,
"learning_rate": 0.0001646583917970858,
"loss": 3.2376,
"step": 67350
},
{
"epoch": 7.2668463611859835,
"grad_norm": 0.776332437992096,
"learning_rate": 0.00016433459255261738,
"loss": 3.231,
"step": 67400
},
{
"epoch": 7.272237196765499,
"grad_norm": 0.8175129294395447,
"learning_rate": 0.00016401079330814896,
"loss": 3.2433,
"step": 67450
},
{
"epoch": 7.277628032345014,
"grad_norm": 0.7837097644805908,
"learning_rate": 0.00016368699406368048,
"loss": 3.2127,
"step": 67500
},
{
"epoch": 7.283018867924528,
"grad_norm": 0.8012109994888306,
"learning_rate": 0.00016336319481921206,
"loss": 3.2063,
"step": 67550
},
{
"epoch": 7.288409703504043,
"grad_norm": 0.742727518081665,
"learning_rate": 0.00016303939557474364,
"loss": 3.2417,
"step": 67600
},
{
"epoch": 7.293800539083558,
"grad_norm": 0.8155743479728699,
"learning_rate": 0.00016271559633027522,
"loss": 3.2291,
"step": 67650
},
{
"epoch": 7.2991913746630726,
"grad_norm": 0.7434958815574646,
"learning_rate": 0.0001623917970858068,
"loss": 3.2221,
"step": 67700
},
{
"epoch": 7.304582210242588,
"grad_norm": 0.7987688779830933,
"learning_rate": 0.00016206799784133837,
"loss": 3.227,
"step": 67750
},
{
"epoch": 7.309973045822103,
"grad_norm": 0.7858981490135193,
"learning_rate": 0.00016174419859686992,
"loss": 3.2375,
"step": 67800
},
{
"epoch": 7.315363881401617,
"grad_norm": 0.7990615963935852,
"learning_rate": 0.0001614203993524015,
"loss": 3.2243,
"step": 67850
},
{
"epoch": 7.320754716981132,
"grad_norm": 0.8137235045433044,
"learning_rate": 0.00016109660010793308,
"loss": 3.2211,
"step": 67900
},
{
"epoch": 7.3261455525606465,
"grad_norm": 0.7708967328071594,
"learning_rate": 0.00016077280086346463,
"loss": 3.2295,
"step": 67950
},
{
"epoch": 7.331536388140162,
"grad_norm": 0.7716946005821228,
"learning_rate": 0.00016045547760388558,
"loss": 3.2237,
"step": 68000
},
{
"epoch": 7.331536388140162,
"eval_accuracy": 0.38805476670540867,
"eval_loss": 3.3548872470855713,
"eval_runtime": 183.6531,
"eval_samples_per_second": 98.071,
"eval_steps_per_second": 6.131,
"step": 68000
},
{
"epoch": 7.336927223719677,
"grad_norm": 0.8097190260887146,
"learning_rate": 0.00016013167835941716,
"loss": 3.2202,
"step": 68050
},
{
"epoch": 7.342318059299191,
"grad_norm": 0.7494890093803406,
"learning_rate": 0.00015980787911494873,
"loss": 3.224,
"step": 68100
},
{
"epoch": 7.347708894878706,
"grad_norm": 0.750126838684082,
"learning_rate": 0.00015948407987048029,
"loss": 3.2298,
"step": 68150
},
{
"epoch": 7.353099730458221,
"grad_norm": 0.7925066351890564,
"learning_rate": 0.00015916028062601186,
"loss": 3.2223,
"step": 68200
},
{
"epoch": 7.3584905660377355,
"grad_norm": 0.8052787184715271,
"learning_rate": 0.00015883648138154344,
"loss": 3.2256,
"step": 68250
},
{
"epoch": 7.363881401617251,
"grad_norm": 0.7859789133071899,
"learning_rate": 0.000158512682137075,
"loss": 3.2419,
"step": 68300
},
{
"epoch": 7.369272237196766,
"grad_norm": 0.8004688620567322,
"learning_rate": 0.00015818888289260657,
"loss": 3.2193,
"step": 68350
},
{
"epoch": 7.37466307277628,
"grad_norm": 0.7921863794326782,
"learning_rate": 0.00015786508364813815,
"loss": 3.2173,
"step": 68400
},
{
"epoch": 7.380053908355795,
"grad_norm": 0.8183167576789856,
"learning_rate": 0.0001575412844036697,
"loss": 3.232,
"step": 68450
},
{
"epoch": 7.38544474393531,
"grad_norm": 0.8109775185585022,
"learning_rate": 0.00015721748515920127,
"loss": 3.2039,
"step": 68500
},
{
"epoch": 7.390835579514825,
"grad_norm": 0.7710207104682922,
"learning_rate": 0.00015689368591473285,
"loss": 3.199,
"step": 68550
},
{
"epoch": 7.39622641509434,
"grad_norm": 0.7475923299789429,
"learning_rate": 0.00015656988667026443,
"loss": 3.2262,
"step": 68600
},
{
"epoch": 7.401617250673855,
"grad_norm": 0.7581785917282104,
"learning_rate": 0.000156246087425796,
"loss": 3.2386,
"step": 68650
},
{
"epoch": 7.407008086253369,
"grad_norm": 0.7466723322868347,
"learning_rate": 0.00015592228818132756,
"loss": 3.2161,
"step": 68700
},
{
"epoch": 7.412398921832884,
"grad_norm": 0.7888767123222351,
"learning_rate": 0.0001555984889368591,
"loss": 3.2308,
"step": 68750
},
{
"epoch": 7.4177897574123985,
"grad_norm": 0.8188113570213318,
"learning_rate": 0.0001552746896923907,
"loss": 3.2205,
"step": 68800
},
{
"epoch": 7.423180592991914,
"grad_norm": 0.782088577747345,
"learning_rate": 0.00015495089044792226,
"loss": 3.2375,
"step": 68850
},
{
"epoch": 7.428571428571429,
"grad_norm": 0.8104148507118225,
"learning_rate": 0.00015462709120345384,
"loss": 3.2308,
"step": 68900
},
{
"epoch": 7.433962264150943,
"grad_norm": 0.7924380898475647,
"learning_rate": 0.00015430329195898542,
"loss": 3.2339,
"step": 68950
},
{
"epoch": 7.439353099730458,
"grad_norm": 0.8276554346084595,
"learning_rate": 0.000153979492714517,
"loss": 3.2464,
"step": 69000
},
{
"epoch": 7.439353099730458,
"eval_accuracy": 0.3881077893146621,
"eval_loss": 3.3497581481933594,
"eval_runtime": 183.1854,
"eval_samples_per_second": 98.321,
"eval_steps_per_second": 6.147,
"step": 69000
},
{
"epoch": 7.444743935309973,
"grad_norm": 0.78617924451828,
"learning_rate": 0.00015365569347004858,
"loss": 3.2547,
"step": 69050
},
{
"epoch": 7.450134770889488,
"grad_norm": 0.7908804416656494,
"learning_rate": 0.00015333189422558015,
"loss": 3.2359,
"step": 69100
},
{
"epoch": 7.455525606469003,
"grad_norm": 0.7701476812362671,
"learning_rate": 0.00015300809498111168,
"loss": 3.2145,
"step": 69150
},
{
"epoch": 7.460916442048518,
"grad_norm": 0.781516432762146,
"learning_rate": 0.00015268429573664325,
"loss": 3.2396,
"step": 69200
},
{
"epoch": 7.466307277628032,
"grad_norm": 0.8261198401451111,
"learning_rate": 0.00015236049649217483,
"loss": 3.2305,
"step": 69250
},
{
"epoch": 7.471698113207547,
"grad_norm": 0.772330105304718,
"learning_rate": 0.0001520366972477064,
"loss": 3.2268,
"step": 69300
},
{
"epoch": 7.4770889487870615,
"grad_norm": 0.8193607330322266,
"learning_rate": 0.000151712898003238,
"loss": 3.2257,
"step": 69350
},
{
"epoch": 7.482479784366577,
"grad_norm": 0.7732469439506531,
"learning_rate": 0.00015138909875876956,
"loss": 3.2517,
"step": 69400
},
{
"epoch": 7.487870619946092,
"grad_norm": 0.8030275702476501,
"learning_rate": 0.00015106529951430114,
"loss": 3.2364,
"step": 69450
},
{
"epoch": 7.493261455525606,
"grad_norm": 0.7973354458808899,
"learning_rate": 0.0001507415002698327,
"loss": 3.2233,
"step": 69500
},
{
"epoch": 7.498652291105121,
"grad_norm": 0.782257080078125,
"learning_rate": 0.00015041770102536424,
"loss": 3.2384,
"step": 69550
},
{
"epoch": 7.504043126684636,
"grad_norm": 0.7709639072418213,
"learning_rate": 0.00015009390178089582,
"loss": 3.2321,
"step": 69600
},
{
"epoch": 7.509433962264151,
"grad_norm": 0.7594476938247681,
"learning_rate": 0.0001497701025364274,
"loss": 3.2143,
"step": 69650
},
{
"epoch": 7.514824797843666,
"grad_norm": 0.7423638105392456,
"learning_rate": 0.00014944630329195898,
"loss": 3.2513,
"step": 69700
},
{
"epoch": 7.520215633423181,
"grad_norm": 0.8006898164749146,
"learning_rate": 0.00014912250404749055,
"loss": 3.2298,
"step": 69750
},
{
"epoch": 7.525606469002695,
"grad_norm": 0.7882317900657654,
"learning_rate": 0.0001487987048030221,
"loss": 3.2403,
"step": 69800
},
{
"epoch": 7.53099730458221,
"grad_norm": 0.7475966215133667,
"learning_rate": 0.00014847490555855368,
"loss": 3.2235,
"step": 69850
},
{
"epoch": 7.536388140161725,
"grad_norm": 0.8185462355613708,
"learning_rate": 0.00014815110631408526,
"loss": 3.2429,
"step": 69900
},
{
"epoch": 7.54177897574124,
"grad_norm": 0.7969148755073547,
"learning_rate": 0.0001478273070696168,
"loss": 3.2355,
"step": 69950
},
{
"epoch": 7.547169811320755,
"grad_norm": 0.7717929482460022,
"learning_rate": 0.0001475035078251484,
"loss": 3.2299,
"step": 70000
},
{
"epoch": 7.547169811320755,
"eval_accuracy": 0.38870994361892997,
"eval_loss": 3.3456766605377197,
"eval_runtime": 180.2201,
"eval_samples_per_second": 99.939,
"eval_steps_per_second": 6.248,
"step": 70000
},
{
"epoch": 7.55256064690027,
"grad_norm": 0.7462727427482605,
"learning_rate": 0.00014717970858067997,
"loss": 3.2336,
"step": 70050
},
{
"epoch": 7.557951482479784,
"grad_norm": 0.8422035574913025,
"learning_rate": 0.00014685590933621154,
"loss": 3.2419,
"step": 70100
},
{
"epoch": 7.563342318059299,
"grad_norm": 0.7829086780548096,
"learning_rate": 0.00014653858607663247,
"loss": 3.2359,
"step": 70150
},
{
"epoch": 7.568733153638814,
"grad_norm": 0.8007511496543884,
"learning_rate": 0.00014621478683216405,
"loss": 3.2342,
"step": 70200
},
{
"epoch": 7.574123989218329,
"grad_norm": 0.8248454332351685,
"learning_rate": 0.00014589098758769562,
"loss": 3.2369,
"step": 70250
},
{
"epoch": 7.579514824797844,
"grad_norm": 0.8310642838478088,
"learning_rate": 0.00014556718834322717,
"loss": 3.2376,
"step": 70300
},
{
"epoch": 7.584905660377358,
"grad_norm": 0.7947178483009338,
"learning_rate": 0.00014524338909875875,
"loss": 3.2524,
"step": 70350
},
{
"epoch": 7.590296495956873,
"grad_norm": 0.7793188095092773,
"learning_rate": 0.00014491958985429033,
"loss": 3.2249,
"step": 70400
},
{
"epoch": 7.595687331536388,
"grad_norm": 0.8293790221214294,
"learning_rate": 0.00014459579060982188,
"loss": 3.2475,
"step": 70450
},
{
"epoch": 7.601078167115903,
"grad_norm": 0.8022060394287109,
"learning_rate": 0.00014427199136535346,
"loss": 3.2512,
"step": 70500
},
{
"epoch": 7.606469002695418,
"grad_norm": 0.827668309211731,
"learning_rate": 0.00014394819212088504,
"loss": 3.2405,
"step": 70550
},
{
"epoch": 7.611859838274933,
"grad_norm": 0.8248385787010193,
"learning_rate": 0.0001436243928764166,
"loss": 3.2277,
"step": 70600
},
{
"epoch": 7.617250673854447,
"grad_norm": 0.8252428770065308,
"learning_rate": 0.00014330059363194816,
"loss": 3.2263,
"step": 70650
},
{
"epoch": 7.622641509433962,
"grad_norm": 0.7909324765205383,
"learning_rate": 0.00014297679438747974,
"loss": 3.2518,
"step": 70700
},
{
"epoch": 7.628032345013477,
"grad_norm": 0.7723770141601562,
"learning_rate": 0.00014265299514301132,
"loss": 3.2332,
"step": 70750
},
{
"epoch": 7.633423180592992,
"grad_norm": 0.7607382535934448,
"learning_rate": 0.0001423291958985429,
"loss": 3.2394,
"step": 70800
},
{
"epoch": 7.638814016172507,
"grad_norm": 0.7890430092811584,
"learning_rate": 0.00014200539665407445,
"loss": 3.2264,
"step": 70850
},
{
"epoch": 7.644204851752022,
"grad_norm": 0.7864667773246765,
"learning_rate": 0.00014168159740960602,
"loss": 3.2415,
"step": 70900
},
{
"epoch": 7.649595687331536,
"grad_norm": 0.8162862062454224,
"learning_rate": 0.0001413577981651376,
"loss": 3.2204,
"step": 70950
},
{
"epoch": 7.654986522911051,
"grad_norm": 0.7907611131668091,
"learning_rate": 0.00014103399892066918,
"loss": 3.2273,
"step": 71000
},
{
"epoch": 7.654986522911051,
"eval_accuracy": 0.3888505404557619,
"eval_loss": 3.3421216011047363,
"eval_runtime": 180.2705,
"eval_samples_per_second": 99.911,
"eval_steps_per_second": 6.246,
"step": 71000
},
{
"epoch": 7.660377358490566,
"grad_norm": 0.8033956289291382,
"learning_rate": 0.00014071019967620073,
"loss": 3.2526,
"step": 71050
},
{
"epoch": 7.665768194070081,
"grad_norm": 0.7934290766716003,
"learning_rate": 0.0001403864004317323,
"loss": 3.226,
"step": 71100
},
{
"epoch": 7.671159029649596,
"grad_norm": 0.7935851216316223,
"learning_rate": 0.00014006260118726389,
"loss": 3.2208,
"step": 71150
},
{
"epoch": 7.67654986522911,
"grad_norm": 0.7900667190551758,
"learning_rate": 0.00013973880194279546,
"loss": 3.228,
"step": 71200
},
{
"epoch": 7.681940700808625,
"grad_norm": 0.816220760345459,
"learning_rate": 0.00013941500269832704,
"loss": 3.2337,
"step": 71250
},
{
"epoch": 7.6873315363881405,
"grad_norm": 0.7944366931915283,
"learning_rate": 0.0001390912034538586,
"loss": 3.2289,
"step": 71300
},
{
"epoch": 7.692722371967655,
"grad_norm": 0.7893273234367371,
"learning_rate": 0.00013876740420939017,
"loss": 3.2388,
"step": 71350
},
{
"epoch": 7.69811320754717,
"grad_norm": 0.8079636096954346,
"learning_rate": 0.00013844360496492175,
"loss": 3.2394,
"step": 71400
},
{
"epoch": 7.703504043126685,
"grad_norm": 0.8532904386520386,
"learning_rate": 0.00013811980572045333,
"loss": 3.2329,
"step": 71450
},
{
"epoch": 7.708894878706199,
"grad_norm": 0.8075155019760132,
"learning_rate": 0.00013779600647598488,
"loss": 3.2418,
"step": 71500
},
{
"epoch": 7.714285714285714,
"grad_norm": 0.8469804525375366,
"learning_rate": 0.00013747220723151645,
"loss": 3.2287,
"step": 71550
},
{
"epoch": 7.719676549865229,
"grad_norm": 0.8190136551856995,
"learning_rate": 0.00013714840798704803,
"loss": 3.2317,
"step": 71600
},
{
"epoch": 7.725067385444744,
"grad_norm": 0.8388344645500183,
"learning_rate": 0.00013682460874257958,
"loss": 3.2224,
"step": 71650
},
{
"epoch": 7.730458221024259,
"grad_norm": 0.7796118259429932,
"learning_rate": 0.00013650080949811116,
"loss": 3.25,
"step": 71700
},
{
"epoch": 7.735849056603773,
"grad_norm": 0.7741323709487915,
"learning_rate": 0.00013617701025364274,
"loss": 3.2348,
"step": 71750
},
{
"epoch": 7.741239892183288,
"grad_norm": 0.8744562864303589,
"learning_rate": 0.0001358532110091743,
"loss": 3.2233,
"step": 71800
},
{
"epoch": 7.7466307277628035,
"grad_norm": 0.8152430057525635,
"learning_rate": 0.00013552941176470587,
"loss": 3.2204,
"step": 71850
},
{
"epoch": 7.752021563342318,
"grad_norm": 0.8608075976371765,
"learning_rate": 0.00013520561252023744,
"loss": 3.2423,
"step": 71900
},
{
"epoch": 7.757412398921833,
"grad_norm": 0.8437498211860657,
"learning_rate": 0.00013488181327576902,
"loss": 3.2358,
"step": 71950
},
{
"epoch": 7.762803234501348,
"grad_norm": 0.8040376305580139,
"learning_rate": 0.00013455801403130057,
"loss": 3.2389,
"step": 72000
},
{
"epoch": 7.762803234501348,
"eval_accuracy": 0.389368814730637,
"eval_loss": 3.3362984657287598,
"eval_runtime": 180.4271,
"eval_samples_per_second": 99.824,
"eval_steps_per_second": 6.241,
"step": 72000
},
{
"epoch": 7.768194070080862,
"grad_norm": 0.8019129633903503,
"learning_rate": 0.00013423421478683215,
"loss": 3.2281,
"step": 72050
},
{
"epoch": 7.773584905660377,
"grad_norm": 0.7884582877159119,
"learning_rate": 0.00013391041554236373,
"loss": 3.2377,
"step": 72100
},
{
"epoch": 7.7789757412398925,
"grad_norm": 0.7551885843276978,
"learning_rate": 0.00013358661629789528,
"loss": 3.2377,
"step": 72150
},
{
"epoch": 7.784366576819407,
"grad_norm": 0.7974773049354553,
"learning_rate": 0.00013326281705342685,
"loss": 3.2605,
"step": 72200
},
{
"epoch": 7.789757412398922,
"grad_norm": 0.7805876135826111,
"learning_rate": 0.00013293901780895843,
"loss": 3.2149,
"step": 72250
},
{
"epoch": 7.795148247978437,
"grad_norm": 0.800240159034729,
"learning_rate": 0.00013261521856449,
"loss": 3.2404,
"step": 72300
},
{
"epoch": 7.800539083557951,
"grad_norm": 0.7783510684967041,
"learning_rate": 0.00013229141932002156,
"loss": 3.232,
"step": 72350
},
{
"epoch": 7.8059299191374665,
"grad_norm": 0.8632839918136597,
"learning_rate": 0.0001319740960604425,
"loss": 3.2335,
"step": 72400
},
{
"epoch": 7.811320754716981,
"grad_norm": 0.812347412109375,
"learning_rate": 0.0001316502968159741,
"loss": 3.2372,
"step": 72450
},
{
"epoch": 7.816711590296496,
"grad_norm": 0.7981837391853333,
"learning_rate": 0.00013132649757150564,
"loss": 3.2254,
"step": 72500
},
{
"epoch": 7.822102425876011,
"grad_norm": 0.7754440307617188,
"learning_rate": 0.00013100269832703722,
"loss": 3.2122,
"step": 72550
},
{
"epoch": 7.827493261455525,
"grad_norm": 0.7779524922370911,
"learning_rate": 0.0001306788990825688,
"loss": 3.244,
"step": 72600
},
{
"epoch": 7.83288409703504,
"grad_norm": 0.8315465450286865,
"learning_rate": 0.00013035509983810037,
"loss": 3.2307,
"step": 72650
},
{
"epoch": 7.8382749326145555,
"grad_norm": 0.8681521415710449,
"learning_rate": 0.00013003130059363192,
"loss": 3.2387,
"step": 72700
},
{
"epoch": 7.84366576819407,
"grad_norm": 0.8670200705528259,
"learning_rate": 0.0001297075013491635,
"loss": 3.234,
"step": 72750
},
{
"epoch": 7.849056603773585,
"grad_norm": 0.7848912477493286,
"learning_rate": 0.00012938370210469508,
"loss": 3.23,
"step": 72800
},
{
"epoch": 7.8544474393531,
"grad_norm": 0.7673957943916321,
"learning_rate": 0.00012905990286022666,
"loss": 3.2236,
"step": 72850
},
{
"epoch": 7.859838274932614,
"grad_norm": 0.7818908095359802,
"learning_rate": 0.0001287361036157582,
"loss": 3.2273,
"step": 72900
},
{
"epoch": 7.8652291105121295,
"grad_norm": 0.7966941595077515,
"learning_rate": 0.00012841230437128979,
"loss": 3.2205,
"step": 72950
},
{
"epoch": 7.870619946091644,
"grad_norm": 0.8313457369804382,
"learning_rate": 0.00012808850512682136,
"loss": 3.2224,
"step": 73000
},
{
"epoch": 7.870619946091644,
"eval_accuracy": 0.38986622765105167,
"eval_loss": 3.333773136138916,
"eval_runtime": 179.7764,
"eval_samples_per_second": 100.186,
"eval_steps_per_second": 6.263,
"step": 73000
},
{
"epoch": 7.876010781671159,
"grad_norm": 0.844993531703949,
"learning_rate": 0.00012776470588235294,
"loss": 3.2344,
"step": 73050
},
{
"epoch": 7.881401617250674,
"grad_norm": 0.8377089500427246,
"learning_rate": 0.0001274409066378845,
"loss": 3.2145,
"step": 73100
},
{
"epoch": 7.886792452830189,
"grad_norm": 0.7669774889945984,
"learning_rate": 0.00012711710739341607,
"loss": 3.2238,
"step": 73150
},
{
"epoch": 7.892183288409703,
"grad_norm": 0.8103171586990356,
"learning_rate": 0.00012679330814894765,
"loss": 3.2379,
"step": 73200
},
{
"epoch": 7.8975741239892185,
"grad_norm": 0.785951554775238,
"learning_rate": 0.00012646950890447922,
"loss": 3.2308,
"step": 73250
},
{
"epoch": 7.902964959568733,
"grad_norm": 0.8135892748832703,
"learning_rate": 0.00012614570966001077,
"loss": 3.2247,
"step": 73300
},
{
"epoch": 7.908355795148248,
"grad_norm": 0.7969558238983154,
"learning_rate": 0.00012582191041554235,
"loss": 3.2099,
"step": 73350
},
{
"epoch": 7.913746630727763,
"grad_norm": 0.8835052251815796,
"learning_rate": 0.00012549811117107393,
"loss": 3.2568,
"step": 73400
},
{
"epoch": 7.919137466307277,
"grad_norm": 0.844317615032196,
"learning_rate": 0.0001251743119266055,
"loss": 3.2375,
"step": 73450
},
{
"epoch": 7.9245283018867925,
"grad_norm": 0.7979193925857544,
"learning_rate": 0.00012485051268213706,
"loss": 3.2481,
"step": 73500
},
{
"epoch": 7.929919137466308,
"grad_norm": 0.812570333480835,
"learning_rate": 0.00012452671343766864,
"loss": 3.2397,
"step": 73550
},
{
"epoch": 7.935309973045822,
"grad_norm": 0.8735166788101196,
"learning_rate": 0.00012420291419320021,
"loss": 3.2363,
"step": 73600
},
{
"epoch": 7.940700808625337,
"grad_norm": 0.8485626578330994,
"learning_rate": 0.0001238791149487318,
"loss": 3.2468,
"step": 73650
},
{
"epoch": 7.946091644204852,
"grad_norm": 0.7872888445854187,
"learning_rate": 0.00012355531570426334,
"loss": 3.2525,
"step": 73700
},
{
"epoch": 7.951482479784366,
"grad_norm": 0.806064248085022,
"learning_rate": 0.00012323151645979492,
"loss": 3.2077,
"step": 73750
},
{
"epoch": 7.9568733153638815,
"grad_norm": 0.750158965587616,
"learning_rate": 0.0001229077172153265,
"loss": 3.231,
"step": 73800
},
{
"epoch": 7.962264150943396,
"grad_norm": 0.8217999339103699,
"learning_rate": 0.00012258391797085805,
"loss": 3.2202,
"step": 73850
},
{
"epoch": 7.967654986522911,
"grad_norm": 0.8456032276153564,
"learning_rate": 0.00012226011872638963,
"loss": 3.2245,
"step": 73900
},
{
"epoch": 7.973045822102426,
"grad_norm": 0.8271554708480835,
"learning_rate": 0.00012193631948192119,
"loss": 3.233,
"step": 73950
},
{
"epoch": 7.97843665768194,
"grad_norm": 0.8282763361930847,
"learning_rate": 0.00012161252023745277,
"loss": 3.2305,
"step": 74000
},
{
"epoch": 7.97843665768194,
"eval_accuracy": 0.3901663269271953,
"eval_loss": 3.3320345878601074,
"eval_runtime": 179.3118,
"eval_samples_per_second": 100.445,
"eval_steps_per_second": 6.28,
"step": 74000
},
{
"epoch": 7.9838274932614555,
"grad_norm": 0.7962801456451416,
"learning_rate": 0.00012128872099298435,
"loss": 3.235,
"step": 74050
},
{
"epoch": 7.989218328840971,
"grad_norm": 0.8267016410827637,
"learning_rate": 0.00012096492174851591,
"loss": 3.2379,
"step": 74100
},
{
"epoch": 7.994609164420485,
"grad_norm": 0.8089392185211182,
"learning_rate": 0.00012064112250404749,
"loss": 3.2275,
"step": 74150
},
{
"epoch": 8.0,
"grad_norm": 1.671104907989502,
"learning_rate": 0.00012031732325957905,
"loss": 3.2413,
"step": 74200
},
{
"epoch": 8.005390835579515,
"grad_norm": 0.7758212685585022,
"learning_rate": 0.00011999352401511062,
"loss": 3.1588,
"step": 74250
},
{
"epoch": 8.01078167115903,
"grad_norm": 0.8018858432769775,
"learning_rate": 0.00011966972477064219,
"loss": 3.1518,
"step": 74300
},
{
"epoch": 8.016172506738544,
"grad_norm": 0.8397005796432495,
"learning_rate": 0.00011934592552617377,
"loss": 3.1372,
"step": 74350
},
{
"epoch": 8.021563342318059,
"grad_norm": 0.7991986870765686,
"learning_rate": 0.00011902212628170532,
"loss": 3.1423,
"step": 74400
},
{
"epoch": 8.026954177897574,
"grad_norm": 0.7923682332038879,
"learning_rate": 0.0001186983270372369,
"loss": 3.162,
"step": 74450
},
{
"epoch": 8.032345013477089,
"grad_norm": 0.8185227513313293,
"learning_rate": 0.00011837452779276848,
"loss": 3.1469,
"step": 74500
},
{
"epoch": 8.037735849056604,
"grad_norm": 0.7988160252571106,
"learning_rate": 0.00011805072854830005,
"loss": 3.1649,
"step": 74550
},
{
"epoch": 8.04312668463612,
"grad_norm": 0.8521618843078613,
"learning_rate": 0.0001177269293038316,
"loss": 3.1567,
"step": 74600
},
{
"epoch": 8.048517520215633,
"grad_norm": 0.8044568300247192,
"learning_rate": 0.00011740313005936318,
"loss": 3.15,
"step": 74650
},
{
"epoch": 8.053908355795148,
"grad_norm": 0.8159428834915161,
"learning_rate": 0.00011707933081489476,
"loss": 3.1591,
"step": 74700
},
{
"epoch": 8.059299191374663,
"grad_norm": 0.8232279419898987,
"learning_rate": 0.00011675553157042632,
"loss": 3.153,
"step": 74750
},
{
"epoch": 8.064690026954178,
"grad_norm": 0.786412239074707,
"learning_rate": 0.00011643173232595789,
"loss": 3.1602,
"step": 74800
},
{
"epoch": 8.070080862533693,
"grad_norm": 0.7966213822364807,
"learning_rate": 0.00011610793308148947,
"loss": 3.1679,
"step": 74850
},
{
"epoch": 8.075471698113208,
"grad_norm": 0.8312312364578247,
"learning_rate": 0.00011578413383702104,
"loss": 3.1483,
"step": 74900
},
{
"epoch": 8.080862533692722,
"grad_norm": 0.7947903275489807,
"learning_rate": 0.00011546033459255261,
"loss": 3.148,
"step": 74950
},
{
"epoch": 8.086253369272237,
"grad_norm": 0.8069926500320435,
"learning_rate": 0.00011513653534808419,
"loss": 3.1599,
"step": 75000
},
{
"epoch": 8.086253369272237,
"eval_accuracy": 0.39007092969169416,
"eval_loss": 3.3380625247955322,
"eval_runtime": 179.2075,
"eval_samples_per_second": 100.504,
"eval_steps_per_second": 6.283,
"step": 75000
},
{
"epoch": 8.091644204851752,
"grad_norm": 0.8516124486923218,
"learning_rate": 0.00011481273610361575,
"loss": 3.1674,
"step": 75050
},
{
"epoch": 8.097035040431267,
"grad_norm": 0.7782564163208008,
"learning_rate": 0.00011448893685914731,
"loss": 3.1528,
"step": 75100
},
{
"epoch": 8.102425876010782,
"grad_norm": 0.7980765700340271,
"learning_rate": 0.00011416513761467889,
"loss": 3.1729,
"step": 75150
},
{
"epoch": 8.107816711590296,
"grad_norm": 0.8553686738014221,
"learning_rate": 0.00011384133837021047,
"loss": 3.1644,
"step": 75200
},
{
"epoch": 8.11320754716981,
"grad_norm": 0.8430271148681641,
"learning_rate": 0.00011351753912574202,
"loss": 3.1565,
"step": 75250
},
{
"epoch": 8.118598382749326,
"grad_norm": 0.8317184448242188,
"learning_rate": 0.0001131937398812736,
"loss": 3.1647,
"step": 75300
},
{
"epoch": 8.123989218328841,
"grad_norm": 0.8602001667022705,
"learning_rate": 0.00011286994063680517,
"loss": 3.1702,
"step": 75350
},
{
"epoch": 8.129380053908356,
"grad_norm": 0.8629422187805176,
"learning_rate": 0.00011254614139233675,
"loss": 3.1678,
"step": 75400
},
{
"epoch": 8.134770889487871,
"grad_norm": 0.8722990155220032,
"learning_rate": 0.0001122223421478683,
"loss": 3.1721,
"step": 75450
},
{
"epoch": 8.140161725067385,
"grad_norm": 0.822076141834259,
"learning_rate": 0.00011189854290339988,
"loss": 3.1461,
"step": 75500
},
{
"epoch": 8.1455525606469,
"grad_norm": 0.8530601859092712,
"learning_rate": 0.00011157474365893146,
"loss": 3.1773,
"step": 75550
},
{
"epoch": 8.150943396226415,
"grad_norm": 0.8242331147193909,
"learning_rate": 0.00011125094441446302,
"loss": 3.154,
"step": 75600
},
{
"epoch": 8.15633423180593,
"grad_norm": 0.811722457408905,
"learning_rate": 0.00011092714516999459,
"loss": 3.1709,
"step": 75650
},
{
"epoch": 8.161725067385445,
"grad_norm": 0.7784085273742676,
"learning_rate": 0.00011060334592552616,
"loss": 3.1902,
"step": 75700
},
{
"epoch": 8.167115902964959,
"grad_norm": 0.8223822712898254,
"learning_rate": 0.00011027954668105773,
"loss": 3.1677,
"step": 75750
},
{
"epoch": 8.172506738544474,
"grad_norm": 0.8153392672538757,
"learning_rate": 0.0001099557474365893,
"loss": 3.1638,
"step": 75800
},
{
"epoch": 8.177897574123989,
"grad_norm": 0.8260607123374939,
"learning_rate": 0.00010963194819212088,
"loss": 3.1525,
"step": 75850
},
{
"epoch": 8.183288409703504,
"grad_norm": 0.8073074221611023,
"learning_rate": 0.00010931462493254182,
"loss": 3.1906,
"step": 75900
},
{
"epoch": 8.18867924528302,
"grad_norm": 0.7909181714057922,
"learning_rate": 0.00010899082568807339,
"loss": 3.1497,
"step": 75950
},
{
"epoch": 8.194070080862534,
"grad_norm": 0.8735651969909668,
"learning_rate": 0.00010866702644360495,
"loss": 3.1664,
"step": 76000
},
{
"epoch": 8.194070080862534,
"eval_accuracy": 0.3903450609276501,
"eval_loss": 3.3349685668945312,
"eval_runtime": 179.8585,
"eval_samples_per_second": 100.14,
"eval_steps_per_second": 6.26,
"step": 76000
},
{
"epoch": 8.199460916442048,
"grad_norm": 0.8582903146743774,
"learning_rate": 0.00010834322719913653,
"loss": 3.1751,
"step": 76050
},
{
"epoch": 8.204851752021563,
"grad_norm": 0.8306153416633606,
"learning_rate": 0.00010801942795466809,
"loss": 3.1846,
"step": 76100
},
{
"epoch": 8.210242587601078,
"grad_norm": 0.8329233527183533,
"learning_rate": 0.00010769562871019967,
"loss": 3.1722,
"step": 76150
},
{
"epoch": 8.215633423180593,
"grad_norm": 0.8242199420928955,
"learning_rate": 0.00010737182946573123,
"loss": 3.1771,
"step": 76200
},
{
"epoch": 8.221024258760108,
"grad_norm": 0.8371992111206055,
"learning_rate": 0.0001070480302212628,
"loss": 3.1643,
"step": 76250
},
{
"epoch": 8.226415094339623,
"grad_norm": 0.8237205147743225,
"learning_rate": 0.00010672423097679438,
"loss": 3.1728,
"step": 76300
},
{
"epoch": 8.231805929919137,
"grad_norm": 0.8501004576683044,
"learning_rate": 0.00010640043173232595,
"loss": 3.1787,
"step": 76350
},
{
"epoch": 8.237196765498652,
"grad_norm": 0.8589977025985718,
"learning_rate": 0.00010607663248785753,
"loss": 3.1866,
"step": 76400
},
{
"epoch": 8.242587601078167,
"grad_norm": 0.8198873400688171,
"learning_rate": 0.00010575283324338908,
"loss": 3.1569,
"step": 76450
},
{
"epoch": 8.247978436657682,
"grad_norm": 0.8157219290733337,
"learning_rate": 0.00010542903399892066,
"loss": 3.1616,
"step": 76500
},
{
"epoch": 8.253369272237197,
"grad_norm": 0.8598965406417847,
"learning_rate": 0.00010510523475445224,
"loss": 3.1561,
"step": 76550
},
{
"epoch": 8.25876010781671,
"grad_norm": 0.8343561887741089,
"learning_rate": 0.0001047814355099838,
"loss": 3.1809,
"step": 76600
},
{
"epoch": 8.264150943396226,
"grad_norm": 0.8477984070777893,
"learning_rate": 0.00010445763626551537,
"loss": 3.161,
"step": 76650
},
{
"epoch": 8.269541778975741,
"grad_norm": 0.8373808264732361,
"learning_rate": 0.00010413383702104694,
"loss": 3.1932,
"step": 76700
},
{
"epoch": 8.274932614555256,
"grad_norm": 0.7863969206809998,
"learning_rate": 0.00010381003777657852,
"loss": 3.1752,
"step": 76750
},
{
"epoch": 8.280323450134771,
"grad_norm": 0.8112500905990601,
"learning_rate": 0.00010348623853211008,
"loss": 3.1572,
"step": 76800
},
{
"epoch": 8.285714285714286,
"grad_norm": 0.8665245175361633,
"learning_rate": 0.00010316243928764165,
"loss": 3.2007,
"step": 76850
},
{
"epoch": 8.2911051212938,
"grad_norm": 0.8533705472946167,
"learning_rate": 0.00010283864004317323,
"loss": 3.185,
"step": 76900
},
{
"epoch": 8.296495956873315,
"grad_norm": 0.7933281064033508,
"learning_rate": 0.00010251484079870479,
"loss": 3.1672,
"step": 76950
},
{
"epoch": 8.30188679245283,
"grad_norm": 0.8381282091140747,
"learning_rate": 0.00010219104155423637,
"loss": 3.1774,
"step": 77000
},
{
"epoch": 8.30188679245283,
"eval_accuracy": 0.3902838893518106,
"eval_loss": 3.331824779510498,
"eval_runtime": 179.5118,
"eval_samples_per_second": 100.333,
"eval_steps_per_second": 6.273,
"step": 77000
},
{
"epoch": 8.307277628032345,
"grad_norm": 0.8132053017616272,
"learning_rate": 0.00010186724230976793,
"loss": 3.173,
"step": 77050
},
{
"epoch": 8.31266846361186,
"grad_norm": 0.8275865316390991,
"learning_rate": 0.0001015434430652995,
"loss": 3.1769,
"step": 77100
},
{
"epoch": 8.318059299191376,
"grad_norm": 0.8841974139213562,
"learning_rate": 0.00010121964382083107,
"loss": 3.1809,
"step": 77150
},
{
"epoch": 8.323450134770889,
"grad_norm": 0.839897632598877,
"learning_rate": 0.00010089584457636265,
"loss": 3.1901,
"step": 77200
},
{
"epoch": 8.328840970350404,
"grad_norm": 0.8307055234909058,
"learning_rate": 0.00010057204533189423,
"loss": 3.19,
"step": 77250
},
{
"epoch": 8.33423180592992,
"grad_norm": 0.8464680910110474,
"learning_rate": 0.00010024824608742578,
"loss": 3.1757,
"step": 77300
},
{
"epoch": 8.339622641509434,
"grad_norm": 0.8722774386405945,
"learning_rate": 9.992444684295736e-05,
"loss": 3.1731,
"step": 77350
},
{
"epoch": 8.34501347708895,
"grad_norm": 0.8517303466796875,
"learning_rate": 9.960064759848894e-05,
"loss": 3.1916,
"step": 77400
},
{
"epoch": 8.350404312668463,
"grad_norm": 0.8216865658760071,
"learning_rate": 9.92768483540205e-05,
"loss": 3.1705,
"step": 77450
},
{
"epoch": 8.355795148247978,
"grad_norm": 0.8334668874740601,
"learning_rate": 9.895304910955206e-05,
"loss": 3.1769,
"step": 77500
},
{
"epoch": 8.361185983827493,
"grad_norm": 0.7854417562484741,
"learning_rate": 9.862924986508364e-05,
"loss": 3.1867,
"step": 77550
},
{
"epoch": 8.366576819407008,
"grad_norm": 0.8398834466934204,
"learning_rate": 9.83054506206152e-05,
"loss": 3.1865,
"step": 77600
},
{
"epoch": 8.371967654986523,
"grad_norm": 0.8624947667121887,
"learning_rate": 9.798165137614678e-05,
"loss": 3.1737,
"step": 77650
},
{
"epoch": 8.377358490566039,
"grad_norm": 0.8571727275848389,
"learning_rate": 9.765785213167835e-05,
"loss": 3.191,
"step": 77700
},
{
"epoch": 8.382749326145552,
"grad_norm": 0.8505393266677856,
"learning_rate": 9.733405288720992e-05,
"loss": 3.1691,
"step": 77750
},
{
"epoch": 8.388140161725067,
"grad_norm": 0.8707283735275269,
"learning_rate": 9.701025364274149e-05,
"loss": 3.1901,
"step": 77800
},
{
"epoch": 8.393530997304582,
"grad_norm": 0.8417378664016724,
"learning_rate": 9.668645439827307e-05,
"loss": 3.1778,
"step": 77850
},
{
"epoch": 8.398921832884097,
"grad_norm": 0.8451183438301086,
"learning_rate": 9.6369131138694e-05,
"loss": 3.1811,
"step": 77900
},
{
"epoch": 8.404312668463612,
"grad_norm": 0.8235145211219788,
"learning_rate": 9.604533189422557e-05,
"loss": 3.1743,
"step": 77950
},
{
"epoch": 8.409703504043126,
"grad_norm": 0.8533026576042175,
"learning_rate": 9.572153264975715e-05,
"loss": 3.1677,
"step": 78000
},
{
"epoch": 8.409703504043126,
"eval_accuracy": 0.39089832143240144,
"eval_loss": 3.3281443119049072,
"eval_runtime": 179.4491,
"eval_samples_per_second": 100.368,
"eval_steps_per_second": 6.275,
"step": 78000
},
{
"epoch": 8.415094339622641,
"grad_norm": 0.8585849404335022,
"learning_rate": 9.539773340528871e-05,
"loss": 3.1781,
"step": 78050
},
{
"epoch": 8.420485175202156,
"grad_norm": 0.8637095093727112,
"learning_rate": 9.507393416082027e-05,
"loss": 3.1777,
"step": 78100
},
{
"epoch": 8.425876010781671,
"grad_norm": 0.9015702605247498,
"learning_rate": 9.475013491635185e-05,
"loss": 3.1771,
"step": 78150
},
{
"epoch": 8.431266846361186,
"grad_norm": 0.8846878409385681,
"learning_rate": 9.442633567188343e-05,
"loss": 3.165,
"step": 78200
},
{
"epoch": 8.436657681940702,
"grad_norm": 0.8522124886512756,
"learning_rate": 9.410253642741498e-05,
"loss": 3.1804,
"step": 78250
},
{
"epoch": 8.442048517520215,
"grad_norm": 0.8822804689407349,
"learning_rate": 9.377873718294656e-05,
"loss": 3.1729,
"step": 78300
},
{
"epoch": 8.44743935309973,
"grad_norm": 0.8797435760498047,
"learning_rate": 9.345493793847814e-05,
"loss": 3.1984,
"step": 78350
},
{
"epoch": 8.452830188679245,
"grad_norm": 0.8309037685394287,
"learning_rate": 9.313113869400971e-05,
"loss": 3.1868,
"step": 78400
},
{
"epoch": 8.45822102425876,
"grad_norm": 0.856088399887085,
"learning_rate": 9.280733944954126e-05,
"loss": 3.1695,
"step": 78450
},
{
"epoch": 8.463611859838275,
"grad_norm": 0.823049008846283,
"learning_rate": 9.248354020507284e-05,
"loss": 3.2016,
"step": 78500
},
{
"epoch": 8.46900269541779,
"grad_norm": 0.7769917845726013,
"learning_rate": 9.215974096060442e-05,
"loss": 3.1715,
"step": 78550
},
{
"epoch": 8.474393530997304,
"grad_norm": 0.7951509952545166,
"learning_rate": 9.1835941716136e-05,
"loss": 3.1745,
"step": 78600
},
{
"epoch": 8.479784366576819,
"grad_norm": 0.8375470042228699,
"learning_rate": 9.151214247166756e-05,
"loss": 3.1753,
"step": 78650
},
{
"epoch": 8.485175202156334,
"grad_norm": 0.8318585753440857,
"learning_rate": 9.118834322719913e-05,
"loss": 3.2007,
"step": 78700
},
{
"epoch": 8.49056603773585,
"grad_norm": 0.8565362095832825,
"learning_rate": 9.08645439827307e-05,
"loss": 3.1859,
"step": 78750
},
{
"epoch": 8.495956873315365,
"grad_norm": 0.8577295541763306,
"learning_rate": 9.054074473826227e-05,
"loss": 3.1783,
"step": 78800
},
{
"epoch": 8.501347708894878,
"grad_norm": 0.8890122175216675,
"learning_rate": 9.021694549379385e-05,
"loss": 3.1778,
"step": 78850
},
{
"epoch": 8.506738544474393,
"grad_norm": 0.8697896599769592,
"learning_rate": 8.989314624932541e-05,
"loss": 3.1748,
"step": 78900
},
{
"epoch": 8.512129380053908,
"grad_norm": 0.842442512512207,
"learning_rate": 8.956934700485697e-05,
"loss": 3.1968,
"step": 78950
},
{
"epoch": 8.517520215633423,
"grad_norm": 0.8355295658111572,
"learning_rate": 8.924554776038855e-05,
"loss": 3.1821,
"step": 79000
},
{
"epoch": 8.517520215633423,
"eval_accuracy": 0.3912721960193711,
"eval_loss": 3.325834274291992,
"eval_runtime": 179.3601,
"eval_samples_per_second": 100.418,
"eval_steps_per_second": 6.278,
"step": 79000
},
{
"epoch": 8.522911051212938,
"grad_norm": 0.832925021648407,
"learning_rate": 8.892174851592013e-05,
"loss": 3.1902,
"step": 79050
},
{
"epoch": 8.528301886792454,
"grad_norm": 0.8366904854774475,
"learning_rate": 8.859794927145168e-05,
"loss": 3.1957,
"step": 79100
},
{
"epoch": 8.533692722371967,
"grad_norm": 0.8405174612998962,
"learning_rate": 8.827415002698326e-05,
"loss": 3.1719,
"step": 79150
},
{
"epoch": 8.539083557951482,
"grad_norm": 0.8224647045135498,
"learning_rate": 8.795035078251483e-05,
"loss": 3.1765,
"step": 79200
},
{
"epoch": 8.544474393530997,
"grad_norm": 0.8285897970199585,
"learning_rate": 8.762655153804641e-05,
"loss": 3.1683,
"step": 79250
},
{
"epoch": 8.549865229110512,
"grad_norm": 0.8500669598579407,
"learning_rate": 8.730275229357798e-05,
"loss": 3.184,
"step": 79300
},
{
"epoch": 8.555256064690028,
"grad_norm": 0.8491334915161133,
"learning_rate": 8.697895304910954e-05,
"loss": 3.1876,
"step": 79350
},
{
"epoch": 8.560646900269543,
"grad_norm": 0.7986251711845398,
"learning_rate": 8.665515380464112e-05,
"loss": 3.1574,
"step": 79400
},
{
"epoch": 8.566037735849056,
"grad_norm": 0.8463431596755981,
"learning_rate": 8.633135456017268e-05,
"loss": 3.1904,
"step": 79450
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.8518399000167847,
"learning_rate": 8.600755531570426e-05,
"loss": 3.1822,
"step": 79500
},
{
"epoch": 8.576819407008086,
"grad_norm": 0.8165387511253357,
"learning_rate": 8.568375607123582e-05,
"loss": 3.181,
"step": 79550
},
{
"epoch": 8.582210242587601,
"grad_norm": 0.815428614616394,
"learning_rate": 8.53599568267674e-05,
"loss": 3.1815,
"step": 79600
},
{
"epoch": 8.587601078167117,
"grad_norm": 0.8507580161094666,
"learning_rate": 8.503615758229897e-05,
"loss": 3.1625,
"step": 79650
},
{
"epoch": 8.59299191374663,
"grad_norm": 0.8546373248100281,
"learning_rate": 8.471235833783054e-05,
"loss": 3.1854,
"step": 79700
},
{
"epoch": 8.598382749326145,
"grad_norm": 0.9135368466377258,
"learning_rate": 8.438855909336211e-05,
"loss": 3.1938,
"step": 79750
},
{
"epoch": 8.60377358490566,
"grad_norm": 0.8222028017044067,
"learning_rate": 8.406475984889367e-05,
"loss": 3.1738,
"step": 79800
},
{
"epoch": 8.609164420485175,
"grad_norm": 0.8429611325263977,
"learning_rate": 8.374096060442525e-05,
"loss": 3.1603,
"step": 79850
},
{
"epoch": 8.61455525606469,
"grad_norm": 0.8184422850608826,
"learning_rate": 8.341716135995683e-05,
"loss": 3.1675,
"step": 79900
},
{
"epoch": 8.619946091644206,
"grad_norm": 0.8258768320083618,
"learning_rate": 8.309336211548838e-05,
"loss": 3.1894,
"step": 79950
},
{
"epoch": 8.625336927223719,
"grad_norm": 0.8129879236221313,
"learning_rate": 8.276956287101996e-05,
"loss": 3.1805,
"step": 80000
},
{
"epoch": 8.625336927223719,
"eval_accuracy": 0.391675189580275,
"eval_loss": 3.3235464096069336,
"eval_runtime": 179.6316,
"eval_samples_per_second": 100.266,
"eval_steps_per_second": 6.268,
"step": 80000
}
],
"logging_steps": 50,
"max_steps": 92750,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.688553435136e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}