{
"best_metric": 3.4898300170898438,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M__634/checkpoint-30000",
"epoch": 3.234501347708895,
"eval_steps": 1000,
"global_step": 30000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005390835579514825,
"grad_norm": 1.412644624710083,
"learning_rate": 0.0003,
"loss": 8.6259,
"step": 50
},
{
"epoch": 0.01078167115902965,
"grad_norm": 3.4297802448272705,
"learning_rate": 0.0006,
"loss": 6.925,
"step": 100
},
{
"epoch": 0.016172506738544475,
"grad_norm": 1.0797241926193237,
"learning_rate": 0.0005996762007555315,
"loss": 6.4904,
"step": 150
},
{
"epoch": 0.0215633423180593,
"grad_norm": 1.8681477308273315,
"learning_rate": 0.000599352401511063,
"loss": 6.2514,
"step": 200
},
{
"epoch": 0.026954177897574125,
"grad_norm": 1.0239183902740479,
"learning_rate": 0.0005990286022665946,
"loss": 6.0911,
"step": 250
},
{
"epoch": 0.03234501347708895,
"grad_norm": 1.5482831001281738,
"learning_rate": 0.0005987048030221263,
"loss": 5.9845,
"step": 300
},
{
"epoch": 0.03773584905660377,
"grad_norm": 1.6334092617034912,
"learning_rate": 0.0005983810037776578,
"loss": 5.875,
"step": 350
},
{
"epoch": 0.0431266846361186,
"grad_norm": 1.5844093561172485,
"learning_rate": 0.0005980572045331894,
"loss": 5.7818,
"step": 400
},
{
"epoch": 0.04851752021563342,
"grad_norm": 1.2689578533172607,
"learning_rate": 0.0005977334052887209,
"loss": 5.712,
"step": 450
},
{
"epoch": 0.05390835579514825,
"grad_norm": 1.3288975954055786,
"learning_rate": 0.0005974096060442526,
"loss": 5.6535,
"step": 500
},
{
"epoch": 0.05929919137466307,
"grad_norm": 2.041428804397583,
"learning_rate": 0.0005970858067997841,
"loss": 5.5725,
"step": 550
},
{
"epoch": 0.0646900269541779,
"grad_norm": 1.4345282316207886,
"learning_rate": 0.0005967620075553157,
"loss": 5.5081,
"step": 600
},
{
"epoch": 0.07008086253369272,
"grad_norm": 1.1454274654388428,
"learning_rate": 0.0005964382083108472,
"loss": 5.4223,
"step": 650
},
{
"epoch": 0.07547169811320754,
"grad_norm": 1.3169114589691162,
"learning_rate": 0.0005961144090663788,
"loss": 5.3711,
"step": 700
},
{
"epoch": 0.08086253369272237,
"grad_norm": 1.3919389247894287,
"learning_rate": 0.0005957906098219104,
"loss": 5.3003,
"step": 750
},
{
"epoch": 0.0862533692722372,
"grad_norm": 1.0709481239318848,
"learning_rate": 0.0005954668105774419,
"loss": 5.2637,
"step": 800
},
{
"epoch": 0.09164420485175202,
"grad_norm": 0.8944941759109497,
"learning_rate": 0.0005951430113329735,
"loss": 5.1967,
"step": 850
},
{
"epoch": 0.09703504043126684,
"grad_norm": 1.1984753608703613,
"learning_rate": 0.0005948192120885051,
"loss": 5.1709,
"step": 900
},
{
"epoch": 0.10242587601078167,
"grad_norm": 1.0900869369506836,
"learning_rate": 0.0005944954128440366,
"loss": 5.1332,
"step": 950
},
{
"epoch": 0.1078167115902965,
"grad_norm": 1.4634649753570557,
"learning_rate": 0.0005941716135995682,
"loss": 5.0778,
"step": 1000
},
{
"epoch": 0.1078167115902965,
"eval_accuracy": 0.22697957684263617,
"eval_loss": 5.024423599243164,
"eval_runtime": 183.2168,
"eval_samples_per_second": 98.304,
"eval_steps_per_second": 6.146,
"step": 1000
},
{
"epoch": 0.11320754716981132,
"grad_norm": 1.0724071264266968,
"learning_rate": 0.0005938478143550997,
"loss": 5.039,
"step": 1050
},
{
"epoch": 0.11859838274932614,
"grad_norm": 0.7434335947036743,
"learning_rate": 0.0005935240151106314,
"loss": 5.0145,
"step": 1100
},
{
"epoch": 0.12398921832884097,
"grad_norm": 1.1407990455627441,
"learning_rate": 0.0005932002158661629,
"loss": 4.9884,
"step": 1150
},
{
"epoch": 0.1293800539083558,
"grad_norm": 1.3579697608947754,
"learning_rate": 0.0005928764166216945,
"loss": 4.9232,
"step": 1200
},
{
"epoch": 0.1347708894878706,
"grad_norm": 1.170091986656189,
"learning_rate": 0.000592552617377226,
"loss": 4.9022,
"step": 1250
},
{
"epoch": 0.14016172506738545,
"grad_norm": 1.056681513786316,
"learning_rate": 0.0005922288181327577,
"loss": 4.8796,
"step": 1300
},
{
"epoch": 0.14555256064690028,
"grad_norm": 0.8261628746986389,
"learning_rate": 0.0005919050188882893,
"loss": 4.8425,
"step": 1350
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.7253502011299133,
"learning_rate": 0.0005915812196438207,
"loss": 4.836,
"step": 1400
},
{
"epoch": 0.15633423180592992,
"grad_norm": 1.0881081819534302,
"learning_rate": 0.0005912574203993524,
"loss": 4.8436,
"step": 1450
},
{
"epoch": 0.16172506738544473,
"grad_norm": 0.8581550717353821,
"learning_rate": 0.0005909336211548839,
"loss": 4.8009,
"step": 1500
},
{
"epoch": 0.16711590296495957,
"grad_norm": 0.9875918030738831,
"learning_rate": 0.0005906098219104155,
"loss": 4.7541,
"step": 1550
},
{
"epoch": 0.1725067385444744,
"grad_norm": 0.8188138008117676,
"learning_rate": 0.000590286022665947,
"loss": 4.744,
"step": 1600
},
{
"epoch": 0.1778975741239892,
"grad_norm": 0.8326888084411621,
"learning_rate": 0.0005899622234214787,
"loss": 4.7177,
"step": 1650
},
{
"epoch": 0.18328840970350405,
"grad_norm": 1.0679171085357666,
"learning_rate": 0.0005896384241770102,
"loss": 4.6933,
"step": 1700
},
{
"epoch": 0.18867924528301888,
"grad_norm": 0.8685047626495361,
"learning_rate": 0.0005893146249325418,
"loss": 4.7025,
"step": 1750
},
{
"epoch": 0.1940700808625337,
"grad_norm": 0.8494108319282532,
"learning_rate": 0.0005889908256880733,
"loss": 4.6703,
"step": 1800
},
{
"epoch": 0.19946091644204852,
"grad_norm": 1.175525426864624,
"learning_rate": 0.0005886670264436049,
"loss": 4.6366,
"step": 1850
},
{
"epoch": 0.20485175202156333,
"grad_norm": 1.0531870126724243,
"learning_rate": 0.0005883432271991365,
"loss": 4.6263,
"step": 1900
},
{
"epoch": 0.21024258760107817,
"grad_norm": 0.9467228651046753,
"learning_rate": 0.0005880194279546681,
"loss": 4.6254,
"step": 1950
},
{
"epoch": 0.215633423180593,
"grad_norm": 0.9411425590515137,
"learning_rate": 0.0005876956287101996,
"loss": 4.5738,
"step": 2000
},
{
"epoch": 0.215633423180593,
"eval_accuracy": 0.271918085284042,
"eval_loss": 4.5008015632629395,
"eval_runtime": 181.4309,
"eval_samples_per_second": 99.272,
"eval_steps_per_second": 6.206,
"step": 2000
},
{
"epoch": 0.2210242587601078,
"grad_norm": 0.920215368270874,
"learning_rate": 0.0005873718294657312,
"loss": 4.5559,
"step": 2050
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.7012114524841309,
"learning_rate": 0.0005870480302212628,
"loss": 4.5445,
"step": 2100
},
{
"epoch": 0.23180592991913745,
"grad_norm": 0.9538075923919678,
"learning_rate": 0.0005867242309767943,
"loss": 4.5259,
"step": 2150
},
{
"epoch": 0.2371967654986523,
"grad_norm": 0.7847324013710022,
"learning_rate": 0.0005864004317323259,
"loss": 4.4889,
"step": 2200
},
{
"epoch": 0.24258760107816713,
"grad_norm": 1.0440484285354614,
"learning_rate": 0.0005860766324878575,
"loss": 4.504,
"step": 2250
},
{
"epoch": 0.24797843665768193,
"grad_norm": 0.782093346118927,
"learning_rate": 0.000585752833243389,
"loss": 4.4768,
"step": 2300
},
{
"epoch": 0.25336927223719674,
"grad_norm": 0.9644035696983337,
"learning_rate": 0.0005854290339989206,
"loss": 4.4522,
"step": 2350
},
{
"epoch": 0.2587601078167116,
"grad_norm": 1.0412849187850952,
"learning_rate": 0.0005851052347544521,
"loss": 4.4489,
"step": 2400
},
{
"epoch": 0.2641509433962264,
"grad_norm": 0.7987921237945557,
"learning_rate": 0.0005847814355099838,
"loss": 4.4304,
"step": 2450
},
{
"epoch": 0.2695417789757412,
"grad_norm": 0.8646295070648193,
"learning_rate": 0.0005844576362655154,
"loss": 4.4196,
"step": 2500
},
{
"epoch": 0.2749326145552561,
"grad_norm": 1.0314178466796875,
"learning_rate": 0.0005841338370210469,
"loss": 4.4017,
"step": 2550
},
{
"epoch": 0.2803234501347709,
"grad_norm": 0.9287024140357971,
"learning_rate": 0.0005838100377765785,
"loss": 4.3729,
"step": 2600
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.8492061495780945,
"learning_rate": 0.0005834862385321101,
"loss": 4.37,
"step": 2650
},
{
"epoch": 0.29110512129380056,
"grad_norm": 0.7161704301834106,
"learning_rate": 0.0005831624392876417,
"loss": 4.3608,
"step": 2700
},
{
"epoch": 0.29649595687331537,
"grad_norm": 1.0816575288772583,
"learning_rate": 0.0005828386400431731,
"loss": 4.3705,
"step": 2750
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.8127713799476624,
"learning_rate": 0.0005825148407987048,
"loss": 4.3681,
"step": 2800
},
{
"epoch": 0.30727762803234504,
"grad_norm": 0.9167231321334839,
"learning_rate": 0.0005821910415542363,
"loss": 4.33,
"step": 2850
},
{
"epoch": 0.31266846361185985,
"grad_norm": 0.8297504782676697,
"learning_rate": 0.0005818672423097679,
"loss": 4.3137,
"step": 2900
},
{
"epoch": 0.31805929919137466,
"grad_norm": 0.9186894297599792,
"learning_rate": 0.0005815434430652994,
"loss": 4.3441,
"step": 2950
},
{
"epoch": 0.32345013477088946,
"grad_norm": 0.7359763979911804,
"learning_rate": 0.0005812196438208311,
"loss": 4.309,
"step": 3000
},
{
"epoch": 0.32345013477088946,
"eval_accuracy": 0.29948495271589304,
"eval_loss": 4.228354454040527,
"eval_runtime": 183.1945,
"eval_samples_per_second": 98.316,
"eval_steps_per_second": 6.146,
"step": 3000
},
{
"epoch": 0.3288409703504043,
"grad_norm": 0.7310630679130554,
"learning_rate": 0.0005808958445763626,
"loss": 4.3024,
"step": 3050
},
{
"epoch": 0.33423180592991913,
"grad_norm": 0.8260939717292786,
"learning_rate": 0.0005805720453318942,
"loss": 4.2831,
"step": 3100
},
{
"epoch": 0.33962264150943394,
"grad_norm": 0.7478753924369812,
"learning_rate": 0.0005802482460874257,
"loss": 4.2722,
"step": 3150
},
{
"epoch": 0.3450134770889488,
"grad_norm": 0.8138642907142639,
"learning_rate": 0.0005799244468429573,
"loss": 4.2816,
"step": 3200
},
{
"epoch": 0.3504043126684636,
"grad_norm": 0.7284408211708069,
"learning_rate": 0.0005796006475984889,
"loss": 4.2495,
"step": 3250
},
{
"epoch": 0.3557951482479784,
"grad_norm": 0.6755571365356445,
"learning_rate": 0.0005792768483540205,
"loss": 4.2595,
"step": 3300
},
{
"epoch": 0.3611859838274933,
"grad_norm": 0.9542193412780762,
"learning_rate": 0.000578953049109552,
"loss": 4.2538,
"step": 3350
},
{
"epoch": 0.3665768194070081,
"grad_norm": 0.6383764743804932,
"learning_rate": 0.0005786292498650836,
"loss": 4.2319,
"step": 3400
},
{
"epoch": 0.3719676549865229,
"grad_norm": 0.8126682639122009,
"learning_rate": 0.0005783054506206152,
"loss": 4.2272,
"step": 3450
},
{
"epoch": 0.37735849056603776,
"grad_norm": 0.7132463455200195,
"learning_rate": 0.0005779816513761467,
"loss": 4.2186,
"step": 3500
},
{
"epoch": 0.38274932614555257,
"grad_norm": 0.818424642086029,
"learning_rate": 0.0005776578521316782,
"loss": 4.1982,
"step": 3550
},
{
"epoch": 0.3881401617250674,
"grad_norm": 0.6916500926017761,
"learning_rate": 0.0005773340528872099,
"loss": 4.2057,
"step": 3600
},
{
"epoch": 0.3935309973045822,
"grad_norm": 0.6858584880828857,
"learning_rate": 0.0005770102536427414,
"loss": 4.2202,
"step": 3650
},
{
"epoch": 0.39892183288409705,
"grad_norm": 0.7574723362922668,
"learning_rate": 0.000576686454398273,
"loss": 4.2073,
"step": 3700
},
{
"epoch": 0.40431266846361186,
"grad_norm": 0.7107515931129456,
"learning_rate": 0.0005763626551538045,
"loss": 4.1939,
"step": 3750
},
{
"epoch": 0.40970350404312667,
"grad_norm": 0.7424683570861816,
"learning_rate": 0.0005760388559093362,
"loss": 4.1827,
"step": 3800
},
{
"epoch": 0.41509433962264153,
"grad_norm": 0.6541684865951538,
"learning_rate": 0.0005757150566648678,
"loss": 4.1782,
"step": 3850
},
{
"epoch": 0.42048517520215634,
"grad_norm": 0.797566294670105,
"learning_rate": 0.0005753912574203993,
"loss": 4.17,
"step": 3900
},
{
"epoch": 0.42587601078167114,
"grad_norm": 0.5999880433082581,
"learning_rate": 0.0005750674581759309,
"loss": 4.1651,
"step": 3950
},
{
"epoch": 0.431266846361186,
"grad_norm": 0.8264360427856445,
"learning_rate": 0.0005747436589314624,
"loss": 4.1524,
"step": 4000
},
{
"epoch": 0.431266846361186,
"eval_accuracy": 0.312240258480874,
"eval_loss": 4.087605953216553,
"eval_runtime": 183.2024,
"eval_samples_per_second": 98.312,
"eval_steps_per_second": 6.146,
"step": 4000
},
{
"epoch": 0.4366576819407008,
"grad_norm": 0.7332233786582947,
"learning_rate": 0.0005744198596869941,
"loss": 4.1654,
"step": 4050
},
{
"epoch": 0.4420485175202156,
"grad_norm": 0.5775137543678284,
"learning_rate": 0.0005740960604425255,
"loss": 4.158,
"step": 4100
},
{
"epoch": 0.4474393530997305,
"grad_norm": 0.8009674549102783,
"learning_rate": 0.0005737722611980572,
"loss": 4.1392,
"step": 4150
},
{
"epoch": 0.4528301886792453,
"grad_norm": 0.6772514581680298,
"learning_rate": 0.0005734484619535887,
"loss": 4.1186,
"step": 4200
},
{
"epoch": 0.4582210242587601,
"grad_norm": 0.6068300604820251,
"learning_rate": 0.0005731246627091203,
"loss": 4.1518,
"step": 4250
},
{
"epoch": 0.4636118598382749,
"grad_norm": 0.6603842973709106,
"learning_rate": 0.0005728008634646518,
"loss": 4.1228,
"step": 4300
},
{
"epoch": 0.46900269541778977,
"grad_norm": 0.6870344877243042,
"learning_rate": 0.0005724770642201835,
"loss": 4.1306,
"step": 4350
},
{
"epoch": 0.4743935309973046,
"grad_norm": 0.8701305985450745,
"learning_rate": 0.000572153264975715,
"loss": 4.1236,
"step": 4400
},
{
"epoch": 0.4797843665768194,
"grad_norm": 0.646145224571228,
"learning_rate": 0.0005718294657312466,
"loss": 4.0979,
"step": 4450
},
{
"epoch": 0.48517520215633425,
"grad_norm": 0.5847651362419128,
"learning_rate": 0.0005715056664867781,
"loss": 4.0943,
"step": 4500
},
{
"epoch": 0.49056603773584906,
"grad_norm": 0.7485958933830261,
"learning_rate": 0.0005711818672423097,
"loss": 4.0872,
"step": 4550
},
{
"epoch": 0.49595687331536387,
"grad_norm": 0.6113314628601074,
"learning_rate": 0.0005708580679978413,
"loss": 4.0971,
"step": 4600
},
{
"epoch": 0.5013477088948787,
"grad_norm": 0.814666748046875,
"learning_rate": 0.0005705342687533729,
"loss": 4.088,
"step": 4650
},
{
"epoch": 0.5067385444743935,
"grad_norm": 0.6813623905181885,
"learning_rate": 0.0005702104695089044,
"loss": 4.0697,
"step": 4700
},
{
"epoch": 0.5121293800539084,
"grad_norm": 0.7049392461776733,
"learning_rate": 0.000569886670264436,
"loss": 4.083,
"step": 4750
},
{
"epoch": 0.5175202156334232,
"grad_norm": 0.6689280867576599,
"learning_rate": 0.0005695628710199675,
"loss": 4.0704,
"step": 4800
},
{
"epoch": 0.522911051212938,
"grad_norm": 0.5872611403465271,
"learning_rate": 0.0005692390717754991,
"loss": 4.0659,
"step": 4850
},
{
"epoch": 0.5283018867924528,
"grad_norm": 0.7064571976661682,
"learning_rate": 0.0005689152725310306,
"loss": 4.0586,
"step": 4900
},
{
"epoch": 0.5336927223719676,
"grad_norm": 0.7021653652191162,
"learning_rate": 0.0005685914732865623,
"loss": 4.0619,
"step": 4950
},
{
"epoch": 0.5390835579514824,
"grad_norm": 0.6203348636627197,
"learning_rate": 0.0005682676740420939,
"loss": 4.0714,
"step": 5000
},
{
"epoch": 0.5390835579514824,
"eval_accuracy": 0.32160700663358477,
"eval_loss": 3.9901158809661865,
"eval_runtime": 183.2916,
"eval_samples_per_second": 98.264,
"eval_steps_per_second": 6.143,
"step": 5000
},
{
"epoch": 0.5444743935309974,
"grad_norm": 0.7498524188995361,
"learning_rate": 0.0005679438747976254,
"loss": 4.0504,
"step": 5050
},
{
"epoch": 0.5498652291105122,
"grad_norm": 0.6415616869926453,
"learning_rate": 0.000567620075553157,
"loss": 4.045,
"step": 5100
},
{
"epoch": 0.555256064690027,
"grad_norm": 0.68656986951828,
"learning_rate": 0.0005672962763086886,
"loss": 4.0469,
"step": 5150
},
{
"epoch": 0.5606469002695418,
"grad_norm": 0.6866742968559265,
"learning_rate": 0.0005669724770642202,
"loss": 4.0368,
"step": 5200
},
{
"epoch": 0.5660377358490566,
"grad_norm": 0.6404621601104736,
"learning_rate": 0.0005666486778197517,
"loss": 4.0323,
"step": 5250
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.6272525191307068,
"learning_rate": 0.0005663248785752833,
"loss": 4.0431,
"step": 5300
},
{
"epoch": 0.5768194070080862,
"grad_norm": 0.6219531297683716,
"learning_rate": 0.0005660010793308148,
"loss": 4.0283,
"step": 5350
},
{
"epoch": 0.5822102425876011,
"grad_norm": 0.5969902276992798,
"learning_rate": 0.0005656772800863465,
"loss": 4.0455,
"step": 5400
},
{
"epoch": 0.5876010781671159,
"grad_norm": 0.7162837982177734,
"learning_rate": 0.0005653534808418779,
"loss": 4.0187,
"step": 5450
},
{
"epoch": 0.5929919137466307,
"grad_norm": 0.5840233564376831,
"learning_rate": 0.0005650296815974096,
"loss": 4.0264,
"step": 5500
},
{
"epoch": 0.5983827493261455,
"grad_norm": 0.5966022610664368,
"learning_rate": 0.0005647058823529411,
"loss": 4.0154,
"step": 5550
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.5551064610481262,
"learning_rate": 0.0005643820831084727,
"loss": 4.0069,
"step": 5600
},
{
"epoch": 0.6091644204851752,
"grad_norm": 0.7473669052124023,
"learning_rate": 0.0005640582838640042,
"loss": 4.0204,
"step": 5650
},
{
"epoch": 0.6145552560646901,
"grad_norm": 0.6089016795158386,
"learning_rate": 0.0005637344846195358,
"loss": 4.0132,
"step": 5700
},
{
"epoch": 0.6199460916442049,
"grad_norm": 0.5742376446723938,
"learning_rate": 0.0005634106853750674,
"loss": 4.02,
"step": 5750
},
{
"epoch": 0.6253369272237197,
"grad_norm": 0.857323169708252,
"learning_rate": 0.000563086886130599,
"loss": 3.9884,
"step": 5800
},
{
"epoch": 0.6307277628032345,
"grad_norm": 0.6711622476577759,
"learning_rate": 0.0005627630868861305,
"loss": 3.9889,
"step": 5850
},
{
"epoch": 0.6361185983827493,
"grad_norm": 0.6008835434913635,
"learning_rate": 0.0005624392876416621,
"loss": 3.9763,
"step": 5900
},
{
"epoch": 0.6415094339622641,
"grad_norm": 0.6592042446136475,
"learning_rate": 0.0005621154883971937,
"loss": 3.9816,
"step": 5950
},
{
"epoch": 0.6469002695417789,
"grad_norm": 0.6811545491218567,
"learning_rate": 0.0005617916891527253,
"loss": 3.9631,
"step": 6000
},
{
"epoch": 0.6469002695417789,
"eval_accuracy": 0.328109556010618,
"eval_loss": 3.9180023670196533,
"eval_runtime": 183.5563,
"eval_samples_per_second": 98.122,
"eval_steps_per_second": 6.134,
"step": 6000
},
{
"epoch": 0.6522911051212938,
"grad_norm": 0.838979959487915,
"learning_rate": 0.0005614678899082568,
"loss": 3.9977,
"step": 6050
},
{
"epoch": 0.6576819407008087,
"grad_norm": 0.6069373488426208,
"learning_rate": 0.0005611440906637884,
"loss": 3.9844,
"step": 6100
},
{
"epoch": 0.6630727762803235,
"grad_norm": 0.6294558048248291,
"learning_rate": 0.00056082029141932,
"loss": 3.9769,
"step": 6150
},
{
"epoch": 0.6684636118598383,
"grad_norm": 0.6527539491653442,
"learning_rate": 0.0005604964921748515,
"loss": 3.9875,
"step": 6200
},
{
"epoch": 0.6738544474393531,
"grad_norm": 0.6204699873924255,
"learning_rate": 0.000560172692930383,
"loss": 3.949,
"step": 6250
},
{
"epoch": 0.6792452830188679,
"grad_norm": 0.6665420532226562,
"learning_rate": 0.0005598488936859147,
"loss": 3.96,
"step": 6300
},
{
"epoch": 0.6846361185983828,
"grad_norm": 0.6489077806472778,
"learning_rate": 0.0005595250944414463,
"loss": 3.9534,
"step": 6350
},
{
"epoch": 0.6900269541778976,
"grad_norm": 0.6623448729515076,
"learning_rate": 0.0005592012951969778,
"loss": 3.9454,
"step": 6400
},
{
"epoch": 0.6954177897574124,
"grad_norm": 0.7035852670669556,
"learning_rate": 0.0005588774959525094,
"loss": 3.9722,
"step": 6450
},
{
"epoch": 0.7008086253369272,
"grad_norm": 0.5686920881271362,
"learning_rate": 0.000558553696708041,
"loss": 3.9525,
"step": 6500
},
{
"epoch": 0.706199460916442,
"grad_norm": 0.5780633091926575,
"learning_rate": 0.0005582298974635726,
"loss": 3.9525,
"step": 6550
},
{
"epoch": 0.7115902964959568,
"grad_norm": 0.6254565715789795,
"learning_rate": 0.0005579060982191041,
"loss": 3.9494,
"step": 6600
},
{
"epoch": 0.7169811320754716,
"grad_norm": 0.6464234590530396,
"learning_rate": 0.0005575822989746357,
"loss": 3.9432,
"step": 6650
},
{
"epoch": 0.7223719676549866,
"grad_norm": 0.5895872712135315,
"learning_rate": 0.0005572584997301672,
"loss": 3.9378,
"step": 6700
},
{
"epoch": 0.7277628032345014,
"grad_norm": 0.6661088466644287,
"learning_rate": 0.0005569347004856989,
"loss": 3.9405,
"step": 6750
},
{
"epoch": 0.7331536388140162,
"grad_norm": 0.5486308932304382,
"learning_rate": 0.0005566109012412303,
"loss": 3.9499,
"step": 6800
},
{
"epoch": 0.738544474393531,
"grad_norm": 0.5206322073936462,
"learning_rate": 0.000556287101996762,
"loss": 3.9308,
"step": 6850
},
{
"epoch": 0.7439353099730458,
"grad_norm": 0.60262531042099,
"learning_rate": 0.0005559633027522935,
"loss": 3.9279,
"step": 6900
},
{
"epoch": 0.7493261455525606,
"grad_norm": 0.6978471875190735,
"learning_rate": 0.0005556395035078251,
"loss": 3.9152,
"step": 6950
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.5400425791740417,
"learning_rate": 0.0005553157042633566,
"loss": 3.9285,
"step": 7000
},
{
"epoch": 0.7547169811320755,
"eval_accuracy": 0.33344778104183126,
"eval_loss": 3.8615846633911133,
"eval_runtime": 182.602,
"eval_samples_per_second": 98.635,
"eval_steps_per_second": 6.166,
"step": 7000
},
{
"epoch": 0.7601078167115903,
"grad_norm": 0.5189043283462524,
"learning_rate": 0.0005549919050188882,
"loss": 3.9302,
"step": 7050
},
{
"epoch": 0.7654986522911051,
"grad_norm": 0.5751085877418518,
"learning_rate": 0.0005546681057744198,
"loss": 3.9332,
"step": 7100
},
{
"epoch": 0.77088948787062,
"grad_norm": 0.6791032552719116,
"learning_rate": 0.0005543443065299514,
"loss": 3.9073,
"step": 7150
},
{
"epoch": 0.7762803234501348,
"grad_norm": 0.7200894951820374,
"learning_rate": 0.000554020507285483,
"loss": 3.9193,
"step": 7200
},
{
"epoch": 0.7816711590296496,
"grad_norm": 0.6101612448692322,
"learning_rate": 0.0005536967080410145,
"loss": 3.919,
"step": 7250
},
{
"epoch": 0.7870619946091644,
"grad_norm": 0.5997413396835327,
"learning_rate": 0.0005533729087965462,
"loss": 3.9079,
"step": 7300
},
{
"epoch": 0.7924528301886793,
"grad_norm": 0.5748898386955261,
"learning_rate": 0.0005530491095520777,
"loss": 3.9162,
"step": 7350
},
{
"epoch": 0.7978436657681941,
"grad_norm": 0.6359212398529053,
"learning_rate": 0.0005527253103076093,
"loss": 3.8807,
"step": 7400
},
{
"epoch": 0.8032345013477089,
"grad_norm": 0.6088876724243164,
"learning_rate": 0.0005524015110631408,
"loss": 3.8902,
"step": 7450
},
{
"epoch": 0.8086253369272237,
"grad_norm": 0.6242630481719971,
"learning_rate": 0.0005520777118186724,
"loss": 3.9035,
"step": 7500
},
{
"epoch": 0.8140161725067385,
"grad_norm": 0.622336208820343,
"learning_rate": 0.0005517539125742039,
"loss": 3.8986,
"step": 7550
},
{
"epoch": 0.8194070080862533,
"grad_norm": 0.5424439311027527,
"learning_rate": 0.0005514301133297355,
"loss": 3.9166,
"step": 7600
},
{
"epoch": 0.8247978436657682,
"grad_norm": 0.5808700323104858,
"learning_rate": 0.0005511063140852671,
"loss": 3.9005,
"step": 7650
},
{
"epoch": 0.8301886792452831,
"grad_norm": 0.5555144548416138,
"learning_rate": 0.0005507825148407987,
"loss": 3.8722,
"step": 7700
},
{
"epoch": 0.8355795148247979,
"grad_norm": 0.7033872604370117,
"learning_rate": 0.0005504587155963302,
"loss": 3.8852,
"step": 7750
},
{
"epoch": 0.8409703504043127,
"grad_norm": 0.5577759742736816,
"learning_rate": 0.0005501349163518618,
"loss": 3.9008,
"step": 7800
},
{
"epoch": 0.8463611859838275,
"grad_norm": Infinity,
"learning_rate": 0.0005498175930922827,
"loss": 3.888,
"step": 7850
},
{
"epoch": 0.8517520215633423,
"grad_norm": 0.6767401099205017,
"learning_rate": 0.0005494937938478143,
"loss": 3.8863,
"step": 7900
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.5402886271476746,
"learning_rate": 0.0005491699946033459,
"loss": 3.8885,
"step": 7950
},
{
"epoch": 0.862533692722372,
"grad_norm": 0.5724102258682251,
"learning_rate": 0.0005488461953588775,
"loss": 3.8676,
"step": 8000
},
{
"epoch": 0.862533692722372,
"eval_accuracy": 0.3375052058314874,
"eval_loss": 3.816683292388916,
"eval_runtime": 181.6977,
"eval_samples_per_second": 99.126,
"eval_steps_per_second": 6.197,
"step": 8000
},
{
"epoch": 0.8679245283018868,
"grad_norm": 0.5436832308769226,
"learning_rate": 0.000548522396114409,
"loss": 3.8838,
"step": 8050
},
{
"epoch": 0.8733153638814016,
"grad_norm": 0.6710783243179321,
"learning_rate": 0.0005481985968699406,
"loss": 3.8623,
"step": 8100
},
{
"epoch": 0.8787061994609164,
"grad_norm": 0.5671558380126953,
"learning_rate": 0.0005478747976254721,
"loss": 3.8746,
"step": 8150
},
{
"epoch": 0.8840970350404312,
"grad_norm": 0.6092495322227478,
"learning_rate": 0.0005475509983810037,
"loss": 3.8635,
"step": 8200
},
{
"epoch": 0.889487870619946,
"grad_norm": 0.6280654072761536,
"learning_rate": 0.0005472271991365352,
"loss": 3.8772,
"step": 8250
},
{
"epoch": 0.894878706199461,
"grad_norm": 0.5402874946594238,
"learning_rate": 0.0005469033998920669,
"loss": 3.8525,
"step": 8300
},
{
"epoch": 0.9002695417789758,
"grad_norm": 0.5597426295280457,
"learning_rate": 0.0005465796006475984,
"loss": 3.8685,
"step": 8350
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.5542333126068115,
"learning_rate": 0.00054625580140313,
"loss": 3.8677,
"step": 8400
},
{
"epoch": 0.9110512129380054,
"grad_norm": 0.5573087930679321,
"learning_rate": 0.0005459320021586615,
"loss": 3.8402,
"step": 8450
},
{
"epoch": 0.9164420485175202,
"grad_norm": 0.5278663039207458,
"learning_rate": 0.0005456082029141932,
"loss": 3.8521,
"step": 8500
},
{
"epoch": 0.921832884097035,
"grad_norm": 0.6010989546775818,
"learning_rate": 0.0005452844036697248,
"loss": 3.8536,
"step": 8550
},
{
"epoch": 0.9272237196765498,
"grad_norm": 0.6041896939277649,
"learning_rate": 0.0005449606044252563,
"loss": 3.8816,
"step": 8600
},
{
"epoch": 0.9326145552560647,
"grad_norm": 0.6230564117431641,
"learning_rate": 0.0005446368051807879,
"loss": 3.8547,
"step": 8650
},
{
"epoch": 0.9380053908355795,
"grad_norm": 0.6061603426933289,
"learning_rate": 0.0005443130059363194,
"loss": 3.8612,
"step": 8700
},
{
"epoch": 0.9433962264150944,
"grad_norm": 0.532725989818573,
"learning_rate": 0.0005439892066918511,
"loss": 3.8453,
"step": 8750
},
{
"epoch": 0.9487870619946092,
"grad_norm": 0.6650099158287048,
"learning_rate": 0.0005436654074473825,
"loss": 3.845,
"step": 8800
},
{
"epoch": 0.954177897574124,
"grad_norm": 0.5258676409721375,
"learning_rate": 0.0005433416082029142,
"loss": 3.8474,
"step": 8850
},
{
"epoch": 0.9595687331536388,
"grad_norm": 0.5918166637420654,
"learning_rate": 0.0005430178089584457,
"loss": 3.8476,
"step": 8900
},
{
"epoch": 0.9649595687331537,
"grad_norm": 0.7267642617225647,
"learning_rate": 0.0005426940097139773,
"loss": 3.8489,
"step": 8950
},
{
"epoch": 0.9703504043126685,
"grad_norm": 0.639377772808075,
"learning_rate": 0.0005423702104695088,
"loss": 3.8648,
"step": 9000
},
{
"epoch": 0.9703504043126685,
"eval_accuracy": 0.3408996306996996,
"eval_loss": 3.779827356338501,
"eval_runtime": 181.5426,
"eval_samples_per_second": 99.211,
"eval_steps_per_second": 6.202,
"step": 9000
},
{
"epoch": 0.9757412398921833,
"grad_norm": 0.6780883073806763,
"learning_rate": 0.0005420464112250404,
"loss": 3.8448,
"step": 9050
},
{
"epoch": 0.9811320754716981,
"grad_norm": 0.5915326476097107,
"learning_rate": 0.000541722611980572,
"loss": 3.8335,
"step": 9100
},
{
"epoch": 0.9865229110512129,
"grad_norm": 0.5880796313285828,
"learning_rate": 0.0005413988127361036,
"loss": 3.8434,
"step": 9150
},
{
"epoch": 0.9919137466307277,
"grad_norm": 0.551231861114502,
"learning_rate": 0.0005410750134916351,
"loss": 3.8469,
"step": 9200
},
{
"epoch": 0.9973045822102425,
"grad_norm": 0.542789101600647,
"learning_rate": 0.0005407512142471667,
"loss": 3.8382,
"step": 9250
},
{
"epoch": 1.0026954177897573,
"grad_norm": 0.5782141089439392,
"learning_rate": 0.0005404274150026983,
"loss": 3.8143,
"step": 9300
},
{
"epoch": 1.0080862533692723,
"grad_norm": 0.585417628288269,
"learning_rate": 0.0005401036157582299,
"loss": 3.782,
"step": 9350
},
{
"epoch": 1.013477088948787,
"grad_norm": 0.5786333680152893,
"learning_rate": 0.0005397798165137614,
"loss": 3.7919,
"step": 9400
},
{
"epoch": 1.0188679245283019,
"grad_norm": 0.5752071738243103,
"learning_rate": 0.000539456017269293,
"loss": 3.7809,
"step": 9450
},
{
"epoch": 1.0242587601078168,
"grad_norm": 0.5453478693962097,
"learning_rate": 0.0005391322180248245,
"loss": 3.7775,
"step": 9500
},
{
"epoch": 1.0296495956873315,
"grad_norm": 0.5964511632919312,
"learning_rate": 0.0005388084187803561,
"loss": 3.7881,
"step": 9550
},
{
"epoch": 1.0350404312668464,
"grad_norm": 0.6188067197799683,
"learning_rate": 0.0005384846195358876,
"loss": 3.7672,
"step": 9600
},
{
"epoch": 1.0404312668463611,
"grad_norm": 0.5592512488365173,
"learning_rate": 0.0005381608202914193,
"loss": 3.7615,
"step": 9650
},
{
"epoch": 1.045822102425876,
"grad_norm": 0.5626137256622314,
"learning_rate": 0.0005378370210469509,
"loss": 3.7723,
"step": 9700
},
{
"epoch": 1.0512129380053907,
"grad_norm": 0.6109785437583923,
"learning_rate": 0.0005375132218024824,
"loss": 3.7644,
"step": 9750
},
{
"epoch": 1.0566037735849056,
"grad_norm": 0.6150305271148682,
"learning_rate": 0.000537189422558014,
"loss": 3.7717,
"step": 9800
},
{
"epoch": 1.0619946091644206,
"grad_norm": 0.5786086320877075,
"learning_rate": 0.0005368656233135455,
"loss": 3.7537,
"step": 9850
},
{
"epoch": 1.0673854447439353,
"grad_norm": 0.5935583710670471,
"learning_rate": 0.0005365418240690772,
"loss": 3.7646,
"step": 9900
},
{
"epoch": 1.0727762803234502,
"grad_norm": 0.5563629269599915,
"learning_rate": 0.0005362180248246087,
"loss": 3.7625,
"step": 9950
},
{
"epoch": 1.0781671159029649,
"grad_norm": 0.5583340525627136,
"learning_rate": 0.0005358942255801403,
"loss": 3.7661,
"step": 10000
},
{
"epoch": 1.0781671159029649,
"eval_accuracy": 0.3447591985806456,
"eval_loss": 3.7474043369293213,
"eval_runtime": 179.3333,
"eval_samples_per_second": 100.433,
"eval_steps_per_second": 6.279,
"step": 10000
},
{
"epoch": 1.0835579514824798,
"grad_norm": 0.5278469324111938,
"learning_rate": 0.0005355704263356718,
"loss": 3.7572,
"step": 10050
},
{
"epoch": 1.0889487870619945,
"grad_norm": 0.599793553352356,
"learning_rate": 0.0005352466270912035,
"loss": 3.7482,
"step": 10100
},
{
"epoch": 1.0943396226415094,
"grad_norm": 0.594103991985321,
"learning_rate": 0.000534922827846735,
"loss": 3.7811,
"step": 10150
},
{
"epoch": 1.0997304582210243,
"grad_norm": 0.539659321308136,
"learning_rate": 0.0005345990286022666,
"loss": 3.7808,
"step": 10200
},
{
"epoch": 1.105121293800539,
"grad_norm": 0.5949695110321045,
"learning_rate": 0.0005342752293577981,
"loss": 3.7483,
"step": 10250
},
{
"epoch": 1.110512129380054,
"grad_norm": 0.6354559659957886,
"learning_rate": 0.0005339514301133297,
"loss": 3.7743,
"step": 10300
},
{
"epoch": 1.1159029649595686,
"grad_norm": 0.5751685500144958,
"learning_rate": 0.0005336276308688612,
"loss": 3.7727,
"step": 10350
},
{
"epoch": 1.1212938005390836,
"grad_norm": 0.6152482032775879,
"learning_rate": 0.0005333038316243928,
"loss": 3.7604,
"step": 10400
},
{
"epoch": 1.1266846361185983,
"grad_norm": 0.6259979009628296,
"learning_rate": 0.0005329800323799244,
"loss": 3.7242,
"step": 10450
},
{
"epoch": 1.1320754716981132,
"grad_norm": 0.5735751390457153,
"learning_rate": 0.000532656233135456,
"loss": 3.75,
"step": 10500
},
{
"epoch": 1.137466307277628,
"grad_norm": 0.5335408449172974,
"learning_rate": 0.0005323324338909875,
"loss": 3.7497,
"step": 10550
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.5535331964492798,
"learning_rate": 0.0005320086346465191,
"loss": 3.757,
"step": 10600
},
{
"epoch": 1.1482479784366577,
"grad_norm": 0.5518361926078796,
"learning_rate": 0.0005316848354020507,
"loss": 3.7635,
"step": 10650
},
{
"epoch": 1.1536388140161726,
"grad_norm": 0.6065233945846558,
"learning_rate": 0.0005313610361575823,
"loss": 3.7482,
"step": 10700
},
{
"epoch": 1.1590296495956873,
"grad_norm": 0.5510832667350769,
"learning_rate": 0.0005310372369131138,
"loss": 3.7464,
"step": 10750
},
{
"epoch": 1.1644204851752022,
"grad_norm": 0.5745865702629089,
"learning_rate": 0.0005307134376686454,
"loss": 3.7237,
"step": 10800
},
{
"epoch": 1.169811320754717,
"grad_norm": 0.6756680011749268,
"learning_rate": 0.000530389638424177,
"loss": 3.7627,
"step": 10850
},
{
"epoch": 1.1752021563342319,
"grad_norm": 0.6508898138999939,
"learning_rate": 0.0005300658391797085,
"loss": 3.7616,
"step": 10900
},
{
"epoch": 1.1805929919137466,
"grad_norm": 0.6372632384300232,
"learning_rate": 0.00052974203993524,
"loss": 3.7205,
"step": 10950
},
{
"epoch": 1.1859838274932615,
"grad_norm": 0.584235668182373,
"learning_rate": 0.0005294182406907717,
"loss": 3.7415,
"step": 11000
},
{
"epoch": 1.1859838274932615,
"eval_accuracy": 0.3469530090385078,
"eval_loss": 3.7233991622924805,
"eval_runtime": 179.2943,
"eval_samples_per_second": 100.455,
"eval_steps_per_second": 6.28,
"step": 11000
},
{
"epoch": 1.1913746630727764,
"grad_norm": 0.5607488751411438,
"learning_rate": 0.0005290944414463033,
"loss": 3.7504,
"step": 11050
},
{
"epoch": 1.196765498652291,
"grad_norm": 0.6091164946556091,
"learning_rate": 0.0005287706422018348,
"loss": 3.744,
"step": 11100
},
{
"epoch": 1.202156334231806,
"grad_norm": 0.6206035614013672,
"learning_rate": 0.0005284468429573664,
"loss": 3.7378,
"step": 11150
},
{
"epoch": 1.2075471698113207,
"grad_norm": 0.6528010964393616,
"learning_rate": 0.0005281230437128979,
"loss": 3.7509,
"step": 11200
},
{
"epoch": 1.2129380053908356,
"grad_norm": 0.520330011844635,
"learning_rate": 0.0005278057204533189,
"loss": 3.7608,
"step": 11250
},
{
"epoch": 1.2183288409703503,
"grad_norm": 0.6227236390113831,
"learning_rate": 0.0005274819212088505,
"loss": 3.7286,
"step": 11300
},
{
"epoch": 1.2237196765498652,
"grad_norm": 0.5682984590530396,
"learning_rate": 0.0005271581219643821,
"loss": 3.7505,
"step": 11350
},
{
"epoch": 1.2291105121293802,
"grad_norm": 0.6123554706573486,
"learning_rate": 0.0005268343227199136,
"loss": 3.7508,
"step": 11400
},
{
"epoch": 1.2345013477088949,
"grad_norm": 0.507030189037323,
"learning_rate": 0.0005265105234754452,
"loss": 3.7553,
"step": 11450
},
{
"epoch": 1.2398921832884098,
"grad_norm": 0.5759048461914062,
"learning_rate": 0.0005261867242309767,
"loss": 3.7192,
"step": 11500
},
{
"epoch": 1.2452830188679245,
"grad_norm": 0.5370573997497559,
"learning_rate": 0.0005258629249865083,
"loss": 3.7409,
"step": 11550
},
{
"epoch": 1.2506738544474394,
"grad_norm": 0.5597508549690247,
"learning_rate": 0.0005255391257420398,
"loss": 3.739,
"step": 11600
},
{
"epoch": 1.256064690026954,
"grad_norm": 0.6163648366928101,
"learning_rate": 0.0005252153264975715,
"loss": 3.7364,
"step": 11650
},
{
"epoch": 1.261455525606469,
"grad_norm": 0.561890721321106,
"learning_rate": 0.000524891527253103,
"loss": 3.7371,
"step": 11700
},
{
"epoch": 1.266846361185984,
"grad_norm": 0.6164953708648682,
"learning_rate": 0.0005245677280086346,
"loss": 3.7437,
"step": 11750
},
{
"epoch": 1.2722371967654986,
"grad_norm": 0.5438469648361206,
"learning_rate": 0.0005242439287641661,
"loss": 3.7317,
"step": 11800
},
{
"epoch": 1.2776280323450135,
"grad_norm": 0.5996424555778503,
"learning_rate": 0.0005239201295196978,
"loss": 3.7139,
"step": 11850
},
{
"epoch": 1.2830188679245282,
"grad_norm": 0.5443153381347656,
"learning_rate": 0.0005235963302752293,
"loss": 3.7189,
"step": 11900
},
{
"epoch": 1.2884097035040432,
"grad_norm": 0.5056083798408508,
"learning_rate": 0.0005232725310307609,
"loss": 3.7286,
"step": 11950
},
{
"epoch": 1.2938005390835579,
"grad_norm": 0.5346047878265381,
"learning_rate": 0.0005229487317862924,
"loss": 3.7241,
"step": 12000
},
{
"epoch": 1.2938005390835579,
"eval_accuracy": 0.3486271327339533,
"eval_loss": 3.701392889022827,
"eval_runtime": 179.3961,
"eval_samples_per_second": 100.398,
"eval_steps_per_second": 6.277,
"step": 12000
},
{
"epoch": 1.2991913746630728,
"grad_norm": 0.561946451663971,
"learning_rate": 0.000522624932541824,
"loss": 3.7065,
"step": 12050
},
{
"epoch": 1.3045822102425877,
"grad_norm": 0.6094053387641907,
"learning_rate": 0.0005223011332973557,
"loss": 3.7432,
"step": 12100
},
{
"epoch": 1.3099730458221024,
"grad_norm": 0.548875093460083,
"learning_rate": 0.0005219773340528872,
"loss": 3.7251,
"step": 12150
},
{
"epoch": 1.3153638814016173,
"grad_norm": 0.5728870630264282,
"learning_rate": 0.0005216535348084188,
"loss": 3.7107,
"step": 12200
},
{
"epoch": 1.320754716981132,
"grad_norm": 0.6108008623123169,
"learning_rate": 0.0005213297355639503,
"loss": 3.7369,
"step": 12250
},
{
"epoch": 1.326145552560647,
"grad_norm": 0.5879302024841309,
"learning_rate": 0.0005210059363194819,
"loss": 3.7043,
"step": 12300
},
{
"epoch": 1.3315363881401616,
"grad_norm": 0.550482988357544,
"learning_rate": 0.0005206821370750134,
"loss": 3.7288,
"step": 12350
},
{
"epoch": 1.3369272237196765,
"grad_norm": 0.5876588821411133,
"learning_rate": 0.000520358337830545,
"loss": 3.7,
"step": 12400
},
{
"epoch": 1.3423180592991915,
"grad_norm": 0.5548244118690491,
"learning_rate": 0.0005200345385860766,
"loss": 3.7202,
"step": 12450
},
{
"epoch": 1.3477088948787062,
"grad_norm": 0.6892051696777344,
"learning_rate": 0.0005197107393416082,
"loss": 3.7287,
"step": 12500
},
{
"epoch": 1.353099730458221,
"grad_norm": 0.5671435594558716,
"learning_rate": 0.0005193869400971397,
"loss": 3.7361,
"step": 12550
},
{
"epoch": 1.3584905660377358,
"grad_norm": 0.5692296624183655,
"learning_rate": 0.0005190631408526713,
"loss": 3.7137,
"step": 12600
},
{
"epoch": 1.3638814016172507,
"grad_norm": 0.5560773611068726,
"learning_rate": 0.0005187393416082029,
"loss": 3.7092,
"step": 12650
},
{
"epoch": 1.3692722371967654,
"grad_norm": 0.5214106440544128,
"learning_rate": 0.0005184155423637345,
"loss": 3.7076,
"step": 12700
},
{
"epoch": 1.3746630727762803,
"grad_norm": 0.5432198643684387,
"learning_rate": 0.000518091743119266,
"loss": 3.7224,
"step": 12750
},
{
"epoch": 1.3800539083557952,
"grad_norm": 0.5707585215568542,
"learning_rate": 0.0005177679438747976,
"loss": 3.7136,
"step": 12800
},
{
"epoch": 1.38544474393531,
"grad_norm": 0.583604633808136,
"learning_rate": 0.0005174441446303291,
"loss": 3.7356,
"step": 12850
},
{
"epoch": 1.3908355795148248,
"grad_norm": 0.567251443862915,
"learning_rate": 0.0005171203453858607,
"loss": 3.7341,
"step": 12900
},
{
"epoch": 1.3962264150943398,
"grad_norm": 0.5615735054016113,
"learning_rate": 0.0005167965461413922,
"loss": 3.7331,
"step": 12950
},
{
"epoch": 1.4016172506738545,
"grad_norm": 0.6066398024559021,
"learning_rate": 0.0005164727468969239,
"loss": 3.715,
"step": 13000
},
{
"epoch": 1.4016172506738545,
"eval_accuracy": 0.3512098118773035,
"eval_loss": 3.67993426322937,
"eval_runtime": 181.5275,
"eval_samples_per_second": 99.219,
"eval_steps_per_second": 6.203,
"step": 13000
},
{
"epoch": 1.4070080862533692,
"grad_norm": 0.6397793292999268,
"learning_rate": 0.0005161489476524554,
"loss": 3.6967,
"step": 13050
},
{
"epoch": 1.412398921832884,
"grad_norm": 0.6502065062522888,
"learning_rate": 0.000515825148407987,
"loss": 3.7131,
"step": 13100
},
{
"epoch": 1.417789757412399,
"grad_norm": 0.5458613038063049,
"learning_rate": 0.0005155013491635185,
"loss": 3.7147,
"step": 13150
},
{
"epoch": 1.4231805929919137,
"grad_norm": 0.5654726028442383,
"learning_rate": 0.0005151775499190501,
"loss": 3.7392,
"step": 13200
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.5382741689682007,
"learning_rate": 0.0005148537506745818,
"loss": 3.6966,
"step": 13250
},
{
"epoch": 1.4339622641509435,
"grad_norm": 0.5260798931121826,
"learning_rate": 0.0005145299514301133,
"loss": 3.703,
"step": 13300
},
{
"epoch": 1.4393530997304582,
"grad_norm": 0.5853448510169983,
"learning_rate": 0.0005142061521856449,
"loss": 3.684,
"step": 13350
},
{
"epoch": 1.444743935309973,
"grad_norm": 0.538550615310669,
"learning_rate": 0.0005138823529411764,
"loss": 3.7219,
"step": 13400
},
{
"epoch": 1.4501347708894878,
"grad_norm": 0.596662700176239,
"learning_rate": 0.0005135585536967081,
"loss": 3.7194,
"step": 13450
},
{
"epoch": 1.4555256064690028,
"grad_norm": 0.6186038851737976,
"learning_rate": 0.0005132347544522396,
"loss": 3.7033,
"step": 13500
},
{
"epoch": 1.4609164420485174,
"grad_norm": 0.542559802532196,
"learning_rate": 0.0005129109552077712,
"loss": 3.7194,
"step": 13550
},
{
"epoch": 1.4663072776280324,
"grad_norm": 0.5352370738983154,
"learning_rate": 0.0005125871559633027,
"loss": 3.7046,
"step": 13600
},
{
"epoch": 1.4716981132075473,
"grad_norm": 0.5471335649490356,
"learning_rate": 0.0005122633567188343,
"loss": 3.7091,
"step": 13650
},
{
"epoch": 1.477088948787062,
"grad_norm": 0.6399145126342773,
"learning_rate": 0.0005119395574743658,
"loss": 3.7208,
"step": 13700
},
{
"epoch": 1.482479784366577,
"grad_norm": 0.5687584280967712,
"learning_rate": 0.0005116222342147868,
"loss": 3.7,
"step": 13750
},
{
"epoch": 1.4878706199460916,
"grad_norm": 0.5827769041061401,
"learning_rate": 0.0005112984349703183,
"loss": 3.7004,
"step": 13800
},
{
"epoch": 1.4932614555256065,
"grad_norm": 0.5126985907554626,
"learning_rate": 0.00051097463572585,
"loss": 3.6955,
"step": 13850
},
{
"epoch": 1.4986522911051212,
"grad_norm": 0.6048992872238159,
"learning_rate": 0.0005106508364813815,
"loss": 3.6853,
"step": 13900
},
{
"epoch": 1.5040431266846361,
"grad_norm": 0.5209577679634094,
"learning_rate": 0.0005103270372369131,
"loss": 3.7061,
"step": 13950
},
{
"epoch": 1.509433962264151,
"grad_norm": 0.558080792427063,
"learning_rate": 0.0005100032379924446,
"loss": 3.6982,
"step": 14000
},
{
"epoch": 1.509433962264151,
"eval_accuracy": 0.35338667248466665,
"eval_loss": 3.6556079387664795,
"eval_runtime": 181.0746,
"eval_samples_per_second": 99.467,
"eval_steps_per_second": 6.218,
"step": 14000
},
{
"epoch": 1.5148247978436657,
"grad_norm": 0.5223732590675354,
"learning_rate": 0.0005096794387479762,
"loss": 3.6743,
"step": 14050
},
{
"epoch": 1.5202156334231804,
"grad_norm": 0.5266443490982056,
"learning_rate": 0.0005093556395035078,
"loss": 3.7049,
"step": 14100
},
{
"epoch": 1.5256064690026954,
"grad_norm": 0.5410740971565247,
"learning_rate": 0.0005090318402590394,
"loss": 3.6871,
"step": 14150
},
{
"epoch": 1.5309973045822103,
"grad_norm": 0.5198672413825989,
"learning_rate": 0.0005087080410145709,
"loss": 3.6848,
"step": 14200
},
{
"epoch": 1.536388140161725,
"grad_norm": 0.5388085246086121,
"learning_rate": 0.0005083842417701025,
"loss": 3.6925,
"step": 14250
},
{
"epoch": 1.54177897574124,
"grad_norm": 0.5680968761444092,
"learning_rate": 0.000508060442525634,
"loss": 3.7015,
"step": 14300
},
{
"epoch": 1.5471698113207548,
"grad_norm": 0.529816746711731,
"learning_rate": 0.0005077366432811656,
"loss": 3.7089,
"step": 14350
},
{
"epoch": 1.5525606469002695,
"grad_norm": 0.588886022567749,
"learning_rate": 0.0005074128440366971,
"loss": 3.6898,
"step": 14400
},
{
"epoch": 1.5579514824797842,
"grad_norm": 0.53922438621521,
"learning_rate": 0.0005070890447922288,
"loss": 3.7101,
"step": 14450
},
{
"epoch": 1.5633423180592994,
"grad_norm": 0.5846419930458069,
"learning_rate": 0.0005067652455477604,
"loss": 3.6752,
"step": 14500
},
{
"epoch": 1.568733153638814,
"grad_norm": 0.5448037981987,
"learning_rate": 0.0005064414463032919,
"loss": 3.6868,
"step": 14550
},
{
"epoch": 1.5741239892183287,
"grad_norm": 0.5100300908088684,
"learning_rate": 0.0005061176470588235,
"loss": 3.6773,
"step": 14600
},
{
"epoch": 1.5795148247978437,
"grad_norm": 0.5662325620651245,
"learning_rate": 0.0005057938478143551,
"loss": 3.693,
"step": 14650
},
{
"epoch": 1.5849056603773586,
"grad_norm": 0.5295085906982422,
"learning_rate": 0.0005054700485698867,
"loss": 3.6999,
"step": 14700
},
{
"epoch": 1.5902964959568733,
"grad_norm": 0.6152496337890625,
"learning_rate": 0.0005051462493254182,
"loss": 3.6639,
"step": 14750
},
{
"epoch": 1.595687331536388,
"grad_norm": 0.5873838067054749,
"learning_rate": 0.0005048224500809498,
"loss": 3.6879,
"step": 14800
},
{
"epoch": 1.6010781671159031,
"grad_norm": 0.5197713375091553,
"learning_rate": 0.0005044986508364813,
"loss": 3.6784,
"step": 14850
},
{
"epoch": 1.6064690026954178,
"grad_norm": 0.5193688869476318,
"learning_rate": 0.0005041748515920129,
"loss": 3.6756,
"step": 14900
},
{
"epoch": 1.6118598382749325,
"grad_norm": 0.5456000566482544,
"learning_rate": 0.0005038510523475444,
"loss": 3.6745,
"step": 14950
},
{
"epoch": 1.6172506738544474,
"grad_norm": 0.565951943397522,
"learning_rate": 0.0005035272531030761,
"loss": 3.6829,
"step": 15000
},
{
"epoch": 1.6172506738544474,
"eval_accuracy": 0.3553477484568302,
"eval_loss": 3.6376187801361084,
"eval_runtime": 181.6027,
"eval_samples_per_second": 99.178,
"eval_steps_per_second": 6.2,
"step": 15000
},
{
"epoch": 1.6226415094339623,
"grad_norm": 0.5930358171463013,
"learning_rate": 0.0005032034538586076,
"loss": 3.6899,
"step": 15050
},
{
"epoch": 1.628032345013477,
"grad_norm": 0.5067706108093262,
"learning_rate": 0.0005028796546141392,
"loss": 3.6659,
"step": 15100
},
{
"epoch": 1.633423180592992,
"grad_norm": 0.5201679468154907,
"learning_rate": 0.0005025558553696707,
"loss": 3.6864,
"step": 15150
},
{
"epoch": 1.6388140161725069,
"grad_norm": 0.5754110217094421,
"learning_rate": 0.0005022320561252023,
"loss": 3.6606,
"step": 15200
},
{
"epoch": 1.6442048517520216,
"grad_norm": 0.592934250831604,
"learning_rate": 0.0005019082568807339,
"loss": 3.7035,
"step": 15250
},
{
"epoch": 1.6495956873315363,
"grad_norm": 0.5429046154022217,
"learning_rate": 0.0005015844576362655,
"loss": 3.687,
"step": 15300
},
{
"epoch": 1.6549865229110512,
"grad_norm": 0.5521410703659058,
"learning_rate": 0.000501260658391797,
"loss": 3.6814,
"step": 15350
},
{
"epoch": 1.6603773584905661,
"grad_norm": 0.5523940324783325,
"learning_rate": 0.0005009368591473286,
"loss": 3.671,
"step": 15400
},
{
"epoch": 1.6657681940700808,
"grad_norm": 0.5145202875137329,
"learning_rate": 0.0005006130599028602,
"loss": 3.6823,
"step": 15450
},
{
"epoch": 1.6711590296495957,
"grad_norm": 0.541299045085907,
"learning_rate": 0.0005002892606583918,
"loss": 3.6941,
"step": 15500
},
{
"epoch": 1.6765498652291106,
"grad_norm": 0.578633189201355,
"learning_rate": 0.0004999654614139233,
"loss": 3.6671,
"step": 15550
},
{
"epoch": 1.6819407008086253,
"grad_norm": 0.5630112290382385,
"learning_rate": 0.0004996416621694549,
"loss": 3.6661,
"step": 15600
},
{
"epoch": 1.68733153638814,
"grad_norm": 0.546072244644165,
"learning_rate": 0.0004993178629249864,
"loss": 3.663,
"step": 15650
},
{
"epoch": 1.692722371967655,
"grad_norm": 0.6002097725868225,
"learning_rate": 0.000498994063680518,
"loss": 3.6783,
"step": 15700
},
{
"epoch": 1.6981132075471699,
"grad_norm": 0.6249855756759644,
"learning_rate": 0.000498676740420939,
"loss": 3.6534,
"step": 15750
},
{
"epoch": 1.7035040431266846,
"grad_norm": 0.5264080166816711,
"learning_rate": 0.0004983529411764705,
"loss": 3.6742,
"step": 15800
},
{
"epoch": 1.7088948787061995,
"grad_norm": 0.5488578081130981,
"learning_rate": 0.0004980291419320022,
"loss": 3.6645,
"step": 15850
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.5747875571250916,
"learning_rate": 0.0004977053426875337,
"loss": 3.6661,
"step": 15900
},
{
"epoch": 1.719676549865229,
"grad_norm": 0.5004916191101074,
"learning_rate": 0.0004973815434430653,
"loss": 3.6556,
"step": 15950
},
{
"epoch": 1.7250673854447438,
"grad_norm": 0.5628464221954346,
"learning_rate": 0.0004970577441985968,
"loss": 3.6554,
"step": 16000
},
{
"epoch": 1.7250673854447438,
"eval_accuracy": 0.3569113721653684,
"eval_loss": 3.6237761974334717,
"eval_runtime": 181.1991,
"eval_samples_per_second": 99.399,
"eval_steps_per_second": 6.214,
"step": 16000
},
{
"epoch": 1.7304582210242587,
"grad_norm": 0.5369266271591187,
"learning_rate": 0.0004967339449541284,
"loss": 3.6758,
"step": 16050
},
{
"epoch": 1.7358490566037736,
"grad_norm": 0.575958788394928,
"learning_rate": 0.00049641014570966,
"loss": 3.6766,
"step": 16100
},
{
"epoch": 1.7412398921832883,
"grad_norm": 0.6171749234199524,
"learning_rate": 0.0004960863464651916,
"loss": 3.658,
"step": 16150
},
{
"epoch": 1.7466307277628033,
"grad_norm": 0.5248631238937378,
"learning_rate": 0.0004957625472207231,
"loss": 3.6652,
"step": 16200
},
{
"epoch": 1.7520215633423182,
"grad_norm": 0.5663668513298035,
"learning_rate": 0.0004954387479762547,
"loss": 3.6645,
"step": 16250
},
{
"epoch": 1.7574123989218329,
"grad_norm": 0.5405056476593018,
"learning_rate": 0.0004951149487317862,
"loss": 3.6633,
"step": 16300
},
{
"epoch": 1.7628032345013476,
"grad_norm": 0.5469680428504944,
"learning_rate": 0.0004947911494873178,
"loss": 3.6642,
"step": 16350
},
{
"epoch": 1.7681940700808625,
"grad_norm": 0.5379207134246826,
"learning_rate": 0.0004944673502428493,
"loss": 3.6666,
"step": 16400
},
{
"epoch": 1.7735849056603774,
"grad_norm": 0.5439175963401794,
"learning_rate": 0.000494143550998381,
"loss": 3.6703,
"step": 16450
},
{
"epoch": 1.778975741239892,
"grad_norm": 0.5585680603981018,
"learning_rate": 0.0004938197517539125,
"loss": 3.666,
"step": 16500
},
{
"epoch": 1.784366576819407,
"grad_norm": 0.5458288788795471,
"learning_rate": 0.0004934959525094441,
"loss": 3.6453,
"step": 16550
},
{
"epoch": 1.789757412398922,
"grad_norm": 0.5485697388648987,
"learning_rate": 0.0004931721532649756,
"loss": 3.6609,
"step": 16600
},
{
"epoch": 1.7951482479784366,
"grad_norm": 0.5749149918556213,
"learning_rate": 0.0004928483540205073,
"loss": 3.6645,
"step": 16650
},
{
"epoch": 1.8005390835579513,
"grad_norm": 0.5598737001419067,
"learning_rate": 0.0004925245547760388,
"loss": 3.6582,
"step": 16700
},
{
"epoch": 1.8059299191374663,
"grad_norm": 0.5720735788345337,
"learning_rate": 0.0004922007555315704,
"loss": 3.6479,
"step": 16750
},
{
"epoch": 1.8113207547169812,
"grad_norm": 0.544169008731842,
"learning_rate": 0.000491876956287102,
"loss": 3.6539,
"step": 16800
},
{
"epoch": 1.8167115902964959,
"grad_norm": 0.587104082107544,
"learning_rate": 0.0004915531570426335,
"loss": 3.6497,
"step": 16850
},
{
"epoch": 1.8221024258760108,
"grad_norm": 0.556251585483551,
"learning_rate": 0.0004912293577981652,
"loss": 3.6587,
"step": 16900
},
{
"epoch": 1.8274932614555257,
"grad_norm": 0.5477184057235718,
"learning_rate": 0.0004909055585536966,
"loss": 3.6421,
"step": 16950
},
{
"epoch": 1.8328840970350404,
"grad_norm": 0.5440017580986023,
"learning_rate": 0.0004905817593092283,
"loss": 3.6345,
"step": 17000
},
{
"epoch": 1.8328840970350404,
"eval_accuracy": 0.3582564949165117,
"eval_loss": 3.606245994567871,
"eval_runtime": 181.6324,
"eval_samples_per_second": 99.162,
"eval_steps_per_second": 6.199,
"step": 17000
},
{
"epoch": 1.838274932614555,
"grad_norm": 0.5022566318511963,
"learning_rate": 0.0004902579600647598,
"loss": 3.6459,
"step": 17050
},
{
"epoch": 1.8436657681940702,
"grad_norm": 0.572801947593689,
"learning_rate": 0.0004899341608202914,
"loss": 3.646,
"step": 17100
},
{
"epoch": 1.849056603773585,
"grad_norm": 0.5857950448989868,
"learning_rate": 0.0004896103615758229,
"loss": 3.6343,
"step": 17150
},
{
"epoch": 1.8544474393530996,
"grad_norm": 0.529326856136322,
"learning_rate": 0.0004892865623313546,
"loss": 3.6621,
"step": 17200
},
{
"epoch": 1.8598382749326146,
"grad_norm": 0.5415377616882324,
"learning_rate": 0.0004889627630868861,
"loss": 3.6692,
"step": 17250
},
{
"epoch": 1.8652291105121295,
"grad_norm": 0.5492255091667175,
"learning_rate": 0.0004886389638424177,
"loss": 3.6534,
"step": 17300
},
{
"epoch": 1.8706199460916442,
"grad_norm": 0.5427126884460449,
"learning_rate": 0.0004883151645979492,
"loss": 3.6425,
"step": 17350
},
{
"epoch": 1.8760107816711589,
"grad_norm": 0.561755359172821,
"learning_rate": 0.00048799136535348077,
"loss": 3.6583,
"step": 17400
},
{
"epoch": 1.881401617250674,
"grad_norm": 0.5410698056221008,
"learning_rate": 0.0004876675661090124,
"loss": 3.6306,
"step": 17450
},
{
"epoch": 1.8867924528301887,
"grad_norm": 0.542617678642273,
"learning_rate": 0.0004873437668645439,
"loss": 3.6534,
"step": 17500
},
{
"epoch": 1.8921832884097034,
"grad_norm": 0.5104948878288269,
"learning_rate": 0.00048701996762007553,
"loss": 3.6543,
"step": 17550
},
{
"epoch": 1.8975741239892183,
"grad_norm": 0.6261042356491089,
"learning_rate": 0.0004866961683756071,
"loss": 3.6425,
"step": 17600
},
{
"epoch": 1.9029649595687332,
"grad_norm": 0.5958898067474365,
"learning_rate": 0.0004863723691311387,
"loss": 3.6418,
"step": 17650
},
{
"epoch": 1.908355795148248,
"grad_norm": 0.5822218656539917,
"learning_rate": 0.00048604856988667024,
"loss": 3.6586,
"step": 17700
},
{
"epoch": 1.9137466307277629,
"grad_norm": 0.5634023547172546,
"learning_rate": 0.0004857247706422018,
"loss": 3.6335,
"step": 17750
},
{
"epoch": 1.9191374663072778,
"grad_norm": 0.5490560531616211,
"learning_rate": 0.00048540744738262274,
"loss": 3.6368,
"step": 17800
},
{
"epoch": 1.9245283018867925,
"grad_norm": 0.5121088027954102,
"learning_rate": 0.00048508364813815434,
"loss": 3.6266,
"step": 17850
},
{
"epoch": 1.9299191374663072,
"grad_norm": 0.5225231647491455,
"learning_rate": 0.00048475984889368584,
"loss": 3.6324,
"step": 17900
},
{
"epoch": 1.935309973045822,
"grad_norm": 0.5291708111763,
"learning_rate": 0.0004844360496492175,
"loss": 3.6424,
"step": 17950
},
{
"epoch": 1.940700808625337,
"grad_norm": 0.5437564849853516,
"learning_rate": 0.000484112250404749,
"loss": 3.6469,
"step": 18000
},
{
"epoch": 1.940700808625337,
"eval_accuracy": 0.3598998698447057,
"eval_loss": 3.5913586616516113,
"eval_runtime": 181.4123,
"eval_samples_per_second": 99.282,
"eval_steps_per_second": 6.207,
"step": 18000
},
{
"epoch": 1.9460916442048517,
"grad_norm": 0.5686632394790649,
"learning_rate": 0.00048378845116028055,
"loss": 3.6235,
"step": 18050
},
{
"epoch": 1.9514824797843666,
"grad_norm": 0.5239707231521606,
"learning_rate": 0.00048346465191581215,
"loss": 3.6487,
"step": 18100
},
{
"epoch": 1.9568733153638815,
"grad_norm": 0.5632966756820679,
"learning_rate": 0.0004831408526713437,
"loss": 3.642,
"step": 18150
},
{
"epoch": 1.9622641509433962,
"grad_norm": 0.5901452898979187,
"learning_rate": 0.0004828170534268753,
"loss": 3.6363,
"step": 18200
},
{
"epoch": 1.967654986522911,
"grad_norm": 0.5481223464012146,
"learning_rate": 0.00048249325418240686,
"loss": 3.6498,
"step": 18250
},
{
"epoch": 1.9730458221024259,
"grad_norm": 0.5510320067405701,
"learning_rate": 0.00048216945493793846,
"loss": 3.6471,
"step": 18300
},
{
"epoch": 1.9784366576819408,
"grad_norm": 0.5957186222076416,
"learning_rate": 0.00048184565569347,
"loss": 3.6295,
"step": 18350
},
{
"epoch": 1.9838274932614555,
"grad_norm": 0.5391411781311035,
"learning_rate": 0.0004815218564490016,
"loss": 3.6625,
"step": 18400
},
{
"epoch": 1.9892183288409704,
"grad_norm": 0.5556368231773376,
"learning_rate": 0.00048119805720453317,
"loss": 3.6166,
"step": 18450
},
{
"epoch": 1.9946091644204853,
"grad_norm": 0.6190194487571716,
"learning_rate": 0.0004808742579600647,
"loss": 3.6231,
"step": 18500
},
{
"epoch": 2.0,
"grad_norm": 1.197678804397583,
"learning_rate": 0.0004805504587155963,
"loss": 3.6412,
"step": 18550
},
{
"epoch": 2.0053908355795147,
"grad_norm": 0.5590324401855469,
"learning_rate": 0.0004802266594711278,
"loss": 3.5499,
"step": 18600
},
{
"epoch": 2.01078167115903,
"grad_norm": 0.5855118632316589,
"learning_rate": 0.0004799028602266594,
"loss": 3.5515,
"step": 18650
},
{
"epoch": 2.0161725067385445,
"grad_norm": 0.5683425664901733,
"learning_rate": 0.000479579060982191,
"loss": 3.5434,
"step": 18700
},
{
"epoch": 2.0215633423180592,
"grad_norm": 0.5408341884613037,
"learning_rate": 0.0004792552617377226,
"loss": 3.5613,
"step": 18750
},
{
"epoch": 2.026954177897574,
"grad_norm": 0.5905522108078003,
"learning_rate": 0.00047893146249325413,
"loss": 3.5492,
"step": 18800
},
{
"epoch": 2.032345013477089,
"grad_norm": 0.5483571887016296,
"learning_rate": 0.0004786076632487857,
"loss": 3.5309,
"step": 18850
},
{
"epoch": 2.0377358490566038,
"grad_norm": 0.5604623556137085,
"learning_rate": 0.0004782838640043173,
"loss": 3.5583,
"step": 18900
},
{
"epoch": 2.0431266846361185,
"grad_norm": 0.5344943404197693,
"learning_rate": 0.00047796006475984883,
"loss": 3.5482,
"step": 18950
},
{
"epoch": 2.0485175202156336,
"grad_norm": 0.5930701494216919,
"learning_rate": 0.00047763626551538044,
"loss": 3.5639,
"step": 19000
},
{
"epoch": 2.0485175202156336,
"eval_accuracy": 0.36086101329031256,
"eval_loss": 3.584812641143799,
"eval_runtime": 181.1798,
"eval_samples_per_second": 99.41,
"eval_steps_per_second": 6.215,
"step": 19000
},
{
"epoch": 2.0539083557951483,
"grad_norm": 0.597104012966156,
"learning_rate": 0.000477312466270912,
"loss": 3.5241,
"step": 19050
},
{
"epoch": 2.059299191374663,
"grad_norm": 0.5604715943336487,
"learning_rate": 0.0004769886670264436,
"loss": 3.5646,
"step": 19100
},
{
"epoch": 2.0646900269541777,
"grad_norm": 0.5276287198066711,
"learning_rate": 0.00047666486778197515,
"loss": 3.549,
"step": 19150
},
{
"epoch": 2.070080862533693,
"grad_norm": 0.5561823844909668,
"learning_rate": 0.00047634106853750675,
"loss": 3.5711,
"step": 19200
},
{
"epoch": 2.0754716981132075,
"grad_norm": 0.6093335151672363,
"learning_rate": 0.00047601726929303825,
"loss": 3.5684,
"step": 19250
},
{
"epoch": 2.0808625336927222,
"grad_norm": 0.588876485824585,
"learning_rate": 0.0004756934700485698,
"loss": 3.5646,
"step": 19300
},
{
"epoch": 2.0862533692722374,
"grad_norm": 0.5568276643753052,
"learning_rate": 0.0004753696708041014,
"loss": 3.5551,
"step": 19350
},
{
"epoch": 2.091644204851752,
"grad_norm": 0.5360884666442871,
"learning_rate": 0.00047504587155963295,
"loss": 3.5605,
"step": 19400
},
{
"epoch": 2.0970350404312668,
"grad_norm": 0.5348809361457825,
"learning_rate": 0.00047472207231516456,
"loss": 3.5486,
"step": 19450
},
{
"epoch": 2.1024258760107815,
"grad_norm": 0.6352576017379761,
"learning_rate": 0.0004743982730706961,
"loss": 3.5433,
"step": 19500
},
{
"epoch": 2.1078167115902966,
"grad_norm": 0.570564329624176,
"learning_rate": 0.0004740744738262277,
"loss": 3.5608,
"step": 19550
},
{
"epoch": 2.1132075471698113,
"grad_norm": 0.5465729832649231,
"learning_rate": 0.00047375067458175926,
"loss": 3.5578,
"step": 19600
},
{
"epoch": 2.118598382749326,
"grad_norm": 0.5874576568603516,
"learning_rate": 0.00047342687533729087,
"loss": 3.5579,
"step": 19650
},
{
"epoch": 2.123989218328841,
"grad_norm": 0.59136563539505,
"learning_rate": 0.0004731030760928224,
"loss": 3.5486,
"step": 19700
},
{
"epoch": 2.129380053908356,
"grad_norm": 0.6302415728569031,
"learning_rate": 0.00047277927684835397,
"loss": 3.5422,
"step": 19750
},
{
"epoch": 2.1347708894878705,
"grad_norm": 0.544438898563385,
"learning_rate": 0.0004724619535887749,
"loss": 3.5742,
"step": 19800
},
{
"epoch": 2.1401617250673857,
"grad_norm": 0.6034201383590698,
"learning_rate": 0.0004721381543443065,
"loss": 3.5728,
"step": 19850
},
{
"epoch": 2.1455525606469004,
"grad_norm": 0.6693742871284485,
"learning_rate": 0.000471814355099838,
"loss": 3.5634,
"step": 19900
},
{
"epoch": 2.150943396226415,
"grad_norm": 0.5606766939163208,
"learning_rate": 0.0004714905558553697,
"loss": 3.5611,
"step": 19950
},
{
"epoch": 2.1563342318059298,
"grad_norm": 0.600206196308136,
"learning_rate": 0.0004711667566109012,
"loss": 3.5606,
"step": 20000
},
{
"epoch": 2.1563342318059298,
"eval_accuracy": 0.36217603919153124,
"eval_loss": 3.5728728771209717,
"eval_runtime": 181.6212,
"eval_samples_per_second": 99.168,
"eval_steps_per_second": 6.2,
"step": 20000
},
{
"epoch": 2.161725067385445,
"grad_norm": 0.5841259956359863,
"learning_rate": 0.00047084295736643273,
"loss": 3.5593,
"step": 20050
},
{
"epoch": 2.1671159029649596,
"grad_norm": 0.6095578670501709,
"learning_rate": 0.00047051915812196433,
"loss": 3.5696,
"step": 20100
},
{
"epoch": 2.1725067385444743,
"grad_norm": 0.5807099938392639,
"learning_rate": 0.0004701953588774959,
"loss": 3.546,
"step": 20150
},
{
"epoch": 2.177897574123989,
"grad_norm": 0.5789870023727417,
"learning_rate": 0.0004698715596330275,
"loss": 3.5722,
"step": 20200
},
{
"epoch": 2.183288409703504,
"grad_norm": 0.5594269037246704,
"learning_rate": 0.00046954776038855904,
"loss": 3.5715,
"step": 20250
},
{
"epoch": 2.188679245283019,
"grad_norm": 0.643621027469635,
"learning_rate": 0.00046922396114409064,
"loss": 3.5454,
"step": 20300
},
{
"epoch": 2.1940700808625335,
"grad_norm": 0.5453854203224182,
"learning_rate": 0.0004689001618996222,
"loss": 3.5642,
"step": 20350
},
{
"epoch": 2.1994609164420487,
"grad_norm": 0.5129548907279968,
"learning_rate": 0.0004685763626551538,
"loss": 3.5621,
"step": 20400
},
{
"epoch": 2.2048517520215634,
"grad_norm": 0.6147149205207825,
"learning_rate": 0.00046825256341068535,
"loss": 3.5631,
"step": 20450
},
{
"epoch": 2.210242587601078,
"grad_norm": 0.5020909905433655,
"learning_rate": 0.0004679287641662169,
"loss": 3.5656,
"step": 20500
},
{
"epoch": 2.215633423180593,
"grad_norm": 0.6170671582221985,
"learning_rate": 0.0004676049649217485,
"loss": 3.5674,
"step": 20550
},
{
"epoch": 2.221024258760108,
"grad_norm": 0.5955941081047058,
"learning_rate": 0.00046728764166216946,
"loss": 3.5746,
"step": 20600
},
{
"epoch": 2.2264150943396226,
"grad_norm": 0.5670434832572937,
"learning_rate": 0.00046696384241770095,
"loss": 3.5567,
"step": 20650
},
{
"epoch": 2.2318059299191373,
"grad_norm": 0.570024311542511,
"learning_rate": 0.00046664004317323256,
"loss": 3.5609,
"step": 20700
},
{
"epoch": 2.2371967654986524,
"grad_norm": 0.5753904581069946,
"learning_rate": 0.0004663162439287641,
"loss": 3.563,
"step": 20750
},
{
"epoch": 2.242587601078167,
"grad_norm": 0.5676912665367126,
"learning_rate": 0.00046599244468429566,
"loss": 3.5655,
"step": 20800
},
{
"epoch": 2.247978436657682,
"grad_norm": 0.5626962780952454,
"learning_rate": 0.00046566864543982726,
"loss": 3.5484,
"step": 20850
},
{
"epoch": 2.2533692722371965,
"grad_norm": 0.6296677589416504,
"learning_rate": 0.0004653448461953588,
"loss": 3.5597,
"step": 20900
},
{
"epoch": 2.2587601078167117,
"grad_norm": 0.59935063123703,
"learning_rate": 0.0004650210469508904,
"loss": 3.5682,
"step": 20950
},
{
"epoch": 2.2641509433962264,
"grad_norm": 0.5628940463066101,
"learning_rate": 0.00046469724770642197,
"loss": 3.5497,
"step": 21000
},
{
"epoch": 2.2641509433962264,
"eval_accuracy": 0.3636019996912085,
"eval_loss": 3.5614914894104004,
"eval_runtime": 180.9956,
"eval_samples_per_second": 99.511,
"eval_steps_per_second": 6.221,
"step": 21000
},
{
"epoch": 2.269541778975741,
"grad_norm": 0.5957950353622437,
"learning_rate": 0.0004643734484619536,
"loss": 3.5417,
"step": 21050
},
{
"epoch": 2.274932614555256,
"grad_norm": 0.5501925349235535,
"learning_rate": 0.0004640496492174851,
"loss": 3.5728,
"step": 21100
},
{
"epoch": 2.280323450134771,
"grad_norm": 0.5697695016860962,
"learning_rate": 0.00046372584997301673,
"loss": 3.5687,
"step": 21150
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.5633260607719421,
"learning_rate": 0.0004634020507285483,
"loss": 3.5589,
"step": 21200
},
{
"epoch": 2.2911051212938007,
"grad_norm": 0.5334222316741943,
"learning_rate": 0.0004630782514840798,
"loss": 3.5327,
"step": 21250
},
{
"epoch": 2.2964959568733154,
"grad_norm": 0.5983673334121704,
"learning_rate": 0.0004627544522396114,
"loss": 3.5524,
"step": 21300
},
{
"epoch": 2.30188679245283,
"grad_norm": 0.5501682758331299,
"learning_rate": 0.00046243065299514293,
"loss": 3.5701,
"step": 21350
},
{
"epoch": 2.3072776280323453,
"grad_norm": 0.5882470011711121,
"learning_rate": 0.00046210685375067454,
"loss": 3.546,
"step": 21400
},
{
"epoch": 2.31266846361186,
"grad_norm": 0.5191405415534973,
"learning_rate": 0.0004617830545062061,
"loss": 3.5579,
"step": 21450
},
{
"epoch": 2.3180592991913747,
"grad_norm": 0.5627714395523071,
"learning_rate": 0.0004614592552617377,
"loss": 3.5624,
"step": 21500
},
{
"epoch": 2.3234501347708894,
"grad_norm": 0.578151285648346,
"learning_rate": 0.00046113545601726924,
"loss": 3.5502,
"step": 21550
},
{
"epoch": 2.3288409703504045,
"grad_norm": 0.5719261169433594,
"learning_rate": 0.00046081165677280085,
"loss": 3.5608,
"step": 21600
},
{
"epoch": 2.334231805929919,
"grad_norm": 0.5572363138198853,
"learning_rate": 0.0004604878575283324,
"loss": 3.5599,
"step": 21650
},
{
"epoch": 2.339622641509434,
"grad_norm": 0.6042853593826294,
"learning_rate": 0.00046016405828386395,
"loss": 3.561,
"step": 21700
},
{
"epoch": 2.3450134770889486,
"grad_norm": 0.5274094343185425,
"learning_rate": 0.00045984025903939555,
"loss": 3.5605,
"step": 21750
},
{
"epoch": 2.3504043126684637,
"grad_norm": 0.5416309237480164,
"learning_rate": 0.0004595164597949271,
"loss": 3.5678,
"step": 21800
},
{
"epoch": 2.3557951482479784,
"grad_norm": 0.5653731822967529,
"learning_rate": 0.0004591926605504587,
"loss": 3.5622,
"step": 21850
},
{
"epoch": 2.361185983827493,
"grad_norm": 0.5564022064208984,
"learning_rate": 0.0004588688613059902,
"loss": 3.5496,
"step": 21900
},
{
"epoch": 2.3665768194070083,
"grad_norm": 0.5706598162651062,
"learning_rate": 0.00045854506206152186,
"loss": 3.5628,
"step": 21950
},
{
"epoch": 2.371967654986523,
"grad_norm": 0.5681759715080261,
"learning_rate": 0.00045822126281705336,
"loss": 3.5624,
"step": 22000
},
{
"epoch": 2.371967654986523,
"eval_accuracy": 0.36438560431812655,
"eval_loss": 3.550971269607544,
"eval_runtime": 181.5333,
"eval_samples_per_second": 99.216,
"eval_steps_per_second": 6.203,
"step": 22000
},
{
"epoch": 2.3773584905660377,
"grad_norm": 0.6144738793373108,
"learning_rate": 0.00045789746357258497,
"loss": 3.538,
"step": 22050
},
{
"epoch": 2.382749326145553,
"grad_norm": 0.5621081590652466,
"learning_rate": 0.0004575736643281165,
"loss": 3.5622,
"step": 22100
},
{
"epoch": 2.3881401617250675,
"grad_norm": 0.5389125943183899,
"learning_rate": 0.00045724986508364807,
"loss": 3.5571,
"step": 22150
},
{
"epoch": 2.393530997304582,
"grad_norm": 0.6281595230102539,
"learning_rate": 0.00045692606583917967,
"loss": 3.5816,
"step": 22200
},
{
"epoch": 2.398921832884097,
"grad_norm": 0.573566734790802,
"learning_rate": 0.0004566022665947112,
"loss": 3.551,
"step": 22250
},
{
"epoch": 2.404312668463612,
"grad_norm": 0.6374450922012329,
"learning_rate": 0.0004562784673502428,
"loss": 3.5344,
"step": 22300
},
{
"epoch": 2.4097035040431267,
"grad_norm": 0.5555562376976013,
"learning_rate": 0.0004559546681057744,
"loss": 3.5563,
"step": 22350
},
{
"epoch": 2.4150943396226414,
"grad_norm": 0.5741384029388428,
"learning_rate": 0.000455630868861306,
"loss": 3.5455,
"step": 22400
},
{
"epoch": 2.420485175202156,
"grad_norm": 0.6011936664581299,
"learning_rate": 0.00045530706961683753,
"loss": 3.5751,
"step": 22450
},
{
"epoch": 2.4258760107816713,
"grad_norm": 0.6304056644439697,
"learning_rate": 0.00045498327037236914,
"loss": 3.5478,
"step": 22500
},
{
"epoch": 2.431266846361186,
"grad_norm": 0.5553408861160278,
"learning_rate": 0.0004546594711279007,
"loss": 3.5519,
"step": 22550
},
{
"epoch": 2.4366576819407006,
"grad_norm": 0.514491081237793,
"learning_rate": 0.0004543356718834322,
"loss": 3.5529,
"step": 22600
},
{
"epoch": 2.442048517520216,
"grad_norm": 0.5898559093475342,
"learning_rate": 0.0004540118726389638,
"loss": 3.5497,
"step": 22650
},
{
"epoch": 2.4474393530997305,
"grad_norm": 0.5183122158050537,
"learning_rate": 0.00045368807339449534,
"loss": 3.539,
"step": 22700
},
{
"epoch": 2.452830188679245,
"grad_norm": 0.588056206703186,
"learning_rate": 0.00045336427415002694,
"loss": 3.5642,
"step": 22750
},
{
"epoch": 2.4582210242587603,
"grad_norm": 0.5579840540885925,
"learning_rate": 0.0004530404749055585,
"loss": 3.5741,
"step": 22800
},
{
"epoch": 2.463611859838275,
"grad_norm": 0.6022590398788452,
"learning_rate": 0.0004527166756610901,
"loss": 3.5405,
"step": 22850
},
{
"epoch": 2.4690026954177897,
"grad_norm": 0.5468297600746155,
"learning_rate": 0.00045239287641662165,
"loss": 3.5504,
"step": 22900
},
{
"epoch": 2.4743935309973044,
"grad_norm": 0.576793372631073,
"learning_rate": 0.0004520690771721532,
"loss": 3.5537,
"step": 22950
},
{
"epoch": 2.4797843665768196,
"grad_norm": 0.5995750427246094,
"learning_rate": 0.0004517452779276848,
"loss": 3.5459,
"step": 23000
},
{
"epoch": 2.4797843665768196,
"eval_accuracy": 0.3654815859975112,
"eval_loss": 3.5396082401275635,
"eval_runtime": 180.9737,
"eval_samples_per_second": 99.523,
"eval_steps_per_second": 6.222,
"step": 23000
},
{
"epoch": 2.4851752021563343,
"grad_norm": 0.6596251130104065,
"learning_rate": 0.00045142147868321636,
"loss": 3.5446,
"step": 23050
},
{
"epoch": 2.490566037735849,
"grad_norm": 0.5252313017845154,
"learning_rate": 0.00045109767943874796,
"loss": 3.5398,
"step": 23100
},
{
"epoch": 2.4959568733153636,
"grad_norm": 0.6100819706916809,
"learning_rate": 0.0004507738801942795,
"loss": 3.5374,
"step": 23150
},
{
"epoch": 2.501347708894879,
"grad_norm": 0.5994240045547485,
"learning_rate": 0.0004504500809498111,
"loss": 3.5518,
"step": 23200
},
{
"epoch": 2.5067385444743935,
"grad_norm": 0.5911756753921509,
"learning_rate": 0.0004501262817053426,
"loss": 3.5651,
"step": 23250
},
{
"epoch": 2.512129380053908,
"grad_norm": 0.5529770851135254,
"learning_rate": 0.00044980248246087427,
"loss": 3.5623,
"step": 23300
},
{
"epoch": 2.5175202156334233,
"grad_norm": 0.5851297378540039,
"learning_rate": 0.00044947868321640577,
"loss": 3.5372,
"step": 23350
},
{
"epoch": 2.522911051212938,
"grad_norm": 0.5575948357582092,
"learning_rate": 0.0004491548839719373,
"loss": 3.5416,
"step": 23400
},
{
"epoch": 2.5283018867924527,
"grad_norm": 0.5790318250656128,
"learning_rate": 0.0004488310847274689,
"loss": 3.5464,
"step": 23450
},
{
"epoch": 2.533692722371968,
"grad_norm": 0.5353867411613464,
"learning_rate": 0.0004485072854830005,
"loss": 3.5237,
"step": 23500
},
{
"epoch": 2.5390835579514826,
"grad_norm": 0.6083977818489075,
"learning_rate": 0.0004481834862385321,
"loss": 3.5601,
"step": 23550
},
{
"epoch": 2.5444743935309972,
"grad_norm": 0.5814011096954346,
"learning_rate": 0.00044785968699406363,
"loss": 3.5533,
"step": 23600
},
{
"epoch": 2.5498652291105124,
"grad_norm": 0.5686614513397217,
"learning_rate": 0.00044753588774959523,
"loss": 3.5471,
"step": 23650
},
{
"epoch": 2.555256064690027,
"grad_norm": 0.6276265978813171,
"learning_rate": 0.0004472120885051268,
"loss": 3.5638,
"step": 23700
},
{
"epoch": 2.560646900269542,
"grad_norm": 0.5724161863327026,
"learning_rate": 0.0004468882892606584,
"loss": 3.5684,
"step": 23750
},
{
"epoch": 2.5660377358490565,
"grad_norm": 0.6355558633804321,
"learning_rate": 0.00044656449001618994,
"loss": 3.5441,
"step": 23800
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.6126329898834229,
"learning_rate": 0.0004462406907717215,
"loss": 3.5433,
"step": 23850
},
{
"epoch": 2.5768194070080863,
"grad_norm": 0.6300326585769653,
"learning_rate": 0.0004459168915272531,
"loss": 3.5409,
"step": 23900
},
{
"epoch": 2.582210242587601,
"grad_norm": 0.5199249386787415,
"learning_rate": 0.0004455930922827846,
"loss": 3.5678,
"step": 23950
},
{
"epoch": 2.5876010781671157,
"grad_norm": 0.5622717142105103,
"learning_rate": 0.0004452692930383162,
"loss": 3.5482,
"step": 24000
},
{
"epoch": 2.5876010781671157,
"eval_accuracy": 0.36659712519670246,
"eval_loss": 3.5317554473876953,
"eval_runtime": 181.2936,
"eval_samples_per_second": 99.347,
"eval_steps_per_second": 6.211,
"step": 24000
},
{
"epoch": 2.592991913746631,
"grad_norm": 0.6593669056892395,
"learning_rate": 0.00044494549379384775,
"loss": 3.5524,
"step": 24050
},
{
"epoch": 2.5983827493261455,
"grad_norm": 0.6096122860908508,
"learning_rate": 0.00044462169454937935,
"loss": 3.5285,
"step": 24100
},
{
"epoch": 2.6037735849056602,
"grad_norm": 0.5589276552200317,
"learning_rate": 0.0004442978953049109,
"loss": 3.5507,
"step": 24150
},
{
"epoch": 2.6091644204851754,
"grad_norm": 0.593991219997406,
"learning_rate": 0.0004439740960604425,
"loss": 3.5445,
"step": 24200
},
{
"epoch": 2.61455525606469,
"grad_norm": 0.5771310329437256,
"learning_rate": 0.00044365029681597406,
"loss": 3.5493,
"step": 24250
},
{
"epoch": 2.6199460916442048,
"grad_norm": 0.6104580760002136,
"learning_rate": 0.0004433264975715056,
"loss": 3.5519,
"step": 24300
},
{
"epoch": 2.62533692722372,
"grad_norm": 0.5800246000289917,
"learning_rate": 0.0004430026983270372,
"loss": 3.5551,
"step": 24350
},
{
"epoch": 2.6307277628032346,
"grad_norm": 0.5594913363456726,
"learning_rate": 0.00044267889908256876,
"loss": 3.5318,
"step": 24400
},
{
"epoch": 2.6361185983827493,
"grad_norm": 0.570894181728363,
"learning_rate": 0.00044235509983810037,
"loss": 3.5575,
"step": 24450
},
{
"epoch": 2.641509433962264,
"grad_norm": 0.5746343731880188,
"learning_rate": 0.0004420313005936319,
"loss": 3.5216,
"step": 24500
},
{
"epoch": 2.6469002695417787,
"grad_norm": 0.5852980613708496,
"learning_rate": 0.0004417075013491635,
"loss": 3.527,
"step": 24550
},
{
"epoch": 2.652291105121294,
"grad_norm": 0.6170436143875122,
"learning_rate": 0.0004413837021046951,
"loss": 3.5347,
"step": 24600
},
{
"epoch": 2.6576819407008085,
"grad_norm": 0.5268665552139282,
"learning_rate": 0.00044106637884511597,
"loss": 3.5423,
"step": 24650
},
{
"epoch": 2.6630727762803232,
"grad_norm": 0.5274102687835693,
"learning_rate": 0.0004407425796006475,
"loss": 3.542,
"step": 24700
},
{
"epoch": 2.6684636118598384,
"grad_norm": 0.562574028968811,
"learning_rate": 0.00044041878035617913,
"loss": 3.5585,
"step": 24750
},
{
"epoch": 2.673854447439353,
"grad_norm": 0.6228138208389282,
"learning_rate": 0.0004400949811117107,
"loss": 3.5354,
"step": 24800
},
{
"epoch": 2.6792452830188678,
"grad_norm": 0.5509768724441528,
"learning_rate": 0.0004397711818672423,
"loss": 3.534,
"step": 24850
},
{
"epoch": 2.684636118598383,
"grad_norm": 0.5812339186668396,
"learning_rate": 0.00043944738262277383,
"loss": 3.5431,
"step": 24900
},
{
"epoch": 2.6900269541778976,
"grad_norm": 0.5285933017730713,
"learning_rate": 0.00043912358337830544,
"loss": 3.5385,
"step": 24950
},
{
"epoch": 2.6954177897574123,
"grad_norm": 0.5929449200630188,
"learning_rate": 0.000438799784133837,
"loss": 3.5542,
"step": 25000
},
{
"epoch": 2.6954177897574123,
"eval_accuracy": 0.3673601944278236,
"eval_loss": 3.5220768451690674,
"eval_runtime": 181.2911,
"eval_samples_per_second": 99.349,
"eval_steps_per_second": 6.211,
"step": 25000
},
{
"epoch": 2.7008086253369274,
"grad_norm": 0.6103118658065796,
"learning_rate": 0.00043847598488936854,
"loss": 3.5537,
"step": 25050
},
{
"epoch": 2.706199460916442,
"grad_norm": 0.6209373474121094,
"learning_rate": 0.00043815218564490014,
"loss": 3.5647,
"step": 25100
},
{
"epoch": 2.711590296495957,
"grad_norm": 0.6222750544548035,
"learning_rate": 0.0004378283864004317,
"loss": 3.5444,
"step": 25150
},
{
"epoch": 2.7169811320754715,
"grad_norm": 0.5424869060516357,
"learning_rate": 0.0004375045871559633,
"loss": 3.5295,
"step": 25200
},
{
"epoch": 2.7223719676549867,
"grad_norm": 0.5619545578956604,
"learning_rate": 0.00043718078791149485,
"loss": 3.5323,
"step": 25250
},
{
"epoch": 2.7277628032345014,
"grad_norm": 0.5697439312934875,
"learning_rate": 0.00043685698866702645,
"loss": 3.5397,
"step": 25300
},
{
"epoch": 2.733153638814016,
"grad_norm": 0.5689240097999573,
"learning_rate": 0.00043653318942255795,
"loss": 3.5332,
"step": 25350
},
{
"epoch": 2.7385444743935308,
"grad_norm": 0.5533002018928528,
"learning_rate": 0.00043620939017808956,
"loss": 3.5223,
"step": 25400
},
{
"epoch": 2.743935309973046,
"grad_norm": 0.5482644438743591,
"learning_rate": 0.0004358855909336211,
"loss": 3.5409,
"step": 25450
},
{
"epoch": 2.7493261455525606,
"grad_norm": 0.5769422650337219,
"learning_rate": 0.00043556179168915266,
"loss": 3.5584,
"step": 25500
},
{
"epoch": 2.7547169811320753,
"grad_norm": 0.6086459159851074,
"learning_rate": 0.00043523799244468426,
"loss": 3.5519,
"step": 25550
},
{
"epoch": 2.7601078167115904,
"grad_norm": 0.5854797959327698,
"learning_rate": 0.0004349141932002158,
"loss": 3.5419,
"step": 25600
},
{
"epoch": 2.765498652291105,
"grad_norm": 0.5399619340896606,
"learning_rate": 0.0004345903939557474,
"loss": 3.5286,
"step": 25650
},
{
"epoch": 2.77088948787062,
"grad_norm": 0.5609824061393738,
"learning_rate": 0.00043426659471127897,
"loss": 3.5512,
"step": 25700
},
{
"epoch": 2.776280323450135,
"grad_norm": 0.5754132866859436,
"learning_rate": 0.00043394279546681057,
"loss": 3.5341,
"step": 25750
},
{
"epoch": 2.7816711590296497,
"grad_norm": 0.5857130289077759,
"learning_rate": 0.0004336189962223421,
"loss": 3.5377,
"step": 25800
},
{
"epoch": 2.7870619946091644,
"grad_norm": 0.5504029393196106,
"learning_rate": 0.0004332951969778737,
"loss": 3.5221,
"step": 25850
},
{
"epoch": 2.7924528301886795,
"grad_norm": 0.56480473279953,
"learning_rate": 0.0004329713977334053,
"loss": 3.5429,
"step": 25900
},
{
"epoch": 2.797843665768194,
"grad_norm": 0.5525421500205994,
"learning_rate": 0.0004326475984889368,
"loss": 3.5136,
"step": 25950
},
{
"epoch": 2.803234501347709,
"grad_norm": 0.6140046715736389,
"learning_rate": 0.0004323237992444684,
"loss": 3.5342,
"step": 26000
},
{
"epoch": 2.803234501347709,
"eval_accuracy": 0.3681909544080531,
"eval_loss": 3.5146098136901855,
"eval_runtime": 181.2808,
"eval_samples_per_second": 99.354,
"eval_steps_per_second": 6.211,
"step": 26000
},
{
"epoch": 2.8086253369272236,
"grad_norm": 0.5526379346847534,
"learning_rate": 0.00043199999999999993,
"loss": 3.5372,
"step": 26050
},
{
"epoch": 2.8140161725067383,
"grad_norm": 0.5372548699378967,
"learning_rate": 0.00043167620075553153,
"loss": 3.5251,
"step": 26100
},
{
"epoch": 2.8194070080862534,
"grad_norm": 0.5878198742866516,
"learning_rate": 0.0004313524015110631,
"loss": 3.536,
"step": 26150
},
{
"epoch": 2.824797843665768,
"grad_norm": 0.6414244174957275,
"learning_rate": 0.0004310286022665947,
"loss": 3.5469,
"step": 26200
},
{
"epoch": 2.830188679245283,
"grad_norm": 0.5342559218406677,
"learning_rate": 0.00043070480302212624,
"loss": 3.533,
"step": 26250
},
{
"epoch": 2.835579514824798,
"grad_norm": 0.5910995602607727,
"learning_rate": 0.0004303810037776578,
"loss": 3.532,
"step": 26300
},
{
"epoch": 2.8409703504043127,
"grad_norm": 0.4914022982120514,
"learning_rate": 0.0004300572045331894,
"loss": 3.5451,
"step": 26350
},
{
"epoch": 2.8463611859838274,
"grad_norm": 0.5698440074920654,
"learning_rate": 0.00042973340528872095,
"loss": 3.528,
"step": 26400
},
{
"epoch": 2.8517520215633425,
"grad_norm": 0.5266543030738831,
"learning_rate": 0.00042940960604425255,
"loss": 3.5381,
"step": 26450
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5502971410751343,
"learning_rate": 0.0004290858067997841,
"loss": 3.5261,
"step": 26500
},
{
"epoch": 2.862533692722372,
"grad_norm": 0.5623043775558472,
"learning_rate": 0.0004287620075553157,
"loss": 3.5085,
"step": 26550
},
{
"epoch": 2.867924528301887,
"grad_norm": 0.5682037472724915,
"learning_rate": 0.00042843820831084726,
"loss": 3.5304,
"step": 26600
},
{
"epoch": 2.8733153638814017,
"grad_norm": 0.5245981216430664,
"learning_rate": 0.00042811440906637886,
"loss": 3.5259,
"step": 26650
},
{
"epoch": 2.8787061994609164,
"grad_norm": 0.558342456817627,
"learning_rate": 0.00042779060982191036,
"loss": 3.5226,
"step": 26700
},
{
"epoch": 2.884097035040431,
"grad_norm": 0.5356988310813904,
"learning_rate": 0.0004274668105774419,
"loss": 3.5257,
"step": 26750
},
{
"epoch": 2.889487870619946,
"grad_norm": 0.5896562933921814,
"learning_rate": 0.0004271430113329735,
"loss": 3.5067,
"step": 26800
},
{
"epoch": 2.894878706199461,
"grad_norm": 0.5992358922958374,
"learning_rate": 0.00042682568807339447,
"loss": 3.5249,
"step": 26850
},
{
"epoch": 2.9002695417789757,
"grad_norm": 0.5337636470794678,
"learning_rate": 0.000426501888828926,
"loss": 3.5128,
"step": 26900
},
{
"epoch": 2.9056603773584904,
"grad_norm": 0.6004590392112732,
"learning_rate": 0.0004261780895844576,
"loss": 3.5133,
"step": 26950
},
{
"epoch": 2.9110512129380055,
"grad_norm": 0.5698912739753723,
"learning_rate": 0.00042585429033998917,
"loss": 3.5381,
"step": 27000
},
{
"epoch": 2.9110512129380055,
"eval_accuracy": 0.3692894351038575,
"eval_loss": 3.504966974258423,
"eval_runtime": 181.0594,
"eval_samples_per_second": 99.476,
"eval_steps_per_second": 6.219,
"step": 27000
},
{
"epoch": 2.91644204851752,
"grad_norm": 0.6112388372421265,
"learning_rate": 0.0004255304910955207,
"loss": 3.5501,
"step": 27050
},
{
"epoch": 2.921832884097035,
"grad_norm": 0.5333527326583862,
"learning_rate": 0.0004252066918510523,
"loss": 3.5325,
"step": 27100
},
{
"epoch": 2.92722371967655,
"grad_norm": 0.5607829689979553,
"learning_rate": 0.0004248828926065839,
"loss": 3.5464,
"step": 27150
},
{
"epoch": 2.9326145552560647,
"grad_norm": 0.5657780766487122,
"learning_rate": 0.0004245590933621155,
"loss": 3.5333,
"step": 27200
},
{
"epoch": 2.9380053908355794,
"grad_norm": 0.5746095180511475,
"learning_rate": 0.00042423529411764703,
"loss": 3.5474,
"step": 27250
},
{
"epoch": 2.9433962264150946,
"grad_norm": 0.604708194732666,
"learning_rate": 0.00042391149487317864,
"loss": 3.5284,
"step": 27300
},
{
"epoch": 2.9487870619946093,
"grad_norm": 0.6213203072547913,
"learning_rate": 0.00042358769562871013,
"loss": 3.5173,
"step": 27350
},
{
"epoch": 2.954177897574124,
"grad_norm": 0.5948765277862549,
"learning_rate": 0.00042326389638424174,
"loss": 3.5149,
"step": 27400
},
{
"epoch": 2.9595687331536387,
"grad_norm": 0.5501255989074707,
"learning_rate": 0.0004229400971397733,
"loss": 3.5339,
"step": 27450
},
{
"epoch": 2.964959568733154,
"grad_norm": 0.5301875472068787,
"learning_rate": 0.00042261629789530484,
"loss": 3.5331,
"step": 27500
},
{
"epoch": 2.9703504043126685,
"grad_norm": 0.5574668049812317,
"learning_rate": 0.00042229249865083644,
"loss": 3.522,
"step": 27550
},
{
"epoch": 2.975741239892183,
"grad_norm": 0.5755630135536194,
"learning_rate": 0.000421968699406368,
"loss": 3.5198,
"step": 27600
},
{
"epoch": 2.981132075471698,
"grad_norm": 0.54078608751297,
"learning_rate": 0.0004216449001618996,
"loss": 3.5438,
"step": 27650
},
{
"epoch": 2.986522911051213,
"grad_norm": 0.5769183039665222,
"learning_rate": 0.00042132110091743115,
"loss": 3.5085,
"step": 27700
},
{
"epoch": 2.9919137466307277,
"grad_norm": 0.5652152895927429,
"learning_rate": 0.00042099730167296275,
"loss": 3.5019,
"step": 27750
},
{
"epoch": 2.9973045822102424,
"grad_norm": 0.5847728848457336,
"learning_rate": 0.0004206735024284943,
"loss": 3.5239,
"step": 27800
},
{
"epoch": 3.0026954177897576,
"grad_norm": 0.5741530656814575,
"learning_rate": 0.0004203497031840259,
"loss": 3.4812,
"step": 27850
},
{
"epoch": 3.0080862533692723,
"grad_norm": 0.5797563195228577,
"learning_rate": 0.00042002590393955746,
"loss": 3.4285,
"step": 27900
},
{
"epoch": 3.013477088948787,
"grad_norm": 0.634753406047821,
"learning_rate": 0.00041970210469508896,
"loss": 3.4059,
"step": 27950
},
{
"epoch": 3.018867924528302,
"grad_norm": 0.5791683197021484,
"learning_rate": 0.00041937830545062056,
"loss": 3.4278,
"step": 28000
},
{
"epoch": 3.018867924528302,
"eval_accuracy": 0.37001849598109265,
"eval_loss": 3.502011299133301,
"eval_runtime": 181.3383,
"eval_samples_per_second": 99.323,
"eval_steps_per_second": 6.209,
"step": 28000
},
{
"epoch": 3.024258760107817,
"grad_norm": 0.603844165802002,
"learning_rate": 0.0004190545062061521,
"loss": 3.4184,
"step": 28050
},
{
"epoch": 3.0296495956873315,
"grad_norm": 0.5584900975227356,
"learning_rate": 0.0004187307069616837,
"loss": 3.435,
"step": 28100
},
{
"epoch": 3.035040431266846,
"grad_norm": 0.5561067461967468,
"learning_rate": 0.00041840690771721527,
"loss": 3.4382,
"step": 28150
},
{
"epoch": 3.0404312668463613,
"grad_norm": 0.6057212948799133,
"learning_rate": 0.00041808310847274687,
"loss": 3.435,
"step": 28200
},
{
"epoch": 3.045822102425876,
"grad_norm": 0.5753740668296814,
"learning_rate": 0.0004177593092282784,
"loss": 3.4403,
"step": 28250
},
{
"epoch": 3.0512129380053907,
"grad_norm": 0.5986172556877136,
"learning_rate": 0.00041743550998381,
"loss": 3.4394,
"step": 28300
},
{
"epoch": 3.056603773584906,
"grad_norm": 0.5863624811172485,
"learning_rate": 0.0004171117107393416,
"loss": 3.4487,
"step": 28350
},
{
"epoch": 3.0619946091644206,
"grad_norm": 0.6170995831489563,
"learning_rate": 0.00041678791149487313,
"loss": 3.4238,
"step": 28400
},
{
"epoch": 3.0673854447439353,
"grad_norm": 0.5697750449180603,
"learning_rate": 0.00041646411225040473,
"loss": 3.4469,
"step": 28450
},
{
"epoch": 3.07277628032345,
"grad_norm": 0.5869051814079285,
"learning_rate": 0.0004161403130059363,
"loss": 3.4314,
"step": 28500
},
{
"epoch": 3.078167115902965,
"grad_norm": 0.6245196461677551,
"learning_rate": 0.0004158165137614679,
"loss": 3.4267,
"step": 28550
},
{
"epoch": 3.08355795148248,
"grad_norm": 0.5763593912124634,
"learning_rate": 0.00041549271451699944,
"loss": 3.4496,
"step": 28600
},
{
"epoch": 3.0889487870619945,
"grad_norm": 0.6235705018043518,
"learning_rate": 0.00041516891527253104,
"loss": 3.4518,
"step": 28650
},
{
"epoch": 3.0943396226415096,
"grad_norm": 0.5487541556358337,
"learning_rate": 0.00041484511602806254,
"loss": 3.4529,
"step": 28700
},
{
"epoch": 3.0997304582210243,
"grad_norm": 0.5920215845108032,
"learning_rate": 0.0004145213167835941,
"loss": 3.4479,
"step": 28750
},
{
"epoch": 3.105121293800539,
"grad_norm": 0.6436508893966675,
"learning_rate": 0.0004141975175391257,
"loss": 3.4435,
"step": 28800
},
{
"epoch": 3.1105121293800537,
"grad_norm": 0.6300854682922363,
"learning_rate": 0.00041387371829465725,
"loss": 3.4504,
"step": 28850
},
{
"epoch": 3.115902964959569,
"grad_norm": 0.6098328828811646,
"learning_rate": 0.00041354991905018885,
"loss": 3.4489,
"step": 28900
},
{
"epoch": 3.1212938005390836,
"grad_norm": 0.5779024958610535,
"learning_rate": 0.0004132261198057204,
"loss": 3.4428,
"step": 28950
},
{
"epoch": 3.1266846361185983,
"grad_norm": 0.6418079137802124,
"learning_rate": 0.00041290879654614135,
"loss": 3.4513,
"step": 29000
},
{
"epoch": 3.1266846361185983,
"eval_accuracy": 0.37081785527674377,
"eval_loss": 3.4941792488098145,
"eval_runtime": 181.2333,
"eval_samples_per_second": 99.38,
"eval_steps_per_second": 6.213,
"step": 29000
},
{
"epoch": 3.1320754716981134,
"grad_norm": 0.5885114073753357,
"learning_rate": 0.00041258499730167296,
"loss": 3.431,
"step": 29050
},
{
"epoch": 3.137466307277628,
"grad_norm": 0.5644581913948059,
"learning_rate": 0.0004122611980572045,
"loss": 3.4565,
"step": 29100
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.5936179161071777,
"learning_rate": 0.00041193739881273606,
"loss": 3.451,
"step": 29150
},
{
"epoch": 3.1482479784366575,
"grad_norm": 0.5671179294586182,
"learning_rate": 0.00041161359956826766,
"loss": 3.4676,
"step": 29200
},
{
"epoch": 3.1536388140161726,
"grad_norm": 0.5778055191040039,
"learning_rate": 0.0004112898003237992,
"loss": 3.4678,
"step": 29250
},
{
"epoch": 3.1590296495956873,
"grad_norm": 0.6444634795188904,
"learning_rate": 0.0004109660010793308,
"loss": 3.4602,
"step": 29300
},
{
"epoch": 3.164420485175202,
"grad_norm": 0.5693305730819702,
"learning_rate": 0.0004106422018348623,
"loss": 3.4477,
"step": 29350
},
{
"epoch": 3.169811320754717,
"grad_norm": 0.6001082062721252,
"learning_rate": 0.0004103184025903939,
"loss": 3.4583,
"step": 29400
},
{
"epoch": 3.175202156334232,
"grad_norm": 0.598551332950592,
"learning_rate": 0.00040999460334592547,
"loss": 3.4711,
"step": 29450
},
{
"epoch": 3.1805929919137466,
"grad_norm": 0.6061559319496155,
"learning_rate": 0.000409670804101457,
"loss": 3.4438,
"step": 29500
},
{
"epoch": 3.1859838274932613,
"grad_norm": 0.6079073548316956,
"learning_rate": 0.00040934700485698863,
"loss": 3.4549,
"step": 29550
},
{
"epoch": 3.1913746630727764,
"grad_norm": 0.5677346587181091,
"learning_rate": 0.0004090232056125202,
"loss": 3.4684,
"step": 29600
},
{
"epoch": 3.196765498652291,
"grad_norm": 0.5982911586761475,
"learning_rate": 0.0004086994063680518,
"loss": 3.4518,
"step": 29650
},
{
"epoch": 3.202156334231806,
"grad_norm": 0.5999668836593628,
"learning_rate": 0.00040837560712358333,
"loss": 3.4704,
"step": 29700
},
{
"epoch": 3.207547169811321,
"grad_norm": 0.6423681974411011,
"learning_rate": 0.00040805180787911494,
"loss": 3.4663,
"step": 29750
},
{
"epoch": 3.2129380053908356,
"grad_norm": 0.6018577218055725,
"learning_rate": 0.0004077280086346465,
"loss": 3.4421,
"step": 29800
},
{
"epoch": 3.2183288409703503,
"grad_norm": 0.6331573128700256,
"learning_rate": 0.0004074042093901781,
"loss": 3.4647,
"step": 29850
},
{
"epoch": 3.223719676549865,
"grad_norm": 0.5911356210708618,
"learning_rate": 0.00040708041014570964,
"loss": 3.4587,
"step": 29900
},
{
"epoch": 3.22911051212938,
"grad_norm": 0.6494351625442505,
"learning_rate": 0.00040675661090124114,
"loss": 3.4525,
"step": 29950
},
{
"epoch": 3.234501347708895,
"grad_norm": 0.6582809090614319,
"learning_rate": 0.0004064328116567728,
"loss": 3.4627,
"step": 30000
},
{
"epoch": 3.234501347708895,
"eval_accuracy": 0.37125040242313323,
"eval_loss": 3.4898300170898438,
"eval_runtime": 181.4533,
"eval_samples_per_second": 99.26,
"eval_steps_per_second": 6.205,
"step": 30000
}
],
"logging_steps": 50,
"max_steps": 92750,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.508207538176e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}