{
"best_metric": 3.302194118499756,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M__495/checkpoint-90000",
"epoch": 9.703504043126685,
"eval_steps": 1000,
"global_step": 90000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005390835579514825,
"grad_norm": 0.9464320540428162,
"learning_rate": 0.000276,
"loss": 8.7571,
"step": 50
},
{
"epoch": 0.01078167115902965,
"grad_norm": 1.1756318807601929,
"learning_rate": 0.0005759999999999999,
"loss": 7.0078,
"step": 100
},
{
"epoch": 0.016172506738544475,
"grad_norm": 1.551442265510559,
"learning_rate": 0.000599702104695089,
"loss": 6.6048,
"step": 150
},
{
"epoch": 0.0215633423180593,
"grad_norm": 0.7034249901771545,
"learning_rate": 0.0005993783054506205,
"loss": 6.3332,
"step": 200
},
{
"epoch": 0.026954177897574125,
"grad_norm": 1.0177743434906006,
"learning_rate": 0.0005990545062061521,
"loss": 6.1389,
"step": 250
},
{
"epoch": 0.03234501347708895,
"grad_norm": 1.3281992673873901,
"learning_rate": 0.0005987307069616836,
"loss": 6.0274,
"step": 300
},
{
"epoch": 0.03773584905660377,
"grad_norm": 1.3769625425338745,
"learning_rate": 0.0005984069077172153,
"loss": 5.9227,
"step": 350
},
{
"epoch": 0.0431266846361186,
"grad_norm": 1.3584883213043213,
"learning_rate": 0.0005980831084727469,
"loss": 5.8655,
"step": 400
},
{
"epoch": 0.04851752021563342,
"grad_norm": 0.9795990586280823,
"learning_rate": 0.0005977593092282784,
"loss": 5.78,
"step": 450
},
{
"epoch": 0.05390835579514825,
"grad_norm": 1.4081448316574097,
"learning_rate": 0.00059743550998381,
"loss": 5.7304,
"step": 500
},
{
"epoch": 0.05929919137466307,
"grad_norm": 1.7430726289749146,
"learning_rate": 0.0005971117107393416,
"loss": 5.6254,
"step": 550
},
{
"epoch": 0.0646900269541779,
"grad_norm": 1.4288103580474854,
"learning_rate": 0.0005967879114948732,
"loss": 5.5997,
"step": 600
},
{
"epoch": 0.07008086253369272,
"grad_norm": 0.9352820515632629,
"learning_rate": 0.0005964641122504047,
"loss": 5.5009,
"step": 650
},
{
"epoch": 0.07547169811320754,
"grad_norm": 1.0235956907272339,
"learning_rate": 0.0005961403130059363,
"loss": 5.4834,
"step": 700
},
{
"epoch": 0.08086253369272237,
"grad_norm": 1.1283656358718872,
"learning_rate": 0.0005958165137614678,
"loss": 5.4096,
"step": 750
},
{
"epoch": 0.0862533692722372,
"grad_norm": 1.1757662296295166,
"learning_rate": 0.0005954927145169995,
"loss": 5.3508,
"step": 800
},
{
"epoch": 0.09164420485175202,
"grad_norm": 1.2652894258499146,
"learning_rate": 0.0005951689152725309,
"loss": 5.3137,
"step": 850
},
{
"epoch": 0.09703504043126684,
"grad_norm": 0.8220955729484558,
"learning_rate": 0.0005948451160280626,
"loss": 5.234,
"step": 900
},
{
"epoch": 0.10242587601078167,
"grad_norm": 1.0178474187850952,
"learning_rate": 0.0005945213167835941,
"loss": 5.2211,
"step": 950
},
{
"epoch": 0.1078167115902965,
"grad_norm": 1.0003489255905151,
"learning_rate": 0.0005941975175391257,
"loss": 5.1665,
"step": 1000
},
{
"epoch": 0.1078167115902965,
"eval_accuracy": 0.2206364212520268,
"eval_loss": 5.09123420715332,
"eval_runtime": 146.3271,
"eval_samples_per_second": 123.087,
"eval_steps_per_second": 7.695,
"step": 1000
},
{
"epoch": 0.11320754716981132,
"grad_norm": 1.0129122734069824,
"learning_rate": 0.0005938737182946572,
"loss": 5.1253,
"step": 1050
},
{
"epoch": 0.11859838274932614,
"grad_norm": 1.2447277307510376,
"learning_rate": 0.0005935499190501888,
"loss": 5.0843,
"step": 1100
},
{
"epoch": 0.12398921832884097,
"grad_norm": 1.1244721412658691,
"learning_rate": 0.0005932261198057204,
"loss": 5.0632,
"step": 1150
},
{
"epoch": 0.1293800539083558,
"grad_norm": 1.0002185106277466,
"learning_rate": 0.000592902320561252,
"loss": 5.0409,
"step": 1200
},
{
"epoch": 0.1347708894878706,
"grad_norm": 1.237203598022461,
"learning_rate": 0.0005925785213167835,
"loss": 5.0158,
"step": 1250
},
{
"epoch": 0.14016172506738545,
"grad_norm": 0.9483816623687744,
"learning_rate": 0.0005922547220723151,
"loss": 4.9578,
"step": 1300
},
{
"epoch": 0.14555256064690028,
"grad_norm": 1.1492619514465332,
"learning_rate": 0.0005919309228278468,
"loss": 4.9422,
"step": 1350
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.8103901743888855,
"learning_rate": 0.0005916071235833783,
"loss": 4.9161,
"step": 1400
},
{
"epoch": 0.15633423180592992,
"grad_norm": 0.9849348068237305,
"learning_rate": 0.0005912833243389097,
"loss": 4.8937,
"step": 1450
},
{
"epoch": 0.16172506738544473,
"grad_norm": 1.0992136001586914,
"learning_rate": 0.0005909595250944414,
"loss": 4.8771,
"step": 1500
},
{
"epoch": 0.16711590296495957,
"grad_norm": 0.7955754995346069,
"learning_rate": 0.000590635725849973,
"loss": 4.8337,
"step": 1550
},
{
"epoch": 0.1725067385444744,
"grad_norm": 0.8510985970497131,
"learning_rate": 0.0005903119266055045,
"loss": 4.8487,
"step": 1600
},
{
"epoch": 0.1778975741239892,
"grad_norm": 0.8423263430595398,
"learning_rate": 0.0005899881273610361,
"loss": 4.8251,
"step": 1650
},
{
"epoch": 0.18328840970350405,
"grad_norm": 0.8307051658630371,
"learning_rate": 0.0005896643281165677,
"loss": 4.7528,
"step": 1700
},
{
"epoch": 0.18867924528301888,
"grad_norm": 1.171060562133789,
"learning_rate": 0.0005893405288720993,
"loss": 4.7617,
"step": 1750
},
{
"epoch": 0.1940700808625337,
"grad_norm": 1.0469765663146973,
"learning_rate": 0.0005890167296276308,
"loss": 4.7597,
"step": 1800
},
{
"epoch": 0.19946091644204852,
"grad_norm": 0.8637204170227051,
"learning_rate": 0.0005886929303831624,
"loss": 4.7128,
"step": 1850
},
{
"epoch": 0.20485175202156333,
"grad_norm": 0.9173099398612976,
"learning_rate": 0.0005883691311386939,
"loss": 4.7067,
"step": 1900
},
{
"epoch": 0.21024258760107817,
"grad_norm": 0.9003922343254089,
"learning_rate": 0.0005880453318942256,
"loss": 4.6783,
"step": 1950
},
{
"epoch": 0.215633423180593,
"grad_norm": 0.9870163798332214,
"learning_rate": 0.0005877215326497571,
"loss": 4.6555,
"step": 2000
},
{
"epoch": 0.215633423180593,
"eval_accuracy": 0.260708366848222,
"eval_loss": 4.6007513999938965,
"eval_runtime": 144.747,
"eval_samples_per_second": 124.431,
"eval_steps_per_second": 7.779,
"step": 2000
},
{
"epoch": 0.2210242587601078,
"grad_norm": 0.8778396248817444,
"learning_rate": 0.0005873977334052887,
"loss": 4.6696,
"step": 2050
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.8375086188316345,
"learning_rate": 0.0005870739341608202,
"loss": 4.6395,
"step": 2100
},
{
"epoch": 0.23180592991913745,
"grad_norm": 0.8078502416610718,
"learning_rate": 0.0005867501349163519,
"loss": 4.5823,
"step": 2150
},
{
"epoch": 0.2371967654986523,
"grad_norm": 0.8603857755661011,
"learning_rate": 0.0005864263356718833,
"loss": 4.6175,
"step": 2200
},
{
"epoch": 0.24258760107816713,
"grad_norm": 0.8307099342346191,
"learning_rate": 0.000586102536427415,
"loss": 4.5541,
"step": 2250
},
{
"epoch": 0.24797843665768193,
"grad_norm": 1.0236228704452515,
"learning_rate": 0.0005857787371829465,
"loss": 4.5368,
"step": 2300
},
{
"epoch": 0.25336927223719674,
"grad_norm": 0.9307533502578735,
"learning_rate": 0.0005854549379384781,
"loss": 4.5326,
"step": 2350
},
{
"epoch": 0.2587601078167116,
"grad_norm": 1.4191182851791382,
"learning_rate": 0.0005851311386940096,
"loss": 4.5253,
"step": 2400
},
{
"epoch": 0.2641509433962264,
"grad_norm": 1.015572428703308,
"learning_rate": 0.0005848073394495412,
"loss": 4.5272,
"step": 2450
},
{
"epoch": 0.2695417789757412,
"grad_norm": 0.8903120756149292,
"learning_rate": 0.0005844835402050728,
"loss": 4.5219,
"step": 2500
},
{
"epoch": 0.2749326145552561,
"grad_norm": 1.0123368501663208,
"learning_rate": 0.0005841597409606044,
"loss": 4.4772,
"step": 2550
},
{
"epoch": 0.2803234501347709,
"grad_norm": 0.925151526927948,
"learning_rate": 0.000583835941716136,
"loss": 4.4624,
"step": 2600
},
{
"epoch": 0.2857142857142857,
"grad_norm": 1.1478705406188965,
"learning_rate": 0.0005835121424716675,
"loss": 4.4605,
"step": 2650
},
{
"epoch": 0.29110512129380056,
"grad_norm": 1.0130943059921265,
"learning_rate": 0.0005831883432271992,
"loss": 4.4529,
"step": 2700
},
{
"epoch": 0.29649595687331537,
"grad_norm": 0.991671621799469,
"learning_rate": 0.0005828645439827307,
"loss": 4.4101,
"step": 2750
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.8699747323989868,
"learning_rate": 0.0005825407447382622,
"loss": 4.4505,
"step": 2800
},
{
"epoch": 0.30727762803234504,
"grad_norm": 1.0381041765213013,
"learning_rate": 0.0005822169454937938,
"loss": 4.4171,
"step": 2850
},
{
"epoch": 0.31266846361185985,
"grad_norm": 1.0296149253845215,
"learning_rate": 0.0005818931462493254,
"loss": 4.4014,
"step": 2900
},
{
"epoch": 0.31805929919137466,
"grad_norm": 0.9270951747894287,
"learning_rate": 0.0005815693470048569,
"loss": 4.3968,
"step": 2950
},
{
"epoch": 0.32345013477088946,
"grad_norm": 0.7531670331954956,
"learning_rate": 0.0005812455477603885,
"loss": 4.3901,
"step": 3000
},
{
"epoch": 0.32345013477088946,
"eval_accuracy": 0.29159338482103947,
"eval_loss": 4.296316146850586,
"eval_runtime": 144.5889,
"eval_samples_per_second": 124.567,
"eval_steps_per_second": 7.788,
"step": 3000
},
{
"epoch": 0.3288409703504043,
"grad_norm": 0.8791877627372742,
"learning_rate": 0.0005809217485159201,
"loss": 4.3683,
"step": 3050
},
{
"epoch": 0.33423180592991913,
"grad_norm": 0.843708872795105,
"learning_rate": 0.0005805979492714517,
"loss": 4.3643,
"step": 3100
},
{
"epoch": 0.33962264150943394,
"grad_norm": 0.8123868703842163,
"learning_rate": 0.0005802741500269832,
"loss": 4.3262,
"step": 3150
},
{
"epoch": 0.3450134770889488,
"grad_norm": 0.9056026339530945,
"learning_rate": 0.0005799503507825148,
"loss": 4.3298,
"step": 3200
},
{
"epoch": 0.3504043126684636,
"grad_norm": 0.6567044854164124,
"learning_rate": 0.0005796265515380463,
"loss": 4.306,
"step": 3250
},
{
"epoch": 0.3557951482479784,
"grad_norm": 0.9095497131347656,
"learning_rate": 0.000579302752293578,
"loss": 4.328,
"step": 3300
},
{
"epoch": 0.3611859838274933,
"grad_norm": 0.8091392517089844,
"learning_rate": 0.0005789789530491095,
"loss": 4.3044,
"step": 3350
},
{
"epoch": 0.3665768194070081,
"grad_norm": 0.9594192504882812,
"learning_rate": 0.0005786551538046411,
"loss": 4.2963,
"step": 3400
},
{
"epoch": 0.3719676549865229,
"grad_norm": 0.72095787525177,
"learning_rate": 0.0005783313545601726,
"loss": 4.2802,
"step": 3450
},
{
"epoch": 0.37735849056603776,
"grad_norm": 0.7558978199958801,
"learning_rate": 0.0005780075553157043,
"loss": 4.2589,
"step": 3500
},
{
"epoch": 0.38274932614555257,
"grad_norm": 0.7811341881752014,
"learning_rate": 0.0005776837560712357,
"loss": 4.264,
"step": 3550
},
{
"epoch": 0.3881401617250674,
"grad_norm": 0.9022195935249329,
"learning_rate": 0.0005773599568267673,
"loss": 4.2651,
"step": 3600
},
{
"epoch": 0.3935309973045822,
"grad_norm": 0.9639933109283447,
"learning_rate": 0.0005770361575822989,
"loss": 4.2789,
"step": 3650
},
{
"epoch": 0.39892183288409705,
"grad_norm": 0.9333063960075378,
"learning_rate": 0.0005767123583378305,
"loss": 4.24,
"step": 3700
},
{
"epoch": 0.40431266846361186,
"grad_norm": 0.7508504986763,
"learning_rate": 0.000576388559093362,
"loss": 4.2359,
"step": 3750
},
{
"epoch": 0.40970350404312667,
"grad_norm": 0.7458257079124451,
"learning_rate": 0.0005760647598488936,
"loss": 4.2555,
"step": 3800
},
{
"epoch": 0.41509433962264153,
"grad_norm": 0.7919742465019226,
"learning_rate": 0.0005757409606044253,
"loss": 4.2366,
"step": 3850
},
{
"epoch": 0.42048517520215634,
"grad_norm": 0.9453123211860657,
"learning_rate": 0.0005754171613599568,
"loss": 4.2055,
"step": 3900
},
{
"epoch": 0.42587601078167114,
"grad_norm": 0.6152997612953186,
"learning_rate": 0.0005750933621154884,
"loss": 4.201,
"step": 3950
},
{
"epoch": 0.431266846361186,
"grad_norm": 0.8247600197792053,
"learning_rate": 0.0005747695628710199,
"loss": 4.2075,
"step": 4000
},
{
"epoch": 0.431266846361186,
"eval_accuracy": 0.30819272110400897,
"eval_loss": 4.134169101715088,
"eval_runtime": 144.6112,
"eval_samples_per_second": 124.548,
"eval_steps_per_second": 7.786,
"step": 4000
},
{
"epoch": 0.4366576819407008,
"grad_norm": 0.8243815898895264,
"learning_rate": 0.0005744457636265515,
"loss": 4.2068,
"step": 4050
},
{
"epoch": 0.4420485175202156,
"grad_norm": 0.6849672794342041,
"learning_rate": 0.0005741219643820831,
"loss": 4.2091,
"step": 4100
},
{
"epoch": 0.4474393530997305,
"grad_norm": 0.7505493760108948,
"learning_rate": 0.0005737981651376146,
"loss": 4.193,
"step": 4150
},
{
"epoch": 0.4528301886792453,
"grad_norm": 0.6466169953346252,
"learning_rate": 0.0005734743658931462,
"loss": 4.1829,
"step": 4200
},
{
"epoch": 0.4582210242587601,
"grad_norm": 0.5937972664833069,
"learning_rate": 0.0005731505666486778,
"loss": 4.1738,
"step": 4250
},
{
"epoch": 0.4636118598382749,
"grad_norm": 0.7906216979026794,
"learning_rate": 0.0005728267674042093,
"loss": 4.1668,
"step": 4300
},
{
"epoch": 0.46900269541778977,
"grad_norm": 0.744433581829071,
"learning_rate": 0.0005725029681597409,
"loss": 4.1723,
"step": 4350
},
{
"epoch": 0.4743935309973046,
"grad_norm": 0.8073228597640991,
"learning_rate": 0.0005721791689152725,
"loss": 4.158,
"step": 4400
},
{
"epoch": 0.4797843665768194,
"grad_norm": 0.8262885808944702,
"learning_rate": 0.0005718553696708041,
"loss": 4.1602,
"step": 4450
},
{
"epoch": 0.48517520215633425,
"grad_norm": 0.6594825387001038,
"learning_rate": 0.0005715315704263356,
"loss": 4.1434,
"step": 4500
},
{
"epoch": 0.49056603773584906,
"grad_norm": 0.6674824357032776,
"learning_rate": 0.0005712077711818672,
"loss": 4.1315,
"step": 4550
},
{
"epoch": 0.49595687331536387,
"grad_norm": 0.7260637879371643,
"learning_rate": 0.0005708839719373987,
"loss": 4.1407,
"step": 4600
},
{
"epoch": 0.5013477088948787,
"grad_norm": 0.6827527284622192,
"learning_rate": 0.0005705601726929304,
"loss": 4.1196,
"step": 4650
},
{
"epoch": 0.5067385444743935,
"grad_norm": 0.774723470211029,
"learning_rate": 0.0005702363734484619,
"loss": 4.1256,
"step": 4700
},
{
"epoch": 0.5121293800539084,
"grad_norm": 0.6491437554359436,
"learning_rate": 0.0005699125742039935,
"loss": 4.1063,
"step": 4750
},
{
"epoch": 0.5175202156334232,
"grad_norm": 0.7277990579605103,
"learning_rate": 0.000569588774959525,
"loss": 4.1197,
"step": 4800
},
{
"epoch": 0.522911051212938,
"grad_norm": 0.6551647782325745,
"learning_rate": 0.0005692649757150567,
"loss": 4.1135,
"step": 4850
},
{
"epoch": 0.5283018867924528,
"grad_norm": 0.7088435292243958,
"learning_rate": 0.0005689411764705881,
"loss": 4.1188,
"step": 4900
},
{
"epoch": 0.5336927223719676,
"grad_norm": 0.7585951089859009,
"learning_rate": 0.0005686173772261197,
"loss": 4.0928,
"step": 4950
},
{
"epoch": 0.5390835579514824,
"grad_norm": 0.673933744430542,
"learning_rate": 0.0005682935779816514,
"loss": 4.0819,
"step": 5000
},
{
"epoch": 0.5390835579514824,
"eval_accuracy": 0.31859362744293795,
"eval_loss": 4.022838115692139,
"eval_runtime": 144.8217,
"eval_samples_per_second": 124.367,
"eval_steps_per_second": 7.775,
"step": 5000
},
{
"epoch": 0.5444743935309974,
"grad_norm": 0.617730438709259,
"learning_rate": 0.0005679697787371829,
"loss": 4.0793,
"step": 5050
},
{
"epoch": 0.5498652291105122,
"grad_norm": 0.6957946419715881,
"learning_rate": 0.0005676459794927145,
"loss": 4.0821,
"step": 5100
},
{
"epoch": 0.555256064690027,
"grad_norm": 0.6225258708000183,
"learning_rate": 0.000567322180248246,
"loss": 4.0873,
"step": 5150
},
{
"epoch": 0.5606469002695418,
"grad_norm": 0.6864507794380188,
"learning_rate": 0.0005669983810037777,
"loss": 4.0634,
"step": 5200
},
{
"epoch": 0.5660377358490566,
"grad_norm": 0.642590343952179,
"learning_rate": 0.0005666745817593092,
"loss": 4.0649,
"step": 5250
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.614847719669342,
"learning_rate": 0.0005663507825148408,
"loss": 4.0476,
"step": 5300
},
{
"epoch": 0.5768194070080862,
"grad_norm": 0.7870457768440247,
"learning_rate": 0.0005660269832703723,
"loss": 4.063,
"step": 5350
},
{
"epoch": 0.5822102425876011,
"grad_norm": 0.6789854764938354,
"learning_rate": 0.0005657031840259039,
"loss": 4.0518,
"step": 5400
},
{
"epoch": 0.5876010781671159,
"grad_norm": 0.6867729425430298,
"learning_rate": 0.0005653793847814355,
"loss": 4.0614,
"step": 5450
},
{
"epoch": 0.5929919137466307,
"grad_norm": 0.5899894833564758,
"learning_rate": 0.000565055585536967,
"loss": 4.0639,
"step": 5500
},
{
"epoch": 0.5983827493261455,
"grad_norm": 0.6574368476867676,
"learning_rate": 0.0005647317862924986,
"loss": 4.067,
"step": 5550
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.7431745529174805,
"learning_rate": 0.0005644079870480302,
"loss": 4.0425,
"step": 5600
},
{
"epoch": 0.6091644204851752,
"grad_norm": 0.6241595149040222,
"learning_rate": 0.0005640841878035617,
"loss": 4.0319,
"step": 5650
},
{
"epoch": 0.6145552560646901,
"grad_norm": 0.6736788749694824,
"learning_rate": 0.0005637603885590933,
"loss": 4.0366,
"step": 5700
},
{
"epoch": 0.6199460916442049,
"grad_norm": 0.6149032711982727,
"learning_rate": 0.0005634365893146248,
"loss": 4.0495,
"step": 5750
},
{
"epoch": 0.6253369272237197,
"grad_norm": 0.6543477177619934,
"learning_rate": 0.0005631127900701565,
"loss": 4.042,
"step": 5800
},
{
"epoch": 0.6307277628032345,
"grad_norm": 0.6215724945068359,
"learning_rate": 0.000562788990825688,
"loss": 4.0478,
"step": 5850
},
{
"epoch": 0.6361185983827493,
"grad_norm": 0.6606348752975464,
"learning_rate": 0.0005624651915812196,
"loss": 4.0192,
"step": 5900
},
{
"epoch": 0.6415094339622641,
"grad_norm": 0.7944669723510742,
"learning_rate": 0.0005621413923367511,
"loss": 4.012,
"step": 5950
},
{
"epoch": 0.6469002695417789,
"grad_norm": 0.6075884699821472,
"learning_rate": 0.0005618175930922828,
"loss": 4.0201,
"step": 6000
},
{
"epoch": 0.6469002695417789,
"eval_accuracy": 0.3257018080166491,
"eval_loss": 3.940925359725952,
"eval_runtime": 144.8645,
"eval_samples_per_second": 124.33,
"eval_steps_per_second": 7.773,
"step": 6000
},
{
"epoch": 0.6522911051212938,
"grad_norm": 0.7395045757293701,
"learning_rate": 0.0005614937938478143,
"loss": 3.9927,
"step": 6050
},
{
"epoch": 0.6576819407008087,
"grad_norm": 0.69764643907547,
"learning_rate": 0.0005611699946033459,
"loss": 4.0252,
"step": 6100
},
{
"epoch": 0.6630727762803235,
"grad_norm": 0.5907176733016968,
"learning_rate": 0.0005608461953588774,
"loss": 4.0071,
"step": 6150
},
{
"epoch": 0.6684636118598383,
"grad_norm": 0.7374498248100281,
"learning_rate": 0.000560522396114409,
"loss": 3.9854,
"step": 6200
},
{
"epoch": 0.6738544474393531,
"grad_norm": 0.5937222838401794,
"learning_rate": 0.0005601985968699405,
"loss": 4.003,
"step": 6250
},
{
"epoch": 0.6792452830188679,
"grad_norm": 0.6928643584251404,
"learning_rate": 0.0005598747976254721,
"loss": 4.022,
"step": 6300
},
{
"epoch": 0.6846361185983828,
"grad_norm": 0.6832301020622253,
"learning_rate": 0.0005595509983810038,
"loss": 3.9816,
"step": 6350
},
{
"epoch": 0.6900269541778976,
"grad_norm": 0.6227492690086365,
"learning_rate": 0.0005592271991365353,
"loss": 3.9855,
"step": 6400
},
{
"epoch": 0.6954177897574124,
"grad_norm": 0.6595360636711121,
"learning_rate": 0.0005589033998920669,
"loss": 3.9733,
"step": 6450
},
{
"epoch": 0.7008086253369272,
"grad_norm": 0.6538481116294861,
"learning_rate": 0.0005585796006475984,
"loss": 3.9867,
"step": 6500
},
{
"epoch": 0.706199460916442,
"grad_norm": 0.6099511384963989,
"learning_rate": 0.0005582558014031301,
"loss": 4.009,
"step": 6550
},
{
"epoch": 0.7115902964959568,
"grad_norm": 0.5673043131828308,
"learning_rate": 0.0005579320021586616,
"loss": 3.9638,
"step": 6600
},
{
"epoch": 0.7169811320754716,
"grad_norm": 0.7762152552604675,
"learning_rate": 0.0005576082029141932,
"loss": 3.9942,
"step": 6650
},
{
"epoch": 0.7223719676549866,
"grad_norm": 0.6117172241210938,
"learning_rate": 0.0005572844036697247,
"loss": 3.9672,
"step": 6700
},
{
"epoch": 0.7277628032345014,
"grad_norm": 0.6088191866874695,
"learning_rate": 0.0005569606044252563,
"loss": 3.9738,
"step": 6750
},
{
"epoch": 0.7331536388140162,
"grad_norm": 0.6414440274238586,
"learning_rate": 0.0005566368051807879,
"loss": 3.9641,
"step": 6800
},
{
"epoch": 0.738544474393531,
"grad_norm": 0.8121737241744995,
"learning_rate": 0.0005563130059363194,
"loss": 3.9423,
"step": 6850
},
{
"epoch": 0.7439353099730458,
"grad_norm": 0.7500906586647034,
"learning_rate": 0.000555989206691851,
"loss": 3.9677,
"step": 6900
},
{
"epoch": 0.7493261455525606,
"grad_norm": 0.6089574694633484,
"learning_rate": 0.0005556654074473826,
"loss": 3.9647,
"step": 6950
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.7751880288124084,
"learning_rate": 0.0005553416082029141,
"loss": 3.9519,
"step": 7000
},
{
"epoch": 0.7547169811320755,
"eval_accuracy": 0.3312385418738994,
"eval_loss": 3.88531494140625,
"eval_runtime": 144.5963,
"eval_samples_per_second": 124.561,
"eval_steps_per_second": 7.787,
"step": 7000
},
{
"epoch": 0.7601078167115903,
"grad_norm": 0.6487019658088684,
"learning_rate": 0.0005550178089584457,
"loss": 3.9428,
"step": 7050
},
{
"epoch": 0.7654986522911051,
"grad_norm": 0.6093623638153076,
"learning_rate": 0.0005546940097139772,
"loss": 3.9544,
"step": 7100
},
{
"epoch": 0.77088948787062,
"grad_norm": 0.5531768798828125,
"learning_rate": 0.0005543702104695089,
"loss": 3.9569,
"step": 7150
},
{
"epoch": 0.7762803234501348,
"grad_norm": 0.6401906609535217,
"learning_rate": 0.0005540464112250404,
"loss": 3.9564,
"step": 7200
},
{
"epoch": 0.7816711590296496,
"grad_norm": 0.5921440720558167,
"learning_rate": 0.000553722611980572,
"loss": 3.9108,
"step": 7250
},
{
"epoch": 0.7870619946091644,
"grad_norm": 0.6791409254074097,
"learning_rate": 0.0005533988127361035,
"loss": 3.9488,
"step": 7300
},
{
"epoch": 0.7924528301886793,
"grad_norm": 0.6472693681716919,
"learning_rate": 0.0005530750134916352,
"loss": 3.9341,
"step": 7350
},
{
"epoch": 0.7978436657681941,
"grad_norm": 0.6375269889831543,
"learning_rate": 0.0005527512142471668,
"loss": 3.9339,
"step": 7400
},
{
"epoch": 0.8032345013477089,
"grad_norm": 0.626977264881134,
"learning_rate": 0.0005524274150026982,
"loss": 3.9247,
"step": 7450
},
{
"epoch": 0.8086253369272237,
"grad_norm": 0.696706235408783,
"learning_rate": 0.0005521036157582299,
"loss": 3.9116,
"step": 7500
},
{
"epoch": 0.8140161725067385,
"grad_norm": 0.594398558139801,
"learning_rate": 0.0005517798165137614,
"loss": 3.9356,
"step": 7550
},
{
"epoch": 0.8194070080862533,
"grad_norm": 0.6184767484664917,
"learning_rate": 0.000551456017269293,
"loss": 3.9384,
"step": 7600
},
{
"epoch": 0.8247978436657682,
"grad_norm": 0.5797574520111084,
"learning_rate": 0.0005511322180248245,
"loss": 3.9149,
"step": 7650
},
{
"epoch": 0.8301886792452831,
"grad_norm": 0.5616925954818726,
"learning_rate": 0.0005508084187803562,
"loss": 3.9173,
"step": 7700
},
{
"epoch": 0.8355795148247979,
"grad_norm": 0.6098619103431702,
"learning_rate": 0.0005504846195358877,
"loss": 3.8999,
"step": 7750
},
{
"epoch": 0.8409703504043127,
"grad_norm": 0.6354513764381409,
"learning_rate": 0.0005501608202914193,
"loss": 3.9284,
"step": 7800
},
{
"epoch": 0.8463611859838275,
"grad_norm": 0.6673269867897034,
"learning_rate": 0.0005498370210469508,
"loss": 3.909,
"step": 7850
},
{
"epoch": 0.8517520215633423,
"grad_norm": 0.539669930934906,
"learning_rate": 0.0005495132218024824,
"loss": 3.9074,
"step": 7900
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.618360161781311,
"learning_rate": 0.000549189422558014,
"loss": 3.9046,
"step": 7950
},
{
"epoch": 0.862533692722372,
"grad_norm": 0.6503387689590454,
"learning_rate": 0.0005488656233135456,
"loss": 3.8917,
"step": 8000
},
{
"epoch": 0.862533692722372,
"eval_accuracy": 0.33594255999893957,
"eval_loss": 3.8360989093780518,
"eval_runtime": 144.8786,
"eval_samples_per_second": 124.318,
"eval_steps_per_second": 7.772,
"step": 8000
},
{
"epoch": 0.8679245283018868,
"grad_norm": 0.6546068787574768,
"learning_rate": 0.0005485418240690771,
"loss": 3.9022,
"step": 8050
},
{
"epoch": 0.8733153638814016,
"grad_norm": 0.6453102231025696,
"learning_rate": 0.0005482180248246087,
"loss": 3.8843,
"step": 8100
},
{
"epoch": 0.8787061994609164,
"grad_norm": 0.5534443259239197,
"learning_rate": 0.0005478942255801403,
"loss": 3.8988,
"step": 8150
},
{
"epoch": 0.8840970350404312,
"grad_norm": 0.6230263113975525,
"learning_rate": 0.0005475704263356718,
"loss": 3.8879,
"step": 8200
},
{
"epoch": 0.889487870619946,
"grad_norm": 0.6733997464179993,
"learning_rate": 0.0005472466270912034,
"loss": 3.8729,
"step": 8250
},
{
"epoch": 0.894878706199461,
"grad_norm": 0.6633635759353638,
"learning_rate": 0.000546922827846735,
"loss": 3.8991,
"step": 8300
},
{
"epoch": 0.9002695417789758,
"grad_norm": 0.6503227949142456,
"learning_rate": 0.0005465990286022665,
"loss": 3.8792,
"step": 8350
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.7666671276092529,
"learning_rate": 0.0005462752293577981,
"loss": 3.8944,
"step": 8400
},
{
"epoch": 0.9110512129380054,
"grad_norm": 0.6036889553070068,
"learning_rate": 0.0005459514301133296,
"loss": 3.9057,
"step": 8450
},
{
"epoch": 0.9164420485175202,
"grad_norm": 0.6154916882514954,
"learning_rate": 0.0005456276308688613,
"loss": 3.894,
"step": 8500
},
{
"epoch": 0.921832884097035,
"grad_norm": 0.5653623938560486,
"learning_rate": 0.0005453038316243929,
"loss": 3.8763,
"step": 8550
},
{
"epoch": 0.9272237196765498,
"grad_norm": 0.5695276856422424,
"learning_rate": 0.0005449800323799244,
"loss": 3.8709,
"step": 8600
},
{
"epoch": 0.9326145552560647,
"grad_norm": 0.5406414270401001,
"learning_rate": 0.000544656233135456,
"loss": 3.8623,
"step": 8650
},
{
"epoch": 0.9380053908355795,
"grad_norm": 0.5876409411430359,
"learning_rate": 0.0005443324338909875,
"loss": 3.867,
"step": 8700
},
{
"epoch": 0.9433962264150944,
"grad_norm": 0.5680667757987976,
"learning_rate": 0.0005440086346465192,
"loss": 3.8691,
"step": 8750
},
{
"epoch": 0.9487870619946092,
"grad_norm": 0.5708035230636597,
"learning_rate": 0.0005436848354020506,
"loss": 3.8589,
"step": 8800
},
{
"epoch": 0.954177897574124,
"grad_norm": 0.5115900635719299,
"learning_rate": 0.0005433610361575823,
"loss": 3.8488,
"step": 8850
},
{
"epoch": 0.9595687331536388,
"grad_norm": 0.6006278395652771,
"learning_rate": 0.0005430372369131138,
"loss": 3.8652,
"step": 8900
},
{
"epoch": 0.9649595687331537,
"grad_norm": 0.6494414210319519,
"learning_rate": 0.0005427134376686454,
"loss": 3.8716,
"step": 8950
},
{
"epoch": 0.9703504043126685,
"grad_norm": 0.5888278484344482,
"learning_rate": 0.0005423896384241769,
"loss": 3.8592,
"step": 9000
},
{
"epoch": 0.9703504043126685,
"eval_accuracy": 0.34041004078720755,
"eval_loss": 3.7949342727661133,
"eval_runtime": 144.616,
"eval_samples_per_second": 124.544,
"eval_steps_per_second": 7.786,
"step": 9000
},
{
"epoch": 0.9757412398921833,
"grad_norm": 0.5697623491287231,
"learning_rate": 0.0005420658391797086,
"loss": 3.855,
"step": 9050
},
{
"epoch": 0.9811320754716981,
"grad_norm": 0.6447634696960449,
"learning_rate": 0.0005417420399352401,
"loss": 3.866,
"step": 9100
},
{
"epoch": 0.9865229110512129,
"grad_norm": 0.583088219165802,
"learning_rate": 0.0005414182406907717,
"loss": 3.8415,
"step": 9150
},
{
"epoch": 0.9919137466307277,
"grad_norm": 0.5796465277671814,
"learning_rate": 0.0005410944414463032,
"loss": 3.8376,
"step": 9200
},
{
"epoch": 0.9973045822102425,
"grad_norm": 0.6316399574279785,
"learning_rate": 0.0005407706422018348,
"loss": 3.8524,
"step": 9250
},
{
"epoch": 1.0026954177897573,
"grad_norm": 0.5727218389511108,
"learning_rate": 0.0005404468429573664,
"loss": 3.8268,
"step": 9300
},
{
"epoch": 1.0080862533692723,
"grad_norm": 0.5713456869125366,
"learning_rate": 0.000540123043712898,
"loss": 3.7921,
"step": 9350
},
{
"epoch": 1.013477088948787,
"grad_norm": 0.6198617815971375,
"learning_rate": 0.0005397992444684295,
"loss": 3.7876,
"step": 9400
},
{
"epoch": 1.0188679245283019,
"grad_norm": 0.5770072937011719,
"learning_rate": 0.0005394754452239611,
"loss": 3.7856,
"step": 9450
},
{
"epoch": 1.0242587601078168,
"grad_norm": 0.5920796990394592,
"learning_rate": 0.0005391516459794927,
"loss": 3.7855,
"step": 9500
},
{
"epoch": 1.0296495956873315,
"grad_norm": 0.5636431574821472,
"learning_rate": 0.0005388278467350242,
"loss": 3.7857,
"step": 9550
},
{
"epoch": 1.0350404312668464,
"grad_norm": 0.669791579246521,
"learning_rate": 0.0005385040474905557,
"loss": 3.7723,
"step": 9600
},
{
"epoch": 1.0404312668463611,
"grad_norm": 0.6635991930961609,
"learning_rate": 0.0005381802482460874,
"loss": 3.8142,
"step": 9650
},
{
"epoch": 1.045822102425876,
"grad_norm": 0.601437509059906,
"learning_rate": 0.000537856449001619,
"loss": 3.798,
"step": 9700
},
{
"epoch": 1.0512129380053907,
"grad_norm": 0.5584002137184143,
"learning_rate": 0.0005375326497571505,
"loss": 3.7985,
"step": 9750
},
{
"epoch": 1.0566037735849056,
"grad_norm": 0.5526735782623291,
"learning_rate": 0.000537208850512682,
"loss": 3.8074,
"step": 9800
},
{
"epoch": 1.0619946091644206,
"grad_norm": 0.5977774858474731,
"learning_rate": 0.0005368850512682137,
"loss": 3.7989,
"step": 9850
},
{
"epoch": 1.0673854447439353,
"grad_norm": 0.5950897932052612,
"learning_rate": 0.0005365612520237453,
"loss": 3.7924,
"step": 9900
},
{
"epoch": 1.0727762803234502,
"grad_norm": 0.5768362283706665,
"learning_rate": 0.0005362374527792768,
"loss": 3.7855,
"step": 9950
},
{
"epoch": 1.0781671159029649,
"grad_norm": 0.6516294479370117,
"learning_rate": 0.0005359136535348084,
"loss": 3.7811,
"step": 10000
},
{
"epoch": 1.0781671159029649,
"eval_accuracy": 0.34365658907510427,
"eval_loss": 3.762051582336426,
"eval_runtime": 144.5563,
"eval_samples_per_second": 124.595,
"eval_steps_per_second": 7.789,
"step": 10000
},
{
"epoch": 1.0835579514824798,
"grad_norm": 0.54844731092453,
"learning_rate": 0.0005355898542903399,
"loss": 3.7802,
"step": 10050
},
{
"epoch": 1.0889487870619945,
"grad_norm": 0.6120631694793701,
"learning_rate": 0.0005352660550458716,
"loss": 3.789,
"step": 10100
},
{
"epoch": 1.0943396226415094,
"grad_norm": 0.5617305636405945,
"learning_rate": 0.000534942255801403,
"loss": 3.784,
"step": 10150
},
{
"epoch": 1.0997304582210243,
"grad_norm": 0.6198379993438721,
"learning_rate": 0.0005346184565569347,
"loss": 3.7835,
"step": 10200
},
{
"epoch": 1.105121293800539,
"grad_norm": 0.5185474753379822,
"learning_rate": 0.0005342946573124662,
"loss": 3.7928,
"step": 10250
},
{
"epoch": 1.110512129380054,
"grad_norm": 0.5625671744346619,
"learning_rate": 0.0005339708580679978,
"loss": 3.7792,
"step": 10300
},
{
"epoch": 1.1159029649595686,
"grad_norm": 0.6791179180145264,
"learning_rate": 0.0005336470588235293,
"loss": 3.7647,
"step": 10350
},
{
"epoch": 1.1212938005390836,
"grad_norm": 0.5952463746070862,
"learning_rate": 0.000533323259579061,
"loss": 3.7826,
"step": 10400
},
{
"epoch": 1.1266846361185983,
"grad_norm": 0.5367695689201355,
"learning_rate": 0.0005329994603345925,
"loss": 3.7714,
"step": 10450
},
{
"epoch": 1.1320754716981132,
"grad_norm": 0.6577426195144653,
"learning_rate": 0.0005326756610901241,
"loss": 3.7588,
"step": 10500
},
{
"epoch": 1.137466307277628,
"grad_norm": 0.5481351017951965,
"learning_rate": 0.0005323518618456556,
"loss": 3.7808,
"step": 10550
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.5973223447799683,
"learning_rate": 0.0005320280626011872,
"loss": 3.7903,
"step": 10600
},
{
"epoch": 1.1482479784366577,
"grad_norm": 0.6764339804649353,
"learning_rate": 0.0005317042633567188,
"loss": 3.7673,
"step": 10650
},
{
"epoch": 1.1536388140161726,
"grad_norm": 0.6202555894851685,
"learning_rate": 0.0005313804641122504,
"loss": 3.7633,
"step": 10700
},
{
"epoch": 1.1590296495956873,
"grad_norm": 0.6169992089271545,
"learning_rate": 0.0005310566648677819,
"loss": 3.7519,
"step": 10750
},
{
"epoch": 1.1644204851752022,
"grad_norm": 0.5893779993057251,
"learning_rate": 0.0005307328656233135,
"loss": 3.7433,
"step": 10800
},
{
"epoch": 1.169811320754717,
"grad_norm": 0.5708191394805908,
"learning_rate": 0.000530409066378845,
"loss": 3.7609,
"step": 10850
},
{
"epoch": 1.1752021563342319,
"grad_norm": 0.5436216592788696,
"learning_rate": 0.0005300852671343766,
"loss": 3.7635,
"step": 10900
},
{
"epoch": 1.1805929919137466,
"grad_norm": 0.5945119261741638,
"learning_rate": 0.0005297614678899081,
"loss": 3.7755,
"step": 10950
},
{
"epoch": 1.1859838274932615,
"grad_norm": 0.5752778053283691,
"learning_rate": 0.0005294376686454398,
"loss": 3.7908,
"step": 11000
},
{
"epoch": 1.1859838274932615,
"eval_accuracy": 0.3459605735612103,
"eval_loss": 3.7342617511749268,
"eval_runtime": 145.0651,
"eval_samples_per_second": 124.158,
"eval_steps_per_second": 7.762,
"step": 11000
},
{
"epoch": 1.1913746630727764,
"grad_norm": 0.5780414342880249,
"learning_rate": 0.0005291138694009714,
"loss": 3.7773,
"step": 11050
},
{
"epoch": 1.196765498652291,
"grad_norm": 0.6393083930015564,
"learning_rate": 0.0005287900701565029,
"loss": 3.7719,
"step": 11100
},
{
"epoch": 1.202156334231806,
"grad_norm": 0.5772005319595337,
"learning_rate": 0.0005284662709120345,
"loss": 3.7688,
"step": 11150
},
{
"epoch": 1.2075471698113207,
"grad_norm": 0.5036531686782837,
"learning_rate": 0.0005281424716675661,
"loss": 3.7578,
"step": 11200
},
{
"epoch": 1.2129380053908356,
"grad_norm": 0.56954026222229,
"learning_rate": 0.0005278186724230977,
"loss": 3.7505,
"step": 11250
},
{
"epoch": 1.2183288409703503,
"grad_norm": 0.6294519901275635,
"learning_rate": 0.0005274948731786292,
"loss": 3.7672,
"step": 11300
},
{
"epoch": 1.2237196765498652,
"grad_norm": 0.6276326775550842,
"learning_rate": 0.0005271710739341608,
"loss": 3.7523,
"step": 11350
},
{
"epoch": 1.2291105121293802,
"grad_norm": 0.6219097971916199,
"learning_rate": 0.0005268472746896923,
"loss": 3.7687,
"step": 11400
},
{
"epoch": 1.2345013477088949,
"grad_norm": 0.5939977765083313,
"learning_rate": 0.000526523475445224,
"loss": 3.7595,
"step": 11450
},
{
"epoch": 1.2398921832884098,
"grad_norm": 0.6375150680541992,
"learning_rate": 0.0005261996762007554,
"loss": 3.7498,
"step": 11500
},
{
"epoch": 1.2452830188679245,
"grad_norm": 0.534351110458374,
"learning_rate": 0.0005258758769562871,
"loss": 3.7602,
"step": 11550
},
{
"epoch": 1.2506738544474394,
"grad_norm": 0.5753054618835449,
"learning_rate": 0.0005255520777118186,
"loss": 3.7345,
"step": 11600
},
{
"epoch": 1.256064690026954,
"grad_norm": 0.5968658328056335,
"learning_rate": 0.0005252282784673502,
"loss": 3.7679,
"step": 11650
},
{
"epoch": 1.261455525606469,
"grad_norm": 0.6453511118888855,
"learning_rate": 0.0005249044792228817,
"loss": 3.7641,
"step": 11700
},
{
"epoch": 1.266846361185984,
"grad_norm": 0.5844177007675171,
"learning_rate": 0.0005245806799784133,
"loss": 3.7551,
"step": 11750
},
{
"epoch": 1.2722371967654986,
"grad_norm": 0.5597920417785645,
"learning_rate": 0.0005242568807339449,
"loss": 3.7458,
"step": 11800
},
{
"epoch": 1.2776280323450135,
"grad_norm": 0.6149951815605164,
"learning_rate": 0.0005239330814894765,
"loss": 3.7582,
"step": 11850
},
{
"epoch": 1.2830188679245282,
"grad_norm": 0.5561873316764832,
"learning_rate": 0.000523609282245008,
"loss": 3.7456,
"step": 11900
},
{
"epoch": 1.2884097035040432,
"grad_norm": 0.6077041029930115,
"learning_rate": 0.0005232854830005396,
"loss": 3.7601,
"step": 11950
},
{
"epoch": 1.2938005390835579,
"grad_norm": 0.6389844417572021,
"learning_rate": 0.0005229616837560712,
"loss": 3.7514,
"step": 12000
},
{
"epoch": 1.2938005390835579,
"eval_accuracy": 0.3482391332715678,
"eval_loss": 3.7092418670654297,
"eval_runtime": 144.795,
"eval_samples_per_second": 124.39,
"eval_steps_per_second": 7.777,
"step": 12000
},
{
"epoch": 1.2991913746630728,
"grad_norm": 0.6131287217140198,
"learning_rate": 0.0005226378845116028,
"loss": 3.7472,
"step": 12050
},
{
"epoch": 1.3045822102425877,
"grad_norm": 0.5687003135681152,
"learning_rate": 0.0005223140852671344,
"loss": 3.7541,
"step": 12100
},
{
"epoch": 1.3099730458221024,
"grad_norm": 0.603330135345459,
"learning_rate": 0.0005219902860226659,
"loss": 3.7719,
"step": 12150
},
{
"epoch": 1.3153638814016173,
"grad_norm": 0.5906956791877747,
"learning_rate": 0.0005216664867781975,
"loss": 3.7419,
"step": 12200
},
{
"epoch": 1.320754716981132,
"grad_norm": 0.5257678627967834,
"learning_rate": 0.000521342687533729,
"loss": 3.7494,
"step": 12250
},
{
"epoch": 1.326145552560647,
"grad_norm": 0.55323725938797,
"learning_rate": 0.0005210188882892606,
"loss": 3.7598,
"step": 12300
},
{
"epoch": 1.3315363881401616,
"grad_norm": 0.6195726990699768,
"learning_rate": 0.0005206950890447922,
"loss": 3.7455,
"step": 12350
},
{
"epoch": 1.3369272237196765,
"grad_norm": 0.6453518271446228,
"learning_rate": 0.0005203712898003238,
"loss": 3.7346,
"step": 12400
},
{
"epoch": 1.3423180592991915,
"grad_norm": 0.591871976852417,
"learning_rate": 0.0005200474905558553,
"loss": 3.7498,
"step": 12450
},
{
"epoch": 1.3477088948787062,
"grad_norm": 0.8153529167175293,
"learning_rate": 0.0005197236913113869,
"loss": 3.73,
"step": 12500
},
{
"epoch": 1.353099730458221,
"grad_norm": 0.6054574847221375,
"learning_rate": 0.0005193998920669184,
"loss": 3.7438,
"step": 12550
},
{
"epoch": 1.3584905660377358,
"grad_norm": 0.5890757441520691,
"learning_rate": 0.0005190760928224501,
"loss": 3.7383,
"step": 12600
},
{
"epoch": 1.3638814016172507,
"grad_norm": 0.5858767032623291,
"learning_rate": 0.0005187522935779816,
"loss": 3.7272,
"step": 12650
},
{
"epoch": 1.3692722371967654,
"grad_norm": 0.5663976073265076,
"learning_rate": 0.0005184284943335132,
"loss": 3.7232,
"step": 12700
},
{
"epoch": 1.3746630727762803,
"grad_norm": 0.6173025965690613,
"learning_rate": 0.0005181046950890447,
"loss": 3.7428,
"step": 12750
},
{
"epoch": 1.3800539083557952,
"grad_norm": 0.5534438490867615,
"learning_rate": 0.0005177808958445764,
"loss": 3.7214,
"step": 12800
},
{
"epoch": 1.38544474393531,
"grad_norm": 0.6565149426460266,
"learning_rate": 0.0005174570966001078,
"loss": 3.7343,
"step": 12850
},
{
"epoch": 1.3908355795148248,
"grad_norm": 0.518814742565155,
"learning_rate": 0.0005171332973556395,
"loss": 3.7343,
"step": 12900
},
{
"epoch": 1.3962264150943398,
"grad_norm": 0.6175557374954224,
"learning_rate": 0.000516809498111171,
"loss": 3.726,
"step": 12950
},
{
"epoch": 1.4016172506738545,
"grad_norm": 0.525554895401001,
"learning_rate": 0.0005164856988667026,
"loss": 3.7207,
"step": 13000
},
{
"epoch": 1.4016172506738545,
"eval_accuracy": 0.3513273743019188,
"eval_loss": 3.684098482131958,
"eval_runtime": 145.0832,
"eval_samples_per_second": 124.143,
"eval_steps_per_second": 7.761,
"step": 13000
},
{
"epoch": 1.4070080862533692,
"grad_norm": 0.5694339275360107,
"learning_rate": 0.0005161618996222341,
"loss": 3.7017,
"step": 13050
},
{
"epoch": 1.412398921832884,
"grad_norm": 0.5720424056053162,
"learning_rate": 0.0005158381003777657,
"loss": 3.7224,
"step": 13100
},
{
"epoch": 1.417789757412399,
"grad_norm": 0.5959078073501587,
"learning_rate": 0.0005155143011332973,
"loss": 3.7268,
"step": 13150
},
{
"epoch": 1.4231805929919137,
"grad_norm": 0.5883017182350159,
"learning_rate": 0.0005151905018888289,
"loss": 3.7077,
"step": 13200
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.5214637517929077,
"learning_rate": 0.0005148667026443604,
"loss": 3.7251,
"step": 13250
},
{
"epoch": 1.4339622641509435,
"grad_norm": 0.5803607106208801,
"learning_rate": 0.000514542903399892,
"loss": 3.7207,
"step": 13300
},
{
"epoch": 1.4393530997304582,
"grad_norm": 0.6287586688995361,
"learning_rate": 0.0005142191041554237,
"loss": 3.7256,
"step": 13350
},
{
"epoch": 1.444743935309973,
"grad_norm": 0.614562451839447,
"learning_rate": 0.0005138953049109552,
"loss": 3.7166,
"step": 13400
},
{
"epoch": 1.4501347708894878,
"grad_norm": 0.5314472317695618,
"learning_rate": 0.0005135715056664868,
"loss": 3.7338,
"step": 13450
},
{
"epoch": 1.4555256064690028,
"grad_norm": 0.6482129693031311,
"learning_rate": 0.0005132477064220183,
"loss": 3.7264,
"step": 13500
},
{
"epoch": 1.4609164420485174,
"grad_norm": 0.5918267369270325,
"learning_rate": 0.0005129239071775499,
"loss": 3.7023,
"step": 13550
},
{
"epoch": 1.4663072776280324,
"grad_norm": 0.5850944519042969,
"learning_rate": 0.0005126001079330814,
"loss": 3.7298,
"step": 13600
},
{
"epoch": 1.4716981132075473,
"grad_norm": 0.5356786847114563,
"learning_rate": 0.000512276308688613,
"loss": 3.7098,
"step": 13650
},
{
"epoch": 1.477088948787062,
"grad_norm": 0.5910783410072327,
"learning_rate": 0.0005119525094441446,
"loss": 3.7309,
"step": 13700
},
{
"epoch": 1.482479784366577,
"grad_norm": 0.5758869051933289,
"learning_rate": 0.0005116287101996762,
"loss": 3.703,
"step": 13750
},
{
"epoch": 1.4878706199460916,
"grad_norm": 0.5499842762947083,
"learning_rate": 0.0005113049109552077,
"loss": 3.7275,
"step": 13800
},
{
"epoch": 1.4932614555256065,
"grad_norm": 0.6198793053627014,
"learning_rate": 0.0005109811117107393,
"loss": 3.7118,
"step": 13850
},
{
"epoch": 1.4986522911051212,
"grad_norm": 0.5693920850753784,
"learning_rate": 0.0005106573124662708,
"loss": 3.7209,
"step": 13900
},
{
"epoch": 1.5040431266846361,
"grad_norm": 0.6063724160194397,
"learning_rate": 0.0005103335132218025,
"loss": 3.7108,
"step": 13950
},
{
"epoch": 1.509433962264151,
"grad_norm": 0.5341413021087646,
"learning_rate": 0.000510009713977334,
"loss": 3.7092,
"step": 14000
},
{
"epoch": 1.509433962264151,
"eval_accuracy": 0.35327356482845174,
"eval_loss": 3.66573429107666,
"eval_runtime": 144.4021,
"eval_samples_per_second": 124.728,
"eval_steps_per_second": 7.798,
"step": 14000
},
{
"epoch": 1.5148247978436657,
"grad_norm": 0.5940130352973938,
"learning_rate": 0.0005096859147328656,
"loss": 3.7083,
"step": 14050
},
{
"epoch": 1.5202156334231804,
"grad_norm": 0.5854769945144653,
"learning_rate": 0.0005093621154883971,
"loss": 3.7072,
"step": 14100
},
{
"epoch": 1.5256064690026954,
"grad_norm": 0.5369813442230225,
"learning_rate": 0.0005090383162439288,
"loss": 3.6994,
"step": 14150
},
{
"epoch": 1.5309973045822103,
"grad_norm": 0.6851678490638733,
"learning_rate": 0.0005087145169994602,
"loss": 3.7003,
"step": 14200
},
{
"epoch": 1.536388140161725,
"grad_norm": 0.5451446771621704,
"learning_rate": 0.0005083907177549918,
"loss": 3.7145,
"step": 14250
},
{
"epoch": 1.54177897574124,
"grad_norm": 0.550528347492218,
"learning_rate": 0.0005080669185105234,
"loss": 3.7145,
"step": 14300
},
{
"epoch": 1.5471698113207548,
"grad_norm": 0.5607097744941711,
"learning_rate": 0.000507743119266055,
"loss": 3.7074,
"step": 14350
},
{
"epoch": 1.5525606469002695,
"grad_norm": 0.5699529051780701,
"learning_rate": 0.0005074193200215865,
"loss": 3.7239,
"step": 14400
},
{
"epoch": 1.5579514824797842,
"grad_norm": 0.6193976402282715,
"learning_rate": 0.0005070955207771181,
"loss": 3.7146,
"step": 14450
},
{
"epoch": 1.5633423180592994,
"grad_norm": 0.5902137756347656,
"learning_rate": 0.0005067717215326498,
"loss": 3.6944,
"step": 14500
},
{
"epoch": 1.568733153638814,
"grad_norm": 0.5481423735618591,
"learning_rate": 0.0005064479222881813,
"loss": 3.7103,
"step": 14550
},
{
"epoch": 1.5741239892183287,
"grad_norm": 0.6323958039283752,
"learning_rate": 0.0005061241230437129,
"loss": 3.6955,
"step": 14600
},
{
"epoch": 1.5795148247978437,
"grad_norm": 0.6062991619110107,
"learning_rate": 0.0005058003237992444,
"loss": 3.6937,
"step": 14650
},
{
"epoch": 1.5849056603773586,
"grad_norm": 0.5340829491615295,
"learning_rate": 0.0005054830005396654,
"loss": 3.7098,
"step": 14700
},
{
"epoch": 1.5902964959568733,
"grad_norm": 0.590190052986145,
"learning_rate": 0.0005051592012951969,
"loss": 3.7098,
"step": 14750
},
{
"epoch": 1.595687331536388,
"grad_norm": 0.5628999471664429,
"learning_rate": 0.0005048354020507286,
"loss": 3.6799,
"step": 14800
},
{
"epoch": 1.6010781671159031,
"grad_norm": 0.5697661638259888,
"learning_rate": 0.00050451160280626,
"loss": 3.7014,
"step": 14850
},
{
"epoch": 1.6064690026954178,
"grad_norm": 0.5965352058410645,
"learning_rate": 0.0005041878035617917,
"loss": 3.7081,
"step": 14900
},
{
"epoch": 1.6118598382749325,
"grad_norm": 0.5691752433776855,
"learning_rate": 0.0005038640043173232,
"loss": 3.697,
"step": 14950
},
{
"epoch": 1.6172506738544474,
"grad_norm": 0.5844449400901794,
"learning_rate": 0.0005035402050728548,
"loss": 3.6968,
"step": 15000
},
{
"epoch": 1.6172506738544474,
"eval_accuracy": 0.35501950808274263,
"eval_loss": 3.6473617553710938,
"eval_runtime": 144.9331,
"eval_samples_per_second": 124.271,
"eval_steps_per_second": 7.769,
"step": 15000
},
{
"epoch": 1.6226415094339623,
"grad_norm": 0.5243232846260071,
"learning_rate": 0.0005032164058283863,
"loss": 3.6876,
"step": 15050
},
{
"epoch": 1.628032345013477,
"grad_norm": 0.521431565284729,
"learning_rate": 0.0005028926065839179,
"loss": 3.6816,
"step": 15100
},
{
"epoch": 1.633423180592992,
"grad_norm": 0.54181307554245,
"learning_rate": 0.0005025688073394495,
"loss": 3.681,
"step": 15150
},
{
"epoch": 1.6388140161725069,
"grad_norm": 0.5746421217918396,
"learning_rate": 0.0005022450080949811,
"loss": 3.6847,
"step": 15200
},
{
"epoch": 1.6442048517520216,
"grad_norm": 0.5637415051460266,
"learning_rate": 0.0005019212088505126,
"loss": 3.6782,
"step": 15250
},
{
"epoch": 1.6495956873315363,
"grad_norm": 0.5300856828689575,
"learning_rate": 0.0005015974096060442,
"loss": 3.6848,
"step": 15300
},
{
"epoch": 1.6549865229110512,
"grad_norm": 0.5390794277191162,
"learning_rate": 0.0005012736103615758,
"loss": 3.7087,
"step": 15350
},
{
"epoch": 1.6603773584905661,
"grad_norm": 0.6010688543319702,
"learning_rate": 0.0005009498111171074,
"loss": 3.6966,
"step": 15400
},
{
"epoch": 1.6657681940700808,
"grad_norm": 0.548942506313324,
"learning_rate": 0.0005006260118726389,
"loss": 3.6648,
"step": 15450
},
{
"epoch": 1.6711590296495957,
"grad_norm": 0.5794510245323181,
"learning_rate": 0.0005003022126281705,
"loss": 3.6827,
"step": 15500
},
{
"epoch": 1.6765498652291106,
"grad_norm": 0.5393055081367493,
"learning_rate": 0.000499978413383702,
"loss": 3.68,
"step": 15550
},
{
"epoch": 1.6819407008086253,
"grad_norm": 0.5647615194320679,
"learning_rate": 0.0004996546141392336,
"loss": 3.6677,
"step": 15600
},
{
"epoch": 1.68733153638814,
"grad_norm": 0.5918028354644775,
"learning_rate": 0.0004993308148947651,
"loss": 3.6859,
"step": 15650
},
{
"epoch": 1.692722371967655,
"grad_norm": 0.5556984543800354,
"learning_rate": 0.0004990070156502968,
"loss": 3.6733,
"step": 15700
},
{
"epoch": 1.6981132075471699,
"grad_norm": 0.5747886896133423,
"learning_rate": 0.0004986832164058284,
"loss": 3.6979,
"step": 15750
},
{
"epoch": 1.7035040431266846,
"grad_norm": 0.5632966756820679,
"learning_rate": 0.0004983594171613599,
"loss": 3.6957,
"step": 15800
},
{
"epoch": 1.7088948787061995,
"grad_norm": 0.5908682942390442,
"learning_rate": 0.0004980356179168915,
"loss": 3.6886,
"step": 15850
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.6491772532463074,
"learning_rate": 0.000497711818672423,
"loss": 3.6902,
"step": 15900
},
{
"epoch": 1.719676549865229,
"grad_norm": 0.594153106212616,
"learning_rate": 0.0004973880194279547,
"loss": 3.6752,
"step": 15950
},
{
"epoch": 1.7250673854447438,
"grad_norm": 0.6420136094093323,
"learning_rate": 0.0004970642201834862,
"loss": 3.6724,
"step": 16000
},
{
"epoch": 1.7250673854447438,
"eval_accuracy": 0.3570638121669721,
"eval_loss": 3.6263442039489746,
"eval_runtime": 144.5518,
"eval_samples_per_second": 124.599,
"eval_steps_per_second": 7.79,
"step": 16000
},
{
"epoch": 1.7304582210242587,
"grad_norm": 0.5980614423751831,
"learning_rate": 0.0004967404209390178,
"loss": 3.6727,
"step": 16050
},
{
"epoch": 1.7358490566037736,
"grad_norm": 0.5954625010490417,
"learning_rate": 0.0004964166216945493,
"loss": 3.6587,
"step": 16100
},
{
"epoch": 1.7412398921832883,
"grad_norm": 0.563701868057251,
"learning_rate": 0.000496092822450081,
"loss": 3.683,
"step": 16150
},
{
"epoch": 1.7466307277628033,
"grad_norm": 0.5353266596794128,
"learning_rate": 0.0004957690232056125,
"loss": 3.6928,
"step": 16200
},
{
"epoch": 1.7520215633423182,
"grad_norm": 0.6115831136703491,
"learning_rate": 0.0004954452239611441,
"loss": 3.6697,
"step": 16250
},
{
"epoch": 1.7574123989218329,
"grad_norm": 0.5373592376708984,
"learning_rate": 0.0004951214247166756,
"loss": 3.6907,
"step": 16300
},
{
"epoch": 1.7628032345013476,
"grad_norm": 0.7120974063873291,
"learning_rate": 0.0004947976254722072,
"loss": 3.6741,
"step": 16350
},
{
"epoch": 1.7681940700808625,
"grad_norm": 0.5365995764732361,
"learning_rate": 0.0004944738262277387,
"loss": 3.6656,
"step": 16400
},
{
"epoch": 1.7735849056603774,
"grad_norm": 0.6026771664619446,
"learning_rate": 0.0004941500269832703,
"loss": 3.6708,
"step": 16450
},
{
"epoch": 1.778975741239892,
"grad_norm": 0.5560598969459534,
"learning_rate": 0.0004938262277388019,
"loss": 3.6594,
"step": 16500
},
{
"epoch": 1.784366576819407,
"grad_norm": 0.6191685199737549,
"learning_rate": 0.0004935024284943335,
"loss": 3.685,
"step": 16550
},
{
"epoch": 1.789757412398922,
"grad_norm": 0.6867356896400452,
"learning_rate": 0.000493178629249865,
"loss": 3.6698,
"step": 16600
},
{
"epoch": 1.7951482479784366,
"grad_norm": 0.5517817139625549,
"learning_rate": 0.0004928548300053966,
"loss": 3.6725,
"step": 16650
},
{
"epoch": 1.8005390835579513,
"grad_norm": 0.5496578216552734,
"learning_rate": 0.0004925310307609282,
"loss": 3.6745,
"step": 16700
},
{
"epoch": 1.8059299191374663,
"grad_norm": 0.540912926197052,
"learning_rate": 0.0004922072315164598,
"loss": 3.6624,
"step": 16750
},
{
"epoch": 1.8113207547169812,
"grad_norm": 0.5637075304985046,
"learning_rate": 0.0004918834322719913,
"loss": 3.6658,
"step": 16800
},
{
"epoch": 1.8167115902964959,
"grad_norm": 0.5674017071723938,
"learning_rate": 0.0004915596330275229,
"loss": 3.6701,
"step": 16850
},
{
"epoch": 1.8221024258760108,
"grad_norm": 0.5619826912879944,
"learning_rate": 0.0004912487857528331,
"loss": 3.6714,
"step": 16900
},
{
"epoch": 1.8274932614555257,
"grad_norm": 0.5217414498329163,
"learning_rate": 0.0004909249865083648,
"loss": 3.6863,
"step": 16950
},
{
"epoch": 1.8328840970350404,
"grad_norm": 0.565362811088562,
"learning_rate": 0.0004906011872638964,
"loss": 3.6737,
"step": 17000
},
{
"epoch": 1.8328840970350404,
"eval_accuracy": 0.3582435652228617,
"eval_loss": 3.6162045001983643,
"eval_runtime": 144.7573,
"eval_samples_per_second": 124.422,
"eval_steps_per_second": 7.779,
"step": 17000
},
{
"epoch": 1.838274932614555,
"grad_norm": 0.5198625326156616,
"learning_rate": 0.0004902773880194279,
"loss": 3.6515,
"step": 17050
},
{
"epoch": 1.8436657681940702,
"grad_norm": 0.5708647966384888,
"learning_rate": 0.0004899535887749595,
"loss": 3.6661,
"step": 17100
},
{
"epoch": 1.849056603773585,
"grad_norm": 0.583521842956543,
"learning_rate": 0.000489629789530491,
"loss": 3.6622,
"step": 17150
},
{
"epoch": 1.8544474393530996,
"grad_norm": 0.5446178913116455,
"learning_rate": 0.0004893059902860227,
"loss": 3.647,
"step": 17200
},
{
"epoch": 1.8598382749326146,
"grad_norm": 0.5899081230163574,
"learning_rate": 0.0004889821910415542,
"loss": 3.6697,
"step": 17250
},
{
"epoch": 1.8652291105121295,
"grad_norm": 0.6142265796661377,
"learning_rate": 0.0004886583917970858,
"loss": 3.6597,
"step": 17300
},
{
"epoch": 1.8706199460916442,
"grad_norm": 0.9669179320335388,
"learning_rate": 0.0004883345925526173,
"loss": 3.6618,
"step": 17350
},
{
"epoch": 1.8760107816711589,
"grad_norm": 0.564871072769165,
"learning_rate": 0.00048801079330814887,
"loss": 3.6571,
"step": 17400
},
{
"epoch": 1.881401617250674,
"grad_norm": 0.5841614007949829,
"learning_rate": 0.00048768699406368047,
"loss": 3.6743,
"step": 17450
},
{
"epoch": 1.8867924528301887,
"grad_norm": 0.5918266773223877,
"learning_rate": 0.000487363194819212,
"loss": 3.6675,
"step": 17500
},
{
"epoch": 1.8921832884097034,
"grad_norm": 0.5641137361526489,
"learning_rate": 0.0004870393955747436,
"loss": 3.6488,
"step": 17550
},
{
"epoch": 1.8975741239892183,
"grad_norm": 0.5747765898704529,
"learning_rate": 0.0004867155963302752,
"loss": 3.6558,
"step": 17600
},
{
"epoch": 1.9029649595687332,
"grad_norm": 0.5801583528518677,
"learning_rate": 0.0004863917970858068,
"loss": 3.6348,
"step": 17650
},
{
"epoch": 1.908355795148248,
"grad_norm": 0.5788767337799072,
"learning_rate": 0.00048606799784133833,
"loss": 3.6446,
"step": 17700
},
{
"epoch": 1.9137466307277629,
"grad_norm": 0.5854235291481018,
"learning_rate": 0.00048574419859686994,
"loss": 3.648,
"step": 17750
},
{
"epoch": 1.9191374663072778,
"grad_norm": 0.5577847957611084,
"learning_rate": 0.0004854203993524015,
"loss": 3.6665,
"step": 17800
},
{
"epoch": 1.9245283018867925,
"grad_norm": 0.536088228225708,
"learning_rate": 0.000485096600107933,
"loss": 3.6671,
"step": 17850
},
{
"epoch": 1.9299191374663072,
"grad_norm": 0.5481696724891663,
"learning_rate": 0.00048477280086346464,
"loss": 3.6721,
"step": 17900
},
{
"epoch": 1.935309973045822,
"grad_norm": 0.547725260257721,
"learning_rate": 0.00048444900161899614,
"loss": 3.6622,
"step": 17950
},
{
"epoch": 1.940700808625337,
"grad_norm": 0.5937644243240356,
"learning_rate": 0.00048412520237452774,
"loss": 3.6477,
"step": 18000
},
{
"epoch": 1.940700808625337,
"eval_accuracy": 0.3597537317105952,
"eval_loss": 3.5971779823303223,
"eval_runtime": 144.7293,
"eval_samples_per_second": 124.446,
"eval_steps_per_second": 7.78,
"step": 18000
},
{
"epoch": 1.9460916442048517,
"grad_norm": 0.5931717753410339,
"learning_rate": 0.0004838014031300593,
"loss": 3.6358,
"step": 18050
},
{
"epoch": 1.9514824797843666,
"grad_norm": 0.6099575757980347,
"learning_rate": 0.0004834776038855909,
"loss": 3.644,
"step": 18100
},
{
"epoch": 1.9568733153638815,
"grad_norm": 0.5704085230827332,
"learning_rate": 0.00048315380464112245,
"loss": 3.6517,
"step": 18150
},
{
"epoch": 1.9622641509433962,
"grad_norm": 0.5262129902839661,
"learning_rate": 0.00048283000539665405,
"loss": 3.6523,
"step": 18200
},
{
"epoch": 1.967654986522911,
"grad_norm": 0.512693464756012,
"learning_rate": 0.0004825062061521856,
"loss": 3.6432,
"step": 18250
},
{
"epoch": 1.9730458221024259,
"grad_norm": 0.5602070093154907,
"learning_rate": 0.00048218240690771716,
"loss": 3.6688,
"step": 18300
},
{
"epoch": 1.9784366576819408,
"grad_norm": 0.5258756875991821,
"learning_rate": 0.00048185860766324876,
"loss": 3.6471,
"step": 18350
},
{
"epoch": 1.9838274932614555,
"grad_norm": 0.5815879106521606,
"learning_rate": 0.0004815348084187803,
"loss": 3.6496,
"step": 18400
},
{
"epoch": 1.9892183288409704,
"grad_norm": 0.5756340622901917,
"learning_rate": 0.0004812110091743119,
"loss": 3.6589,
"step": 18450
},
{
"epoch": 1.9946091644204853,
"grad_norm": 0.6157427430152893,
"learning_rate": 0.00048088720992984347,
"loss": 3.6378,
"step": 18500
},
{
"epoch": 2.0,
"grad_norm": 1.1887223720550537,
"learning_rate": 0.00048056341068537507,
"loss": 3.6418,
"step": 18550
},
{
"epoch": 2.0053908355795147,
"grad_norm": 0.5796721577644348,
"learning_rate": 0.00048023961144090657,
"loss": 3.5398,
"step": 18600
},
{
"epoch": 2.01078167115903,
"grad_norm": 0.5702629089355469,
"learning_rate": 0.00047991581219643817,
"loss": 3.5522,
"step": 18650
},
{
"epoch": 2.0161725067385445,
"grad_norm": 0.5687718391418457,
"learning_rate": 0.0004795920129519697,
"loss": 3.5469,
"step": 18700
},
{
"epoch": 2.0215633423180592,
"grad_norm": 0.6058363914489746,
"learning_rate": 0.0004792682137075013,
"loss": 3.5565,
"step": 18750
},
{
"epoch": 2.026954177897574,
"grad_norm": 0.5502753257751465,
"learning_rate": 0.0004789444144630329,
"loss": 3.5781,
"step": 18800
},
{
"epoch": 2.032345013477089,
"grad_norm": 0.5815129280090332,
"learning_rate": 0.00047862061521856443,
"loss": 3.5754,
"step": 18850
},
{
"epoch": 2.0377358490566038,
"grad_norm": 0.5740176439285278,
"learning_rate": 0.00047829681597409603,
"loss": 3.5584,
"step": 18900
},
{
"epoch": 2.0431266846361185,
"grad_norm": 0.5666323304176331,
"learning_rate": 0.0004779730167296276,
"loss": 3.5638,
"step": 18950
},
{
"epoch": 2.0485175202156336,
"grad_norm": 0.5930228233337402,
"learning_rate": 0.0004776492174851592,
"loss": 3.5922,
"step": 19000
},
{
"epoch": 2.0485175202156336,
"eval_accuracy": 0.36108874974317173,
"eval_loss": 3.588949203491211,
"eval_runtime": 144.8607,
"eval_samples_per_second": 124.333,
"eval_steps_per_second": 7.773,
"step": 19000
},
{
"epoch": 2.0539083557951483,
"grad_norm": 0.5626940727233887,
"learning_rate": 0.00047732541824069074,
"loss": 3.5695,
"step": 19050
},
{
"epoch": 2.059299191374663,
"grad_norm": 0.6210435628890991,
"learning_rate": 0.0004770016189962223,
"loss": 3.5607,
"step": 19100
},
{
"epoch": 2.0646900269541777,
"grad_norm": 0.5758371949195862,
"learning_rate": 0.0004766778197517539,
"loss": 3.5751,
"step": 19150
},
{
"epoch": 2.070080862533693,
"grad_norm": 0.6037834286689758,
"learning_rate": 0.0004763540205072854,
"loss": 3.5875,
"step": 19200
},
{
"epoch": 2.0754716981132075,
"grad_norm": 0.5686853528022766,
"learning_rate": 0.00047603022126281705,
"loss": 3.5713,
"step": 19250
},
{
"epoch": 2.0808625336927222,
"grad_norm": 0.6485291123390198,
"learning_rate": 0.00047570642201834855,
"loss": 3.5609,
"step": 19300
},
{
"epoch": 2.0862533692722374,
"grad_norm": 0.6449443697929382,
"learning_rate": 0.00047538262277388015,
"loss": 3.5789,
"step": 19350
},
{
"epoch": 2.091644204851752,
"grad_norm": 0.550469696521759,
"learning_rate": 0.0004750588235294117,
"loss": 3.554,
"step": 19400
},
{
"epoch": 2.0970350404312668,
"grad_norm": 0.5864623785018921,
"learning_rate": 0.0004747350242849433,
"loss": 3.5732,
"step": 19450
},
{
"epoch": 2.1024258760107815,
"grad_norm": 0.6241238117218018,
"learning_rate": 0.00047441122504047486,
"loss": 3.5717,
"step": 19500
},
{
"epoch": 2.1078167115902966,
"grad_norm": 0.602936327457428,
"learning_rate": 0.0004740874257960064,
"loss": 3.5604,
"step": 19550
},
{
"epoch": 2.1132075471698113,
"grad_norm": 0.5674468874931335,
"learning_rate": 0.000473763626551538,
"loss": 3.5643,
"step": 19600
},
{
"epoch": 2.118598382749326,
"grad_norm": 0.600834310054779,
"learning_rate": 0.00047343982730706956,
"loss": 3.5695,
"step": 19650
},
{
"epoch": 2.123989218328841,
"grad_norm": 0.5601726770401001,
"learning_rate": 0.00047311602806260117,
"loss": 3.5724,
"step": 19700
},
{
"epoch": 2.129380053908356,
"grad_norm": 0.5551213622093201,
"learning_rate": 0.0004727922288181327,
"loss": 3.5783,
"step": 19750
},
{
"epoch": 2.1347708894878705,
"grad_norm": 0.5737272500991821,
"learning_rate": 0.0004724684295736643,
"loss": 3.5642,
"step": 19800
},
{
"epoch": 2.1401617250673857,
"grad_norm": 0.5712146162986755,
"learning_rate": 0.0004721446303291959,
"loss": 3.5894,
"step": 19850
},
{
"epoch": 2.1455525606469004,
"grad_norm": 0.5902134776115417,
"learning_rate": 0.0004718208310847275,
"loss": 3.5903,
"step": 19900
},
{
"epoch": 2.150943396226415,
"grad_norm": 0.6139191389083862,
"learning_rate": 0.000471497031840259,
"loss": 3.5862,
"step": 19950
},
{
"epoch": 2.1563342318059298,
"grad_norm": 0.5515206456184387,
"learning_rate": 0.0004711732325957905,
"loss": 3.5706,
"step": 20000
},
{
"epoch": 2.1563342318059298,
"eval_accuracy": 0.3621358376230399,
"eval_loss": 3.5808968544006348,
"eval_runtime": 144.61,
"eval_samples_per_second": 124.549,
"eval_steps_per_second": 7.786,
"step": 20000
},
{
"epoch": 2.161725067385445,
"grad_norm": 0.6040294170379639,
"learning_rate": 0.00047084943335132213,
"loss": 3.5692,
"step": 20050
},
{
"epoch": 2.1671159029649596,
"grad_norm": 0.6468523740768433,
"learning_rate": 0.0004705256341068537,
"loss": 3.5764,
"step": 20100
},
{
"epoch": 2.1725067385444743,
"grad_norm": 0.5732126235961914,
"learning_rate": 0.0004702018348623853,
"loss": 3.572,
"step": 20150
},
{
"epoch": 2.177897574123989,
"grad_norm": 0.545032262802124,
"learning_rate": 0.00046987803561791684,
"loss": 3.5862,
"step": 20200
},
{
"epoch": 2.183288409703504,
"grad_norm": 0.5633872747421265,
"learning_rate": 0.00046955423637344844,
"loss": 3.5738,
"step": 20250
},
{
"epoch": 2.188679245283019,
"grad_norm": 0.6415254473686218,
"learning_rate": 0.00046923043712898,
"loss": 3.5789,
"step": 20300
},
{
"epoch": 2.1940700808625335,
"grad_norm": 0.6461573839187622,
"learning_rate": 0.0004689066378845116,
"loss": 3.5603,
"step": 20350
},
{
"epoch": 2.1994609164420487,
"grad_norm": 0.5884630680084229,
"learning_rate": 0.00046858283864004315,
"loss": 3.5676,
"step": 20400
},
{
"epoch": 2.2048517520215634,
"grad_norm": 0.5497499704360962,
"learning_rate": 0.0004682590393955747,
"loss": 3.5713,
"step": 20450
},
{
"epoch": 2.210242587601078,
"grad_norm": 0.5444649457931519,
"learning_rate": 0.0004679352401511063,
"loss": 3.5807,
"step": 20500
},
{
"epoch": 2.215633423180593,
"grad_norm": 0.5752549171447754,
"learning_rate": 0.0004676114409066378,
"loss": 3.576,
"step": 20550
},
{
"epoch": 2.221024258760108,
"grad_norm": 0.5602301359176636,
"learning_rate": 0.00046728764166216946,
"loss": 3.5703,
"step": 20600
},
{
"epoch": 2.2264150943396226,
"grad_norm": 0.5674179792404175,
"learning_rate": 0.00046696384241770095,
"loss": 3.5705,
"step": 20650
},
{
"epoch": 2.2318059299191373,
"grad_norm": 0.6246010661125183,
"learning_rate": 0.00046664004317323256,
"loss": 3.5675,
"step": 20700
},
{
"epoch": 2.2371967654986524,
"grad_norm": 0.5755497217178345,
"learning_rate": 0.0004663162439287641,
"loss": 3.5682,
"step": 20750
},
{
"epoch": 2.242587601078167,
"grad_norm": 0.601298987865448,
"learning_rate": 0.00046599244468429566,
"loss": 3.5731,
"step": 20800
},
{
"epoch": 2.247978436657682,
"grad_norm": 0.6204710602760315,
"learning_rate": 0.00046566864543982726,
"loss": 3.5701,
"step": 20850
},
{
"epoch": 2.2533692722371965,
"grad_norm": 0.5474254488945007,
"learning_rate": 0.0004653448461953588,
"loss": 3.5649,
"step": 20900
},
{
"epoch": 2.2587601078167117,
"grad_norm": 0.5631576776504517,
"learning_rate": 0.00046502752293577977,
"loss": 3.5743,
"step": 20950
},
{
"epoch": 2.2641509433962264,
"grad_norm": 0.5887001752853394,
"learning_rate": 0.00046470372369131137,
"loss": 3.5568,
"step": 21000
},
{
"epoch": 2.2641509433962264,
"eval_accuracy": 0.3631279471416739,
"eval_loss": 3.5703935623168945,
"eval_runtime": 144.8136,
"eval_samples_per_second": 124.374,
"eval_steps_per_second": 7.776,
"step": 21000
},
{
"epoch": 2.269541778975741,
"grad_norm": 0.5233503580093384,
"learning_rate": 0.0004643799244468429,
"loss": 3.5712,
"step": 21050
},
{
"epoch": 2.274932614555256,
"grad_norm": 0.5863848924636841,
"learning_rate": 0.0004640561252023745,
"loss": 3.5732,
"step": 21100
},
{
"epoch": 2.280323450134771,
"grad_norm": 0.610373854637146,
"learning_rate": 0.0004637323259579061,
"loss": 3.5865,
"step": 21150
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.5934001207351685,
"learning_rate": 0.0004634085267134376,
"loss": 3.5383,
"step": 21200
},
{
"epoch": 2.2911051212938007,
"grad_norm": 0.550365686416626,
"learning_rate": 0.00046308472746896923,
"loss": 3.5677,
"step": 21250
},
{
"epoch": 2.2964959568733154,
"grad_norm": 0.5905864238739014,
"learning_rate": 0.00046276092822450073,
"loss": 3.5684,
"step": 21300
},
{
"epoch": 2.30188679245283,
"grad_norm": 0.5769224166870117,
"learning_rate": 0.00046243712898003233,
"loss": 3.5621,
"step": 21350
},
{
"epoch": 2.3072776280323453,
"grad_norm": 0.5868223905563354,
"learning_rate": 0.0004621133297355639,
"loss": 3.5773,
"step": 21400
},
{
"epoch": 2.31266846361186,
"grad_norm": 0.5571302175521851,
"learning_rate": 0.0004617895304910955,
"loss": 3.5739,
"step": 21450
},
{
"epoch": 2.3180592991913747,
"grad_norm": 0.5874112844467163,
"learning_rate": 0.00046146573124662704,
"loss": 3.5778,
"step": 21500
},
{
"epoch": 2.3234501347708894,
"grad_norm": 0.6028329133987427,
"learning_rate": 0.00046114193200215864,
"loss": 3.558,
"step": 21550
},
{
"epoch": 2.3288409703504045,
"grad_norm": 0.5316165089607239,
"learning_rate": 0.0004608181327576902,
"loss": 3.5785,
"step": 21600
},
{
"epoch": 2.334231805929919,
"grad_norm": 0.5828713178634644,
"learning_rate": 0.00046049433351322175,
"loss": 3.5738,
"step": 21650
},
{
"epoch": 2.339622641509434,
"grad_norm": 0.5575670003890991,
"learning_rate": 0.00046017053426875335,
"loss": 3.5545,
"step": 21700
},
{
"epoch": 2.3450134770889486,
"grad_norm": 0.578111469745636,
"learning_rate": 0.0004598467350242849,
"loss": 3.5695,
"step": 21750
},
{
"epoch": 2.3504043126684637,
"grad_norm": 0.5924418568611145,
"learning_rate": 0.0004595229357798165,
"loss": 3.5927,
"step": 21800
},
{
"epoch": 2.3557951482479784,
"grad_norm": 0.5741672515869141,
"learning_rate": 0.00045919913653534806,
"loss": 3.576,
"step": 21850
},
{
"epoch": 2.361185983827493,
"grad_norm": 0.6206271648406982,
"learning_rate": 0.00045887533729087966,
"loss": 3.5548,
"step": 21900
},
{
"epoch": 2.3665768194070083,
"grad_norm": 0.6218240857124329,
"learning_rate": 0.00045855153804641116,
"loss": 3.5706,
"step": 21950
},
{
"epoch": 2.371967654986523,
"grad_norm": 0.5502433180809021,
"learning_rate": 0.0004582277388019427,
"loss": 3.5596,
"step": 22000
},
{
"epoch": 2.371967654986523,
"eval_accuracy": 0.36454162986502814,
"eval_loss": 3.558351993560791,
"eval_runtime": 144.2942,
"eval_samples_per_second": 124.821,
"eval_steps_per_second": 7.804,
"step": 22000
},
{
"epoch": 2.3773584905660377,
"grad_norm": 0.5923473238945007,
"learning_rate": 0.0004579039395574743,
"loss": 3.5625,
"step": 22050
},
{
"epoch": 2.382749326145553,
"grad_norm": 0.5796228051185608,
"learning_rate": 0.00045758014031300586,
"loss": 3.5745,
"step": 22100
},
{
"epoch": 2.3881401617250675,
"grad_norm": 0.5643661618232727,
"learning_rate": 0.00045725634106853747,
"loss": 3.5762,
"step": 22150
},
{
"epoch": 2.393530997304582,
"grad_norm": 0.563675582408905,
"learning_rate": 0.000456932541824069,
"loss": 3.5626,
"step": 22200
},
{
"epoch": 2.398921832884097,
"grad_norm": 0.5896977782249451,
"learning_rate": 0.0004566087425796006,
"loss": 3.5699,
"step": 22250
},
{
"epoch": 2.404312668463612,
"grad_norm": 0.5657273530960083,
"learning_rate": 0.0004562849433351322,
"loss": 3.5572,
"step": 22300
},
{
"epoch": 2.4097035040431267,
"grad_norm": 0.5821571946144104,
"learning_rate": 0.0004559611440906638,
"loss": 3.5794,
"step": 22350
},
{
"epoch": 2.4150943396226414,
"grad_norm": 0.576914370059967,
"learning_rate": 0.00045563734484619533,
"loss": 3.5643,
"step": 22400
},
{
"epoch": 2.420485175202156,
"grad_norm": 0.5930933952331543,
"learning_rate": 0.0004553135456017269,
"loss": 3.5656,
"step": 22450
},
{
"epoch": 2.4258760107816713,
"grad_norm": 0.6359766721725464,
"learning_rate": 0.0004549897463572585,
"loss": 3.5722,
"step": 22500
},
{
"epoch": 2.431266846361186,
"grad_norm": 0.5717756152153015,
"learning_rate": 0.00045466594711279,
"loss": 3.5636,
"step": 22550
},
{
"epoch": 2.4366576819407006,
"grad_norm": 0.6088978052139282,
"learning_rate": 0.00045434214786832164,
"loss": 3.5952,
"step": 22600
},
{
"epoch": 2.442048517520216,
"grad_norm": 0.596017062664032,
"learning_rate": 0.00045401834862385314,
"loss": 3.5571,
"step": 22650
},
{
"epoch": 2.4474393530997305,
"grad_norm": 0.5859695076942444,
"learning_rate": 0.00045369454937938474,
"loss": 3.5462,
"step": 22700
},
{
"epoch": 2.452830188679245,
"grad_norm": 0.5753331780433655,
"learning_rate": 0.0004533707501349163,
"loss": 3.5518,
"step": 22750
},
{
"epoch": 2.4582210242587603,
"grad_norm": 0.569882333278656,
"learning_rate": 0.0004530469508904479,
"loss": 3.5628,
"step": 22800
},
{
"epoch": 2.463611859838275,
"grad_norm": 0.604755699634552,
"learning_rate": 0.00045272315164597945,
"loss": 3.5464,
"step": 22850
},
{
"epoch": 2.4690026954177897,
"grad_norm": 0.6051239967346191,
"learning_rate": 0.000452399352401511,
"loss": 3.563,
"step": 22900
},
{
"epoch": 2.4743935309973044,
"grad_norm": 0.5919761061668396,
"learning_rate": 0.0004520755531570426,
"loss": 3.5611,
"step": 22950
},
{
"epoch": 2.4797843665768196,
"grad_norm": 0.5852389335632324,
"learning_rate": 0.00045175822989746355,
"loss": 3.5529,
"step": 23000
},
{
"epoch": 2.4797843665768196,
"eval_accuracy": 0.3654994050711128,
"eval_loss": 3.5451345443725586,
"eval_runtime": 144.5225,
"eval_samples_per_second": 124.624,
"eval_steps_per_second": 7.791,
"step": 23000
},
{
"epoch": 2.4851752021563343,
"grad_norm": 0.5408614873886108,
"learning_rate": 0.0004514344306529951,
"loss": 3.5645,
"step": 23050
},
{
"epoch": 2.490566037735849,
"grad_norm": 0.5751845836639404,
"learning_rate": 0.0004511106314085267,
"loss": 3.5671,
"step": 23100
},
{
"epoch": 2.4959568733153636,
"grad_norm": 0.5665581226348877,
"learning_rate": 0.00045078683216405826,
"loss": 3.5651,
"step": 23150
},
{
"epoch": 2.501347708894879,
"grad_norm": 0.5446679592132568,
"learning_rate": 0.00045046303291958976,
"loss": 3.5602,
"step": 23200
},
{
"epoch": 2.5067385444743935,
"grad_norm": 0.6096646189689636,
"learning_rate": 0.0004501392336751214,
"loss": 3.5606,
"step": 23250
},
{
"epoch": 2.512129380053908,
"grad_norm": 0.5711411237716675,
"learning_rate": 0.0004498154344306529,
"loss": 3.5462,
"step": 23300
},
{
"epoch": 2.5175202156334233,
"grad_norm": 0.5946795344352722,
"learning_rate": 0.0004494916351861845,
"loss": 3.5517,
"step": 23350
},
{
"epoch": 2.522911051212938,
"grad_norm": 0.6075916886329651,
"learning_rate": 0.00044916783594171607,
"loss": 3.5477,
"step": 23400
},
{
"epoch": 2.5283018867924527,
"grad_norm": 0.5496644377708435,
"learning_rate": 0.00044884403669724767,
"loss": 3.5583,
"step": 23450
},
{
"epoch": 2.533692722371968,
"grad_norm": 0.5712068676948547,
"learning_rate": 0.0004485202374527792,
"loss": 3.5607,
"step": 23500
},
{
"epoch": 2.5390835579514826,
"grad_norm": 0.5820903182029724,
"learning_rate": 0.00044819643820831083,
"loss": 3.5643,
"step": 23550
},
{
"epoch": 2.5444743935309972,
"grad_norm": 0.5886263847351074,
"learning_rate": 0.0004478726389638424,
"loss": 3.5662,
"step": 23600
},
{
"epoch": 2.5498652291105124,
"grad_norm": 0.5735592842102051,
"learning_rate": 0.00044754883971937393,
"loss": 3.5579,
"step": 23650
},
{
"epoch": 2.555256064690027,
"grad_norm": 0.6012240052223206,
"learning_rate": 0.00044722504047490553,
"loss": 3.5642,
"step": 23700
},
{
"epoch": 2.560646900269542,
"grad_norm": 0.5736075639724731,
"learning_rate": 0.0004469012412304371,
"loss": 3.5574,
"step": 23750
},
{
"epoch": 2.5660377358490565,
"grad_norm": 0.6146102547645569,
"learning_rate": 0.0004465774419859687,
"loss": 3.5704,
"step": 23800
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.5984640717506409,
"learning_rate": 0.00044625364274150024,
"loss": 3.5763,
"step": 23850
},
{
"epoch": 2.5768194070080863,
"grad_norm": 0.5764756798744202,
"learning_rate": 0.00044592984349703184,
"loss": 3.5746,
"step": 23900
},
{
"epoch": 2.582210242587601,
"grad_norm": 0.6541941165924072,
"learning_rate": 0.00044560604425256334,
"loss": 3.55,
"step": 23950
},
{
"epoch": 2.5876010781671157,
"grad_norm": 0.5598556399345398,
"learning_rate": 0.000445282245008095,
"loss": 3.5466,
"step": 24000
},
{
"epoch": 2.5876010781671157,
"eval_accuracy": 0.36651433169618786,
"eval_loss": 3.5378851890563965,
"eval_runtime": 144.1764,
"eval_samples_per_second": 124.923,
"eval_steps_per_second": 7.81,
"step": 24000
},
{
"epoch": 2.592991913746631,
"grad_norm": 0.6188113689422607,
"learning_rate": 0.0004449584457636265,
"loss": 3.5458,
"step": 24050
},
{
"epoch": 2.5983827493261455,
"grad_norm": 0.5713769197463989,
"learning_rate": 0.00044463464651915805,
"loss": 3.5473,
"step": 24100
},
{
"epoch": 2.6037735849056602,
"grad_norm": 0.5993424654006958,
"learning_rate": 0.00044431084727468965,
"loss": 3.5493,
"step": 24150
},
{
"epoch": 2.6091644204851754,
"grad_norm": 0.6186326146125793,
"learning_rate": 0.0004439870480302212,
"loss": 3.5668,
"step": 24200
},
{
"epoch": 2.61455525606469,
"grad_norm": 0.5661455392837524,
"learning_rate": 0.0004436632487857528,
"loss": 3.5703,
"step": 24250
},
{
"epoch": 2.6199460916442048,
"grad_norm": 0.6343756318092346,
"learning_rate": 0.00044333944954128436,
"loss": 3.5559,
"step": 24300
},
{
"epoch": 2.62533692722372,
"grad_norm": 0.5699751377105713,
"learning_rate": 0.00044301565029681596,
"loss": 3.5509,
"step": 24350
},
{
"epoch": 2.6307277628032346,
"grad_norm": 0.5953941941261292,
"learning_rate": 0.0004426918510523475,
"loss": 3.5609,
"step": 24400
},
{
"epoch": 2.6361185983827493,
"grad_norm": 0.5832213163375854,
"learning_rate": 0.0004423680518078791,
"loss": 3.531,
"step": 24450
},
{
"epoch": 2.641509433962264,
"grad_norm": 0.6015488505363464,
"learning_rate": 0.00044204425256341067,
"loss": 3.5396,
"step": 24500
},
{
"epoch": 2.6469002695417787,
"grad_norm": 0.6897053718566895,
"learning_rate": 0.0004417204533189422,
"loss": 3.5562,
"step": 24550
},
{
"epoch": 2.652291105121294,
"grad_norm": 0.5880343317985535,
"learning_rate": 0.0004413966540744738,
"loss": 3.5371,
"step": 24600
},
{
"epoch": 2.6576819407008085,
"grad_norm": 0.574292778968811,
"learning_rate": 0.0004410728548300053,
"loss": 3.5532,
"step": 24650
},
{
"epoch": 2.6630727762803232,
"grad_norm": 0.6216583251953125,
"learning_rate": 0.0004407490555855369,
"loss": 3.5528,
"step": 24700
},
{
"epoch": 2.6684636118598384,
"grad_norm": 0.6199180483818054,
"learning_rate": 0.0004404252563410685,
"loss": 3.5568,
"step": 24750
},
{
"epoch": 2.673854447439353,
"grad_norm": 0.6043687462806702,
"learning_rate": 0.0004401014570966001,
"loss": 3.5543,
"step": 24800
},
{
"epoch": 2.6792452830188678,
"grad_norm": 0.5746572613716125,
"learning_rate": 0.00043977765785213163,
"loss": 3.5559,
"step": 24850
},
{
"epoch": 2.684636118598383,
"grad_norm": 0.5911267995834351,
"learning_rate": 0.0004394538586076632,
"loss": 3.5716,
"step": 24900
},
{
"epoch": 2.6900269541778976,
"grad_norm": 0.5925143361091614,
"learning_rate": 0.0004391300593631948,
"loss": 3.5607,
"step": 24950
},
{
"epoch": 2.6954177897574123,
"grad_norm": 0.5580176711082458,
"learning_rate": 0.00043880626011872634,
"loss": 3.555,
"step": 25000
},
{
"epoch": 2.6954177897574123,
"eval_accuracy": 0.36743233994533236,
"eval_loss": 3.5253796577453613,
"eval_runtime": 144.4202,
"eval_samples_per_second": 124.712,
"eval_steps_per_second": 7.797,
"step": 25000
},
{
"epoch": 2.7008086253369274,
"grad_norm": 0.576725959777832,
"learning_rate": 0.0004384889368591473,
"loss": 3.5449,
"step": 25050
},
{
"epoch": 2.706199460916442,
"grad_norm": 0.545190691947937,
"learning_rate": 0.0004381651376146789,
"loss": 3.5538,
"step": 25100
},
{
"epoch": 2.711590296495957,
"grad_norm": 0.5915215611457825,
"learning_rate": 0.00043784133837021044,
"loss": 3.5311,
"step": 25150
},
{
"epoch": 2.7169811320754715,
"grad_norm": 0.6212695837020874,
"learning_rate": 0.00043751753912574205,
"loss": 3.5534,
"step": 25200
},
{
"epoch": 2.7223719676549867,
"grad_norm": 0.5867090225219727,
"learning_rate": 0.0004371937398812736,
"loss": 3.5584,
"step": 25250
},
{
"epoch": 2.7277628032345014,
"grad_norm": 0.5455405116081238,
"learning_rate": 0.0004368699406368051,
"loss": 3.5605,
"step": 25300
},
{
"epoch": 2.733153638814016,
"grad_norm": 0.6148755550384521,
"learning_rate": 0.0004365461413923367,
"loss": 3.5503,
"step": 25350
},
{
"epoch": 2.7385444743935308,
"grad_norm": 0.6133694052696228,
"learning_rate": 0.00043622234214786825,
"loss": 3.5414,
"step": 25400
},
{
"epoch": 2.743935309973046,
"grad_norm": 0.5954641699790955,
"learning_rate": 0.00043589854290339985,
"loss": 3.5439,
"step": 25450
},
{
"epoch": 2.7493261455525606,
"grad_norm": 0.5887457728385925,
"learning_rate": 0.0004355747436589314,
"loss": 3.5571,
"step": 25500
},
{
"epoch": 2.7547169811320753,
"grad_norm": 0.573692798614502,
"learning_rate": 0.000435250944414463,
"loss": 3.5495,
"step": 25550
},
{
"epoch": 2.7601078167115904,
"grad_norm": 0.5801973938941956,
"learning_rate": 0.00043492714516999456,
"loss": 3.5522,
"step": 25600
},
{
"epoch": 2.765498652291105,
"grad_norm": 0.583278238773346,
"learning_rate": 0.0004346033459255261,
"loss": 3.5448,
"step": 25650
},
{
"epoch": 2.77088948787062,
"grad_norm": 0.6692058444023132,
"learning_rate": 0.0004342795466810577,
"loss": 3.5509,
"step": 25700
},
{
"epoch": 2.776280323450135,
"grad_norm": 0.609228253364563,
"learning_rate": 0.00043395574743658927,
"loss": 3.5385,
"step": 25750
},
{
"epoch": 2.7816711590296497,
"grad_norm": 0.5607845187187195,
"learning_rate": 0.00043363194819212087,
"loss": 3.5623,
"step": 25800
},
{
"epoch": 2.7870619946091644,
"grad_norm": 0.5615296363830566,
"learning_rate": 0.0004333081489476524,
"loss": 3.5353,
"step": 25850
},
{
"epoch": 2.7924528301886795,
"grad_norm": 0.5903550982475281,
"learning_rate": 0.000432984349703184,
"loss": 3.5447,
"step": 25900
},
{
"epoch": 2.797843665768194,
"grad_norm": 0.6404184103012085,
"learning_rate": 0.0004326605504587155,
"loss": 3.5401,
"step": 25950
},
{
"epoch": 2.803234501347709,
"grad_norm": 0.5671994090080261,
"learning_rate": 0.0004323367512142472,
"loss": 3.5597,
"step": 26000
},
{
"epoch": 2.803234501347709,
"eval_accuracy": 0.36840749961346736,
"eval_loss": 3.5180230140686035,
"eval_runtime": 144.5115,
"eval_samples_per_second": 124.634,
"eval_steps_per_second": 7.792,
"step": 26000
},
{
"epoch": 2.8086253369272236,
"grad_norm": 0.5972022414207458,
"learning_rate": 0.0004320129519697787,
"loss": 3.5636,
"step": 26050
},
{
"epoch": 2.8140161725067383,
"grad_norm": 0.5757322907447815,
"learning_rate": 0.00043168915272531023,
"loss": 3.5479,
"step": 26100
},
{
"epoch": 2.8194070080862534,
"grad_norm": 0.5859302282333374,
"learning_rate": 0.00043136535348084183,
"loss": 3.5425,
"step": 26150
},
{
"epoch": 2.824797843665768,
"grad_norm": 0.5637912154197693,
"learning_rate": 0.0004310415542363734,
"loss": 3.555,
"step": 26200
},
{
"epoch": 2.830188679245283,
"grad_norm": 0.5691296458244324,
"learning_rate": 0.000430717754991905,
"loss": 3.5357,
"step": 26250
},
{
"epoch": 2.835579514824798,
"grad_norm": 0.6390897631645203,
"learning_rate": 0.00043039395574743654,
"loss": 3.5369,
"step": 26300
},
{
"epoch": 2.8409703504043127,
"grad_norm": 0.6023745536804199,
"learning_rate": 0.00043007015650296814,
"loss": 3.5569,
"step": 26350
},
{
"epoch": 2.8463611859838274,
"grad_norm": 0.5968989133834839,
"learning_rate": 0.0004297463572584997,
"loss": 3.5324,
"step": 26400
},
{
"epoch": 2.8517520215633425,
"grad_norm": 0.5747265219688416,
"learning_rate": 0.0004294225580140313,
"loss": 3.5554,
"step": 26450
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5473029017448425,
"learning_rate": 0.00042909875876956285,
"loss": 3.5601,
"step": 26500
},
{
"epoch": 2.862533692722372,
"grad_norm": 0.6153762936592102,
"learning_rate": 0.0004287749595250944,
"loss": 3.5416,
"step": 26550
},
{
"epoch": 2.867924528301887,
"grad_norm": 0.5814242959022522,
"learning_rate": 0.000428451160280626,
"loss": 3.5436,
"step": 26600
},
{
"epoch": 2.8733153638814017,
"grad_norm": 0.544052243232727,
"learning_rate": 0.0004281273610361575,
"loss": 3.5283,
"step": 26650
},
{
"epoch": 2.8787061994609164,
"grad_norm": 0.580087423324585,
"learning_rate": 0.0004278035617916891,
"loss": 3.5665,
"step": 26700
},
{
"epoch": 2.884097035040431,
"grad_norm": 0.606545627117157,
"learning_rate": 0.00042747976254722066,
"loss": 3.5333,
"step": 26750
},
{
"epoch": 2.889487870619946,
"grad_norm": 0.5613415241241455,
"learning_rate": 0.00042715596330275226,
"loss": 3.5188,
"step": 26800
},
{
"epoch": 2.894878706199461,
"grad_norm": 0.5772558450698853,
"learning_rate": 0.0004268321640582838,
"loss": 3.521,
"step": 26850
},
{
"epoch": 2.9002695417789757,
"grad_norm": 0.5379123091697693,
"learning_rate": 0.0004265083648138154,
"loss": 3.5405,
"step": 26900
},
{
"epoch": 2.9056603773584904,
"grad_norm": 0.5935842394828796,
"learning_rate": 0.00042618456556934697,
"loss": 3.5612,
"step": 26950
},
{
"epoch": 2.9110512129380055,
"grad_norm": 0.622439444065094,
"learning_rate": 0.0004258607663248785,
"loss": 3.5422,
"step": 27000
},
{
"epoch": 2.9110512129380055,
"eval_accuracy": 0.3692755275342172,
"eval_loss": 3.508702039718628,
"eval_runtime": 144.9148,
"eval_samples_per_second": 124.287,
"eval_steps_per_second": 7.77,
"step": 27000
},
{
"epoch": 2.91644204851752,
"grad_norm": 0.6212618947029114,
"learning_rate": 0.00042554344306529947,
"loss": 3.531,
"step": 27050
},
{
"epoch": 2.921832884097035,
"grad_norm": 0.6010525226593018,
"learning_rate": 0.0004252196438208311,
"loss": 3.5393,
"step": 27100
},
{
"epoch": 2.92722371967655,
"grad_norm": 0.5927774310112,
"learning_rate": 0.0004248958445763626,
"loss": 3.5333,
"step": 27150
},
{
"epoch": 2.9326145552560647,
"grad_norm": 0.6531330347061157,
"learning_rate": 0.00042457204533189423,
"loss": 3.5434,
"step": 27200
},
{
"epoch": 2.9380053908355794,
"grad_norm": 0.584989607334137,
"learning_rate": 0.0004242482460874258,
"loss": 3.5498,
"step": 27250
},
{
"epoch": 2.9433962264150946,
"grad_norm": 0.6219198107719421,
"learning_rate": 0.0004239244468429573,
"loss": 3.5142,
"step": 27300
},
{
"epoch": 2.9487870619946093,
"grad_norm": 0.6168526411056519,
"learning_rate": 0.0004236006475984889,
"loss": 3.5346,
"step": 27350
},
{
"epoch": 2.954177897574124,
"grad_norm": 0.583755373954773,
"learning_rate": 0.00042327684835402043,
"loss": 3.5291,
"step": 27400
},
{
"epoch": 2.9595687331536387,
"grad_norm": 0.6100607514381409,
"learning_rate": 0.00042295304910955204,
"loss": 3.5208,
"step": 27450
},
{
"epoch": 2.964959568733154,
"grad_norm": 0.5626996159553528,
"learning_rate": 0.0004226292498650836,
"loss": 3.5461,
"step": 27500
},
{
"epoch": 2.9703504043126685,
"grad_norm": 0.5924006700515747,
"learning_rate": 0.0004223054506206152,
"loss": 3.5485,
"step": 27550
},
{
"epoch": 2.975741239892183,
"grad_norm": 0.5733758807182312,
"learning_rate": 0.00042198165137614674,
"loss": 3.5297,
"step": 27600
},
{
"epoch": 2.981132075471698,
"grad_norm": 0.6193982362747192,
"learning_rate": 0.00042165785213167835,
"loss": 3.5365,
"step": 27650
},
{
"epoch": 2.986522911051213,
"grad_norm": 0.5635408759117126,
"learning_rate": 0.0004213340528872099,
"loss": 3.5253,
"step": 27700
},
{
"epoch": 2.9919137466307277,
"grad_norm": 0.6305370926856995,
"learning_rate": 0.00042101025364274145,
"loss": 3.5239,
"step": 27750
},
{
"epoch": 2.9973045822102424,
"grad_norm": 0.581883430480957,
"learning_rate": 0.00042068645439827305,
"loss": 3.5336,
"step": 27800
},
{
"epoch": 3.0026954177897576,
"grad_norm": 0.5631284713745117,
"learning_rate": 0.0004203626551538046,
"loss": 3.4953,
"step": 27850
},
{
"epoch": 3.0080862533692723,
"grad_norm": 0.6554384827613831,
"learning_rate": 0.0004200388559093362,
"loss": 3.4436,
"step": 27900
},
{
"epoch": 3.013477088948787,
"grad_norm": 0.5941973924636841,
"learning_rate": 0.0004197150566648677,
"loss": 3.4446,
"step": 27950
},
{
"epoch": 3.018867924528302,
"grad_norm": 0.5603510141372681,
"learning_rate": 0.00041939125742039936,
"loss": 3.4422,
"step": 28000
},
{
"epoch": 3.018867924528302,
"eval_accuracy": 0.3700082826096381,
"eval_loss": 3.5040652751922607,
"eval_runtime": 144.5182,
"eval_samples_per_second": 124.628,
"eval_steps_per_second": 7.791,
"step": 28000
},
{
"epoch": 3.024258760107817,
"grad_norm": 0.58504718542099,
"learning_rate": 0.00041906745817593086,
"loss": 3.442,
"step": 28050
},
{
"epoch": 3.0296495956873315,
"grad_norm": 0.590460479259491,
"learning_rate": 0.00041874365893146247,
"loss": 3.4482,
"step": 28100
},
{
"epoch": 3.035040431266846,
"grad_norm": 0.5873590707778931,
"learning_rate": 0.000418419859686994,
"loss": 3.4321,
"step": 28150
},
{
"epoch": 3.0404312668463613,
"grad_norm": 0.5897404551506042,
"learning_rate": 0.00041809606044252557,
"loss": 3.45,
"step": 28200
},
{
"epoch": 3.045822102425876,
"grad_norm": 0.5874994397163391,
"learning_rate": 0.00041777226119805717,
"loss": 3.4424,
"step": 28250
},
{
"epoch": 3.0512129380053907,
"grad_norm": 0.5833684206008911,
"learning_rate": 0.0004174484619535887,
"loss": 3.4642,
"step": 28300
},
{
"epoch": 3.056603773584906,
"grad_norm": 0.609053909778595,
"learning_rate": 0.00041712466270912033,
"loss": 3.4483,
"step": 28350
},
{
"epoch": 3.0619946091644206,
"grad_norm": 0.5735374093055725,
"learning_rate": 0.0004168008634646519,
"loss": 3.4695,
"step": 28400
},
{
"epoch": 3.0673854447439353,
"grad_norm": 0.60845947265625,
"learning_rate": 0.0004164770642201835,
"loss": 3.4465,
"step": 28450
},
{
"epoch": 3.07277628032345,
"grad_norm": 0.6065637469291687,
"learning_rate": 0.00041615326497571503,
"loss": 3.4424,
"step": 28500
},
{
"epoch": 3.078167115902965,
"grad_norm": 0.5800220370292664,
"learning_rate": 0.0004158294657312466,
"loss": 3.443,
"step": 28550
},
{
"epoch": 3.08355795148248,
"grad_norm": 0.6139459609985352,
"learning_rate": 0.0004155056664867782,
"loss": 3.4636,
"step": 28600
},
{
"epoch": 3.0889487870619945,
"grad_norm": 0.6870418190956116,
"learning_rate": 0.0004151818672423097,
"loss": 3.4479,
"step": 28650
},
{
"epoch": 3.0943396226415096,
"grad_norm": 0.5827794075012207,
"learning_rate": 0.0004148580679978413,
"loss": 3.4704,
"step": 28700
},
{
"epoch": 3.0997304582210243,
"grad_norm": 0.5892341732978821,
"learning_rate": 0.00041453426875337284,
"loss": 3.4355,
"step": 28750
},
{
"epoch": 3.105121293800539,
"grad_norm": 0.5540435314178467,
"learning_rate": 0.00041421046950890445,
"loss": 3.4644,
"step": 28800
},
{
"epoch": 3.1105121293800537,
"grad_norm": 0.6163308024406433,
"learning_rate": 0.000413886670264436,
"loss": 3.4522,
"step": 28850
},
{
"epoch": 3.115902964959569,
"grad_norm": 0.5884636640548706,
"learning_rate": 0.0004135628710199676,
"loss": 3.4664,
"step": 28900
},
{
"epoch": 3.1212938005390836,
"grad_norm": 0.6189049482345581,
"learning_rate": 0.00041323907177549915,
"loss": 3.465,
"step": 28950
},
{
"epoch": 3.1266846361185983,
"grad_norm": 0.6232777833938599,
"learning_rate": 0.0004129152725310307,
"loss": 3.4594,
"step": 29000
},
{
"epoch": 3.1266846361185983,
"eval_accuracy": 0.37088217778633,
"eval_loss": 3.4994044303894043,
"eval_runtime": 145.0712,
"eval_samples_per_second": 124.153,
"eval_steps_per_second": 7.762,
"step": 29000
},
{
"epoch": 3.1320754716981134,
"grad_norm": 0.6272684335708618,
"learning_rate": 0.00041259794927145165,
"loss": 3.4647,
"step": 29050
},
{
"epoch": 3.137466307277628,
"grad_norm": 0.5963659882545471,
"learning_rate": 0.00041227415002698326,
"loss": 3.4641,
"step": 29100
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.5980457663536072,
"learning_rate": 0.0004119503507825148,
"loss": 3.4658,
"step": 29150
},
{
"epoch": 3.1482479784366575,
"grad_norm": 0.6148801445960999,
"learning_rate": 0.00041163302752293576,
"loss": 3.4656,
"step": 29200
},
{
"epoch": 3.1536388140161726,
"grad_norm": 0.6103987693786621,
"learning_rate": 0.00041130922827846726,
"loss": 3.4807,
"step": 29250
},
{
"epoch": 3.1590296495956873,
"grad_norm": 0.6764293313026428,
"learning_rate": 0.0004109854290339989,
"loss": 3.4681,
"step": 29300
},
{
"epoch": 3.164420485175202,
"grad_norm": 0.5955349802970886,
"learning_rate": 0.0004106616297895304,
"loss": 3.4627,
"step": 29350
},
{
"epoch": 3.169811320754717,
"grad_norm": 0.5901073813438416,
"learning_rate": 0.000410337830545062,
"loss": 3.4575,
"step": 29400
},
{
"epoch": 3.175202156334232,
"grad_norm": 0.6012365818023682,
"learning_rate": 0.00041001403130059357,
"loss": 3.4659,
"step": 29450
},
{
"epoch": 3.1805929919137466,
"grad_norm": 0.6144543886184692,
"learning_rate": 0.00040969023205612517,
"loss": 3.4615,
"step": 29500
},
{
"epoch": 3.1859838274932613,
"grad_norm": 0.7040764689445496,
"learning_rate": 0.0004093664328116567,
"loss": 3.4752,
"step": 29550
},
{
"epoch": 3.1913746630727764,
"grad_norm": 0.6097145676612854,
"learning_rate": 0.00040904263356718833,
"loss": 3.4716,
"step": 29600
},
{
"epoch": 3.196765498652291,
"grad_norm": 0.6183958649635315,
"learning_rate": 0.0004087188343227199,
"loss": 3.4591,
"step": 29650
},
{
"epoch": 3.202156334231806,
"grad_norm": 0.6033417582511902,
"learning_rate": 0.00040839503507825143,
"loss": 3.4897,
"step": 29700
},
{
"epoch": 3.207547169811321,
"grad_norm": 0.6251811981201172,
"learning_rate": 0.00040807123583378303,
"loss": 3.4603,
"step": 29750
},
{
"epoch": 3.2129380053908356,
"grad_norm": 0.6118080019950867,
"learning_rate": 0.0004077474365893146,
"loss": 3.4704,
"step": 29800
},
{
"epoch": 3.2183288409703503,
"grad_norm": 0.6248761415481567,
"learning_rate": 0.0004074236373448462,
"loss": 3.4467,
"step": 29850
},
{
"epoch": 3.223719676549865,
"grad_norm": 0.6107171177864075,
"learning_rate": 0.00040709983810037774,
"loss": 3.4591,
"step": 29900
},
{
"epoch": 3.22911051212938,
"grad_norm": 0.6119734644889832,
"learning_rate": 0.00040677603885590934,
"loss": 3.4716,
"step": 29950
},
{
"epoch": 3.234501347708895,
"grad_norm": 0.6032190918922424,
"learning_rate": 0.00040645223961144084,
"loss": 3.4767,
"step": 30000
},
{
"epoch": 3.234501347708895,
"eval_accuracy": 0.37148009462797305,
"eval_loss": 3.493159770965576,
"eval_runtime": 144.5095,
"eval_samples_per_second": 124.635,
"eval_steps_per_second": 7.792,
"step": 30000
},
{
"epoch": 3.2398921832884096,
"grad_norm": 0.6084169149398804,
"learning_rate": 0.0004061284403669725,
"loss": 3.4791,
"step": 30050
},
{
"epoch": 3.2452830188679247,
"grad_norm": 0.5927680730819702,
"learning_rate": 0.000405804641122504,
"loss": 3.4603,
"step": 30100
},
{
"epoch": 3.2506738544474394,
"grad_norm": 0.6702844500541687,
"learning_rate": 0.00040548084187803555,
"loss": 3.4631,
"step": 30150
},
{
"epoch": 3.256064690026954,
"grad_norm": 0.6025919318199158,
"learning_rate": 0.00040515704263356715,
"loss": 3.4672,
"step": 30200
},
{
"epoch": 3.2614555256064692,
"grad_norm": 0.5886960625648499,
"learning_rate": 0.0004048332433890987,
"loss": 3.4467,
"step": 30250
},
{
"epoch": 3.266846361185984,
"grad_norm": 0.6077654361724854,
"learning_rate": 0.0004045094441446303,
"loss": 3.4781,
"step": 30300
},
{
"epoch": 3.2722371967654986,
"grad_norm": 0.5719964504241943,
"learning_rate": 0.00040418564490016186,
"loss": 3.4629,
"step": 30350
},
{
"epoch": 3.2776280323450133,
"grad_norm": 0.5921858549118042,
"learning_rate": 0.00040386184565569346,
"loss": 3.4642,
"step": 30400
},
{
"epoch": 3.2830188679245285,
"grad_norm": 0.6381065845489502,
"learning_rate": 0.000403538046411225,
"loss": 3.466,
"step": 30450
},
{
"epoch": 3.288409703504043,
"grad_norm": 0.6194201707839966,
"learning_rate": 0.00040321424716675656,
"loss": 3.4934,
"step": 30500
},
{
"epoch": 3.293800539083558,
"grad_norm": 0.6250463724136353,
"learning_rate": 0.00040289044792228817,
"loss": 3.4645,
"step": 30550
},
{
"epoch": 3.2991913746630726,
"grad_norm": 0.6104064583778381,
"learning_rate": 0.0004025666486778197,
"loss": 3.473,
"step": 30600
},
{
"epoch": 3.3045822102425877,
"grad_norm": 0.5955620408058167,
"learning_rate": 0.0004022428494333513,
"loss": 3.4611,
"step": 30650
},
{
"epoch": 3.3099730458221024,
"grad_norm": 0.6021254658699036,
"learning_rate": 0.0004019190501888828,
"loss": 3.4637,
"step": 30700
},
{
"epoch": 3.315363881401617,
"grad_norm": 0.6101205348968506,
"learning_rate": 0.0004015952509444144,
"loss": 3.4533,
"step": 30750
},
{
"epoch": 3.3207547169811322,
"grad_norm": 0.6132895350456238,
"learning_rate": 0.000401271451699946,
"loss": 3.4668,
"step": 30800
},
{
"epoch": 3.326145552560647,
"grad_norm": 0.5911172032356262,
"learning_rate": 0.0004009476524554776,
"loss": 3.4788,
"step": 30850
},
{
"epoch": 3.3315363881401616,
"grad_norm": 0.6629490256309509,
"learning_rate": 0.00040062385321100913,
"loss": 3.4685,
"step": 30900
},
{
"epoch": 3.3369272237196768,
"grad_norm": 0.6565037369728088,
"learning_rate": 0.0004003000539665407,
"loss": 3.4751,
"step": 30950
},
{
"epoch": 3.3423180592991915,
"grad_norm": 0.5742873549461365,
"learning_rate": 0.0003999762547220723,
"loss": 3.4629,
"step": 31000
},
{
"epoch": 3.3423180592991915,
"eval_accuracy": 0.3724715522292802,
"eval_loss": 3.4883720874786377,
"eval_runtime": 144.1254,
"eval_samples_per_second": 124.968,
"eval_steps_per_second": 7.813,
"step": 31000
},
{
"epoch": 3.347708894878706,
"grad_norm": 0.6102972030639648,
"learning_rate": 0.00039965245547760384,
"loss": 3.4736,
"step": 31050
},
{
"epoch": 3.353099730458221,
"grad_norm": 0.6134117245674133,
"learning_rate": 0.00039932865623313544,
"loss": 3.4616,
"step": 31100
},
{
"epoch": 3.358490566037736,
"grad_norm": 0.5953965783119202,
"learning_rate": 0.000399004856988667,
"loss": 3.4647,
"step": 31150
},
{
"epoch": 3.3638814016172507,
"grad_norm": 0.6266044974327087,
"learning_rate": 0.0003986810577441986,
"loss": 3.4725,
"step": 31200
},
{
"epoch": 3.3692722371967654,
"grad_norm": 0.6344805359840393,
"learning_rate": 0.00039835725849973015,
"loss": 3.4614,
"step": 31250
},
{
"epoch": 3.37466307277628,
"grad_norm": 0.6390222907066345,
"learning_rate": 0.0003980399352401511,
"loss": 3.4691,
"step": 31300
},
{
"epoch": 3.3800539083557952,
"grad_norm": 0.6394646167755127,
"learning_rate": 0.0003977161359956826,
"loss": 3.4724,
"step": 31350
},
{
"epoch": 3.38544474393531,
"grad_norm": 0.6056656241416931,
"learning_rate": 0.0003973923367512142,
"loss": 3.4829,
"step": 31400
},
{
"epoch": 3.3908355795148246,
"grad_norm": 0.6254240274429321,
"learning_rate": 0.00039706853750674575,
"loss": 3.4643,
"step": 31450
},
{
"epoch": 3.3962264150943398,
"grad_norm": 0.5952332615852356,
"learning_rate": 0.00039674473826227736,
"loss": 3.4635,
"step": 31500
},
{
"epoch": 3.4016172506738545,
"grad_norm": 0.5912657976150513,
"learning_rate": 0.0003964209390178089,
"loss": 3.4492,
"step": 31550
},
{
"epoch": 3.407008086253369,
"grad_norm": 0.6330773234367371,
"learning_rate": 0.0003960971397733405,
"loss": 3.4594,
"step": 31600
},
{
"epoch": 3.4123989218328843,
"grad_norm": 0.6112014055252075,
"learning_rate": 0.00039577334052887206,
"loss": 3.4739,
"step": 31650
},
{
"epoch": 3.417789757412399,
"grad_norm": 0.6433203220367432,
"learning_rate": 0.0003954495412844036,
"loss": 3.4597,
"step": 31700
},
{
"epoch": 3.4231805929919137,
"grad_norm": 0.6591313481330872,
"learning_rate": 0.0003951257420399352,
"loss": 3.4708,
"step": 31750
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.6169155836105347,
"learning_rate": 0.00039480194279546677,
"loss": 3.4686,
"step": 31800
},
{
"epoch": 3.4339622641509435,
"grad_norm": 0.5913453102111816,
"learning_rate": 0.00039447814355099837,
"loss": 3.457,
"step": 31850
},
{
"epoch": 3.439353099730458,
"grad_norm": 0.6046425104141235,
"learning_rate": 0.0003941543443065299,
"loss": 3.459,
"step": 31900
},
{
"epoch": 3.444743935309973,
"grad_norm": 0.5933957695960999,
"learning_rate": 0.00039383054506206153,
"loss": 3.4667,
"step": 31950
},
{
"epoch": 3.450134770889488,
"grad_norm": 0.5954340696334839,
"learning_rate": 0.000393506745817593,
"loss": 3.471,
"step": 32000
},
{
"epoch": 3.450134770889488,
"eval_accuracy": 0.37276263331573517,
"eval_loss": 3.481382131576538,
"eval_runtime": 144.5021,
"eval_samples_per_second": 124.642,
"eval_steps_per_second": 7.792,
"step": 32000
},
{
"epoch": 3.4555256064690028,
"grad_norm": 0.6200750470161438,
"learning_rate": 0.0003931829465731247,
"loss": 3.4648,
"step": 32050
},
{
"epoch": 3.4609164420485174,
"grad_norm": 0.6105481386184692,
"learning_rate": 0.0003928591473286562,
"loss": 3.4673,
"step": 32100
},
{
"epoch": 3.466307277628032,
"grad_norm": 0.5963844060897827,
"learning_rate": 0.00039253534808418773,
"loss": 3.4654,
"step": 32150
},
{
"epoch": 3.4716981132075473,
"grad_norm": 0.5792421102523804,
"learning_rate": 0.00039221154883971933,
"loss": 3.4649,
"step": 32200
},
{
"epoch": 3.477088948787062,
"grad_norm": 0.6019930839538574,
"learning_rate": 0.0003918877495952509,
"loss": 3.4742,
"step": 32250
},
{
"epoch": 3.4824797843665767,
"grad_norm": 0.6327193379402161,
"learning_rate": 0.0003915639503507825,
"loss": 3.4832,
"step": 32300
},
{
"epoch": 3.487870619946092,
"grad_norm": 0.6305009126663208,
"learning_rate": 0.00039124015110631404,
"loss": 3.4694,
"step": 32350
},
{
"epoch": 3.4932614555256065,
"grad_norm": 0.6175006628036499,
"learning_rate": 0.00039091635186184565,
"loss": 3.4701,
"step": 32400
},
{
"epoch": 3.498652291105121,
"grad_norm": 0.6704298257827759,
"learning_rate": 0.0003905925526173772,
"loss": 3.4508,
"step": 32450
},
{
"epoch": 3.5040431266846364,
"grad_norm": 0.6491182446479797,
"learning_rate": 0.0003902687533729088,
"loss": 3.4779,
"step": 32500
},
{
"epoch": 3.509433962264151,
"grad_norm": 0.6863692402839661,
"learning_rate": 0.00038994495412844035,
"loss": 3.4519,
"step": 32550
},
{
"epoch": 3.5148247978436657,
"grad_norm": 0.6133790016174316,
"learning_rate": 0.0003896211548839719,
"loss": 3.4748,
"step": 32600
},
{
"epoch": 3.5202156334231804,
"grad_norm": 0.62857985496521,
"learning_rate": 0.0003892973556395035,
"loss": 3.4592,
"step": 32650
},
{
"epoch": 3.525606469002695,
"grad_norm": 0.6060515642166138,
"learning_rate": 0.000388973556395035,
"loss": 3.456,
"step": 32700
},
{
"epoch": 3.5309973045822103,
"grad_norm": 0.6039831042289734,
"learning_rate": 0.0003886497571505666,
"loss": 3.4666,
"step": 32750
},
{
"epoch": 3.536388140161725,
"grad_norm": 0.6068740487098694,
"learning_rate": 0.00038832595790609816,
"loss": 3.4716,
"step": 32800
},
{
"epoch": 3.5417789757412397,
"grad_norm": 0.6067098379135132,
"learning_rate": 0.00038800215866162976,
"loss": 3.4587,
"step": 32850
},
{
"epoch": 3.547169811320755,
"grad_norm": 0.5945684313774109,
"learning_rate": 0.0003876783594171613,
"loss": 3.4612,
"step": 32900
},
{
"epoch": 3.5525606469002695,
"grad_norm": 0.6096765398979187,
"learning_rate": 0.0003873545601726929,
"loss": 3.4795,
"step": 32950
},
{
"epoch": 3.557951482479784,
"grad_norm": 0.6418932676315308,
"learning_rate": 0.00038703076092822447,
"loss": 3.4594,
"step": 33000
},
{
"epoch": 3.557951482479784,
"eval_accuracy": 0.3734538829880109,
"eval_loss": 3.475005626678467,
"eval_runtime": 144.2527,
"eval_samples_per_second": 124.857,
"eval_steps_per_second": 7.806,
"step": 33000
},
{
"epoch": 3.5633423180592994,
"grad_norm": 0.6273847818374634,
"learning_rate": 0.000386706961683756,
"loss": 3.461,
"step": 33050
},
{
"epoch": 3.568733153638814,
"grad_norm": 0.6163213849067688,
"learning_rate": 0.0003863831624392876,
"loss": 3.4814,
"step": 33100
},
{
"epoch": 3.5741239892183287,
"grad_norm": 0.6207212209701538,
"learning_rate": 0.0003860593631948192,
"loss": 3.4588,
"step": 33150
},
{
"epoch": 3.579514824797844,
"grad_norm": 0.6105016469955444,
"learning_rate": 0.0003857355639503508,
"loss": 3.4763,
"step": 33200
},
{
"epoch": 3.5849056603773586,
"grad_norm": 0.6159183979034424,
"learning_rate": 0.00038541176470588233,
"loss": 3.4726,
"step": 33250
},
{
"epoch": 3.5902964959568733,
"grad_norm": 0.6719298958778381,
"learning_rate": 0.00038508796546141393,
"loss": 3.4954,
"step": 33300
},
{
"epoch": 3.595687331536388,
"grad_norm": 0.6160264611244202,
"learning_rate": 0.00038476416621694543,
"loss": 3.4587,
"step": 33350
},
{
"epoch": 3.601078167115903,
"grad_norm": 0.6148490905761719,
"learning_rate": 0.000384440366972477,
"loss": 3.4732,
"step": 33400
},
{
"epoch": 3.606469002695418,
"grad_norm": 0.5894046425819397,
"learning_rate": 0.0003841165677280086,
"loss": 3.4815,
"step": 33450
},
{
"epoch": 3.6118598382749325,
"grad_norm": 0.6228636503219604,
"learning_rate": 0.00038379276848354014,
"loss": 3.4764,
"step": 33500
},
{
"epoch": 3.617250673854447,
"grad_norm": 0.6030029058456421,
"learning_rate": 0.00038346896923907174,
"loss": 3.4636,
"step": 33550
},
{
"epoch": 3.6226415094339623,
"grad_norm": 0.646578848361969,
"learning_rate": 0.0003831451699946033,
"loss": 3.4602,
"step": 33600
},
{
"epoch": 3.628032345013477,
"grad_norm": 0.6056541800498962,
"learning_rate": 0.0003828213707501349,
"loss": 3.4521,
"step": 33650
},
{
"epoch": 3.6334231805929917,
"grad_norm": 0.6055415868759155,
"learning_rate": 0.00038249757150566645,
"loss": 3.451,
"step": 33700
},
{
"epoch": 3.638814016172507,
"grad_norm": 0.624546468257904,
"learning_rate": 0.00038217377226119805,
"loss": 3.471,
"step": 33750
},
{
"epoch": 3.6442048517520216,
"grad_norm": 0.6001887321472168,
"learning_rate": 0.0003818499730167296,
"loss": 3.453,
"step": 33800
},
{
"epoch": 3.6495956873315363,
"grad_norm": 0.6137872338294983,
"learning_rate": 0.00038152617377226115,
"loss": 3.4622,
"step": 33850
},
{
"epoch": 3.6549865229110514,
"grad_norm": 0.5993956923484802,
"learning_rate": 0.00038120237452779276,
"loss": 3.4632,
"step": 33900
},
{
"epoch": 3.660377358490566,
"grad_norm": 0.6484405994415283,
"learning_rate": 0.0003808785752833243,
"loss": 3.4613,
"step": 33950
},
{
"epoch": 3.665768194070081,
"grad_norm": 0.6379072070121765,
"learning_rate": 0.0003805547760388559,
"loss": 3.4814,
"step": 34000
},
{
"epoch": 3.665768194070081,
"eval_accuracy": 0.3738812147957853,
"eval_loss": 3.4691996574401855,
"eval_runtime": 144.3217,
"eval_samples_per_second": 124.798,
"eval_steps_per_second": 7.802,
"step": 34000
},
{
"epoch": 3.671159029649596,
"grad_norm": 0.6134325861930847,
"learning_rate": 0.0003802309767943874,
"loss": 3.461,
"step": 34050
},
{
"epoch": 3.6765498652291106,
"grad_norm": 0.6350472569465637,
"learning_rate": 0.000379907177549919,
"loss": 3.471,
"step": 34100
},
{
"epoch": 3.6819407008086253,
"grad_norm": 0.6383688449859619,
"learning_rate": 0.00037958337830545057,
"loss": 3.4755,
"step": 34150
},
{
"epoch": 3.68733153638814,
"grad_norm": 0.594897449016571,
"learning_rate": 0.00037925957906098217,
"loss": 3.4756,
"step": 34200
},
{
"epoch": 3.6927223719676547,
"grad_norm": 0.5822266936302185,
"learning_rate": 0.0003789357798165137,
"loss": 3.4679,
"step": 34250
},
{
"epoch": 3.69811320754717,
"grad_norm": 0.6292484402656555,
"learning_rate": 0.00037861198057204527,
"loss": 3.4758,
"step": 34300
},
{
"epoch": 3.7035040431266846,
"grad_norm": 0.6565120220184326,
"learning_rate": 0.0003783011332973556,
"loss": 3.4872,
"step": 34350
},
{
"epoch": 3.7088948787061993,
"grad_norm": 0.638484537601471,
"learning_rate": 0.0003779773340528872,
"loss": 3.4586,
"step": 34400
},
{
"epoch": 3.7142857142857144,
"grad_norm": 0.623930037021637,
"learning_rate": 0.0003776535348084188,
"loss": 3.4429,
"step": 34450
},
{
"epoch": 3.719676549865229,
"grad_norm": 0.670249342918396,
"learning_rate": 0.00037732973556395033,
"loss": 3.4623,
"step": 34500
},
{
"epoch": 3.725067385444744,
"grad_norm": 0.6273740530014038,
"learning_rate": 0.0003770059363194819,
"loss": 3.4813,
"step": 34550
},
{
"epoch": 3.730458221024259,
"grad_norm": 0.6443477272987366,
"learning_rate": 0.0003766821370750135,
"loss": 3.47,
"step": 34600
},
{
"epoch": 3.7358490566037736,
"grad_norm": 0.6770917177200317,
"learning_rate": 0.000376358337830545,
"loss": 3.4468,
"step": 34650
},
{
"epoch": 3.7412398921832883,
"grad_norm": 0.6392006874084473,
"learning_rate": 0.00037603453858607664,
"loss": 3.4892,
"step": 34700
},
{
"epoch": 3.7466307277628035,
"grad_norm": 0.6437645554542542,
"learning_rate": 0.00037571073934160814,
"loss": 3.4501,
"step": 34750
},
{
"epoch": 3.752021563342318,
"grad_norm": 0.6367730498313904,
"learning_rate": 0.00037538694009713974,
"loss": 3.4832,
"step": 34800
},
{
"epoch": 3.757412398921833,
"grad_norm": 0.5876086950302124,
"learning_rate": 0.0003750631408526713,
"loss": 3.4945,
"step": 34850
},
{
"epoch": 3.7628032345013476,
"grad_norm": 0.6111919283866882,
"learning_rate": 0.0003747393416082029,
"loss": 3.4791,
"step": 34900
},
{
"epoch": 3.7681940700808623,
"grad_norm": 0.5883005857467651,
"learning_rate": 0.00037441554236373445,
"loss": 3.4586,
"step": 34950
},
{
"epoch": 3.7735849056603774,
"grad_norm": 0.6253954172134399,
"learning_rate": 0.000374091743119266,
"loss": 3.4593,
"step": 35000
},
{
"epoch": 3.7735849056603774,
"eval_accuracy": 0.37472892462651386,
"eval_loss": 3.4628891944885254,
"eval_runtime": 144.7455,
"eval_samples_per_second": 124.432,
"eval_steps_per_second": 7.779,
"step": 35000
},
{
"epoch": 3.778975741239892,
"grad_norm": 0.6512451767921448,
"learning_rate": 0.0003737679438747976,
"loss": 3.4804,
"step": 35050
},
{
"epoch": 3.784366576819407,
"grad_norm": 0.6874452829360962,
"learning_rate": 0.00037344414463032915,
"loss": 3.449,
"step": 35100
},
{
"epoch": 3.789757412398922,
"grad_norm": 0.6314534544944763,
"learning_rate": 0.00037312034538586076,
"loss": 3.4722,
"step": 35150
},
{
"epoch": 3.7951482479784366,
"grad_norm": 0.6123417019844055,
"learning_rate": 0.0003727965461413923,
"loss": 3.453,
"step": 35200
},
{
"epoch": 3.8005390835579513,
"grad_norm": 0.6286754012107849,
"learning_rate": 0.0003724727468969239,
"loss": 3.4793,
"step": 35250
},
{
"epoch": 3.8059299191374665,
"grad_norm": 0.6487993597984314,
"learning_rate": 0.00037214894765245546,
"loss": 3.4678,
"step": 35300
},
{
"epoch": 3.811320754716981,
"grad_norm": 0.6038015484809875,
"learning_rate": 0.00037182514840798696,
"loss": 3.4703,
"step": 35350
},
{
"epoch": 3.816711590296496,
"grad_norm": 0.6406208276748657,
"learning_rate": 0.00037150134916351857,
"loss": 3.4885,
"step": 35400
},
{
"epoch": 3.822102425876011,
"grad_norm": 0.6278253793716431,
"learning_rate": 0.0003711775499190501,
"loss": 3.4436,
"step": 35450
},
{
"epoch": 3.8274932614555257,
"grad_norm": 0.6240292191505432,
"learning_rate": 0.0003708537506745817,
"loss": 3.4806,
"step": 35500
},
{
"epoch": 3.8328840970350404,
"grad_norm": 0.6784418225288391,
"learning_rate": 0.00037052995143011327,
"loss": 3.4606,
"step": 35550
},
{
"epoch": 3.838274932614555,
"grad_norm": 0.6133866906166077,
"learning_rate": 0.0003702061521856449,
"loss": 3.4521,
"step": 35600
},
{
"epoch": 3.8436657681940702,
"grad_norm": 0.5978075861930847,
"learning_rate": 0.00036988235294117643,
"loss": 3.4653,
"step": 35650
},
{
"epoch": 3.849056603773585,
"grad_norm": 0.6256576180458069,
"learning_rate": 0.00036955855369670803,
"loss": 3.4603,
"step": 35700
},
{
"epoch": 3.8544474393530996,
"grad_norm": 0.6484295725822449,
"learning_rate": 0.0003692347544522396,
"loss": 3.4417,
"step": 35750
},
{
"epoch": 3.8598382749326143,
"grad_norm": 0.6462515592575073,
"learning_rate": 0.00036891095520777113,
"loss": 3.4627,
"step": 35800
},
{
"epoch": 3.8652291105121295,
"grad_norm": 0.6457804441452026,
"learning_rate": 0.00036858715596330274,
"loss": 3.4683,
"step": 35850
},
{
"epoch": 3.870619946091644,
"grad_norm": 0.618513286113739,
"learning_rate": 0.0003682633567188343,
"loss": 3.4575,
"step": 35900
},
{
"epoch": 3.876010781671159,
"grad_norm": 0.5929429531097412,
"learning_rate": 0.0003679395574743659,
"loss": 3.4615,
"step": 35950
},
{
"epoch": 3.881401617250674,
"grad_norm": 0.6334658265113831,
"learning_rate": 0.00036761575822989744,
"loss": 3.4435,
"step": 36000
},
{
"epoch": 3.881401617250674,
"eval_accuracy": 0.3755860872584823,
"eval_loss": 3.4560911655426025,
"eval_runtime": 144.1446,
"eval_samples_per_second": 124.951,
"eval_steps_per_second": 7.812,
"step": 36000
},
{
"epoch": 3.8867924528301887,
"grad_norm": 0.6168849468231201,
"learning_rate": 0.00036729195898542905,
"loss": 3.4479,
"step": 36050
},
{
"epoch": 3.8921832884097034,
"grad_norm": 0.6238396167755127,
"learning_rate": 0.00036696815974096055,
"loss": 3.4723,
"step": 36100
},
{
"epoch": 3.8975741239892185,
"grad_norm": 0.6613624691963196,
"learning_rate": 0.00036664436049649215,
"loss": 3.4614,
"step": 36150
},
{
"epoch": 3.9029649595687332,
"grad_norm": 0.5860582590103149,
"learning_rate": 0.0003663205612520237,
"loss": 3.4426,
"step": 36200
},
{
"epoch": 3.908355795148248,
"grad_norm": 0.617758572101593,
"learning_rate": 0.00036599676200755525,
"loss": 3.4478,
"step": 36250
},
{
"epoch": 3.913746630727763,
"grad_norm": 0.6506857872009277,
"learning_rate": 0.00036567296276308686,
"loss": 3.4601,
"step": 36300
},
{
"epoch": 3.9191374663072778,
"grad_norm": 0.6125609278678894,
"learning_rate": 0.0003653491635186184,
"loss": 3.4523,
"step": 36350
},
{
"epoch": 3.9245283018867925,
"grad_norm": 0.6411651372909546,
"learning_rate": 0.00036502536427415,
"loss": 3.454,
"step": 36400
},
{
"epoch": 3.929919137466307,
"grad_norm": 0.637863278388977,
"learning_rate": 0.00036470156502968156,
"loss": 3.4599,
"step": 36450
},
{
"epoch": 3.935309973045822,
"grad_norm": 0.6554402112960815,
"learning_rate": 0.00036437776578521317,
"loss": 3.4673,
"step": 36500
},
{
"epoch": 3.940700808625337,
"grad_norm": 0.6139333248138428,
"learning_rate": 0.0003640539665407447,
"loss": 3.4545,
"step": 36550
},
{
"epoch": 3.9460916442048517,
"grad_norm": 0.6386415958404541,
"learning_rate": 0.0003637301672962763,
"loss": 3.4653,
"step": 36600
},
{
"epoch": 3.9514824797843664,
"grad_norm": 0.62546706199646,
"learning_rate": 0.00036340636805180787,
"loss": 3.4614,
"step": 36650
},
{
"epoch": 3.9568733153638815,
"grad_norm": 0.5943716168403625,
"learning_rate": 0.00036308256880733937,
"loss": 3.4583,
"step": 36700
},
{
"epoch": 3.9622641509433962,
"grad_norm": 0.6002622842788696,
"learning_rate": 0.000362758769562871,
"loss": 3.4477,
"step": 36750
},
{
"epoch": 3.967654986522911,
"grad_norm": 0.7137020826339722,
"learning_rate": 0.0003624349703184025,
"loss": 3.4475,
"step": 36800
},
{
"epoch": 3.973045822102426,
"grad_norm": 0.6767378449440002,
"learning_rate": 0.00036211117107393413,
"loss": 3.4445,
"step": 36850
},
{
"epoch": 3.9784366576819408,
"grad_norm": 0.6204874515533447,
"learning_rate": 0.0003617873718294657,
"loss": 3.4594,
"step": 36900
},
{
"epoch": 3.9838274932614555,
"grad_norm": 0.6272158026695251,
"learning_rate": 0.0003614635725849973,
"loss": 3.4718,
"step": 36950
},
{
"epoch": 3.9892183288409706,
"grad_norm": 0.6251803636550903,
"learning_rate": 0.00036113977334052883,
"loss": 3.4633,
"step": 37000
},
{
"epoch": 3.9892183288409706,
"eval_accuracy": 0.37588879420393345,
"eval_loss": 3.4493606090545654,
"eval_runtime": 144.3787,
"eval_samples_per_second": 124.748,
"eval_steps_per_second": 7.799,
"step": 37000
},
{
"epoch": 3.9946091644204853,
"grad_norm": 0.5956203937530518,
"learning_rate": 0.0003608159740960604,
"loss": 3.4618,
"step": 37050
},
{
"epoch": 4.0,
"grad_norm": 1.227440595626831,
"learning_rate": 0.000360492174851592,
"loss": 3.4529,
"step": 37100
},
{
"epoch": 4.005390835579515,
"grad_norm": 0.5926157236099243,
"learning_rate": 0.00036016837560712354,
"loss": 3.3682,
"step": 37150
},
{
"epoch": 4.010781671159029,
"grad_norm": 0.6577735543251038,
"learning_rate": 0.00035984457636265515,
"loss": 3.3682,
"step": 37200
},
{
"epoch": 4.0161725067385445,
"grad_norm": 0.6370235681533813,
"learning_rate": 0.0003595207771181867,
"loss": 3.3643,
"step": 37250
},
{
"epoch": 4.02156334231806,
"grad_norm": 0.6772335767745972,
"learning_rate": 0.0003591969778737183,
"loss": 3.3448,
"step": 37300
},
{
"epoch": 4.026954177897574,
"grad_norm": 0.6929683089256287,
"learning_rate": 0.00035887317862924985,
"loss": 3.3728,
"step": 37350
},
{
"epoch": 4.032345013477089,
"grad_norm": 0.6454404592514038,
"learning_rate": 0.00035854937938478146,
"loss": 3.379,
"step": 37400
},
{
"epoch": 4.037735849056604,
"grad_norm": 0.6357432007789612,
"learning_rate": 0.00035822558014031295,
"loss": 3.3648,
"step": 37450
},
{
"epoch": 4.0431266846361185,
"grad_norm": 0.6574638485908508,
"learning_rate": 0.0003579017808958445,
"loss": 3.3803,
"step": 37500
},
{
"epoch": 4.048517520215634,
"grad_norm": 0.6872835755348206,
"learning_rate": 0.0003575779816513761,
"loss": 3.3717,
"step": 37550
},
{
"epoch": 4.053908355795148,
"grad_norm": 0.6696375012397766,
"learning_rate": 0.00035725418240690766,
"loss": 3.3657,
"step": 37600
},
{
"epoch": 4.059299191374663,
"grad_norm": 0.6354072690010071,
"learning_rate": 0.00035693038316243926,
"loss": 3.3803,
"step": 37650
},
{
"epoch": 4.064690026954178,
"grad_norm": 0.6598987579345703,
"learning_rate": 0.0003566065839179708,
"loss": 3.3848,
"step": 37700
},
{
"epoch": 4.070080862533692,
"grad_norm": 0.6303004026412964,
"learning_rate": 0.0003562827846735024,
"loss": 3.3634,
"step": 37750
},
{
"epoch": 4.0754716981132075,
"grad_norm": 0.7050724029541016,
"learning_rate": 0.00035595898542903397,
"loss": 3.3615,
"step": 37800
},
{
"epoch": 4.080862533692723,
"grad_norm": 0.6530691981315613,
"learning_rate": 0.0003556351861845656,
"loss": 3.374,
"step": 37850
},
{
"epoch": 4.086253369272237,
"grad_norm": 0.6223407983779907,
"learning_rate": 0.0003553113869400971,
"loss": 3.3844,
"step": 37900
},
{
"epoch": 4.091644204851752,
"grad_norm": 0.6723674535751343,
"learning_rate": 0.0003549875876956287,
"loss": 3.3946,
"step": 37950
},
{
"epoch": 4.097035040431267,
"grad_norm": 0.6736387610435486,
"learning_rate": 0.0003546637884511603,
"loss": 3.381,
"step": 38000
},
{
"epoch": 4.097035040431267,
"eval_accuracy": 0.37658775823124396,
"eval_loss": 3.452270984649658,
"eval_runtime": 144.6168,
"eval_samples_per_second": 124.543,
"eval_steps_per_second": 7.786,
"step": 38000
},
{
"epoch": 4.1024258760107815,
"grad_norm": 0.6325782537460327,
"learning_rate": 0.0003543399892066918,
"loss": 3.3755,
"step": 38050
},
{
"epoch": 4.107816711590297,
"grad_norm": 0.6484423279762268,
"learning_rate": 0.0003540161899622234,
"loss": 3.3826,
"step": 38100
},
{
"epoch": 4.113207547169812,
"grad_norm": 0.6256469488143921,
"learning_rate": 0.00035369239071775493,
"loss": 3.3744,
"step": 38150
},
{
"epoch": 4.118598382749326,
"grad_norm": 0.6691221594810486,
"learning_rate": 0.00035336859147328654,
"loss": 3.4009,
"step": 38200
},
{
"epoch": 4.123989218328841,
"grad_norm": 0.6852509379386902,
"learning_rate": 0.0003530447922288181,
"loss": 3.3919,
"step": 38250
},
{
"epoch": 4.129380053908355,
"grad_norm": 0.6498061418533325,
"learning_rate": 0.0003527209929843497,
"loss": 3.3943,
"step": 38300
},
{
"epoch": 4.1347708894878705,
"grad_norm": 0.6283285021781921,
"learning_rate": 0.00035239719373988124,
"loss": 3.3832,
"step": 38350
},
{
"epoch": 4.140161725067386,
"grad_norm": 0.6327186226844788,
"learning_rate": 0.0003520733944954128,
"loss": 3.3768,
"step": 38400
},
{
"epoch": 4.1455525606469,
"grad_norm": 0.6534860134124756,
"learning_rate": 0.0003517495952509444,
"loss": 3.3869,
"step": 38450
},
{
"epoch": 4.150943396226415,
"grad_norm": 0.6725557446479797,
"learning_rate": 0.00035142579600647595,
"loss": 3.3936,
"step": 38500
},
{
"epoch": 4.15633423180593,
"grad_norm": 0.6597609519958496,
"learning_rate": 0.00035110199676200755,
"loss": 3.3816,
"step": 38550
},
{
"epoch": 4.1617250673854445,
"grad_norm": 0.6526814103126526,
"learning_rate": 0.0003507781975175391,
"loss": 3.3799,
"step": 38600
},
{
"epoch": 4.16711590296496,
"grad_norm": 0.6776999235153198,
"learning_rate": 0.0003504543982730707,
"loss": 3.4063,
"step": 38650
},
{
"epoch": 4.172506738544475,
"grad_norm": 0.6845957636833191,
"learning_rate": 0.00035013059902860226,
"loss": 3.3836,
"step": 38700
},
{
"epoch": 4.177897574123989,
"grad_norm": 0.6326277852058411,
"learning_rate": 0.00034980679978413375,
"loss": 3.3779,
"step": 38750
},
{
"epoch": 4.183288409703504,
"grad_norm": 0.6746770143508911,
"learning_rate": 0.00034948300053966536,
"loss": 3.3979,
"step": 38800
},
{
"epoch": 4.188679245283019,
"grad_norm": 0.637310266494751,
"learning_rate": 0.0003491592012951969,
"loss": 3.4033,
"step": 38850
},
{
"epoch": 4.1940700808625335,
"grad_norm": 0.6572282910346985,
"learning_rate": 0.0003488354020507285,
"loss": 3.4011,
"step": 38900
},
{
"epoch": 4.199460916442049,
"grad_norm": 0.6860615015029907,
"learning_rate": 0.00034851160280626007,
"loss": 3.395,
"step": 38950
},
{
"epoch": 4.204851752021563,
"grad_norm": 0.6367702484130859,
"learning_rate": 0.00034818780356179167,
"loss": 3.3899,
"step": 39000
},
{
"epoch": 4.204851752021563,
"eval_accuracy": 0.37691665052265844,
"eval_loss": 3.44580340385437,
"eval_runtime": 144.2533,
"eval_samples_per_second": 124.857,
"eval_steps_per_second": 7.806,
"step": 39000
},
{
"epoch": 4.210242587601078,
"grad_norm": 0.6492680907249451,
"learning_rate": 0.0003478640043173232,
"loss": 3.3895,
"step": 39050
},
{
"epoch": 4.215633423180593,
"grad_norm": 0.5910336971282959,
"learning_rate": 0.0003475402050728548,
"loss": 3.3916,
"step": 39100
},
{
"epoch": 4.2210242587601075,
"grad_norm": 0.6553267240524292,
"learning_rate": 0.0003472164058283864,
"loss": 3.386,
"step": 39150
},
{
"epoch": 4.226415094339623,
"grad_norm": 0.6129311323165894,
"learning_rate": 0.0003468926065839179,
"loss": 3.3835,
"step": 39200
},
{
"epoch": 4.231805929919138,
"grad_norm": 0.6861194968223572,
"learning_rate": 0.00034656880733944953,
"loss": 3.3812,
"step": 39250
},
{
"epoch": 4.237196765498652,
"grad_norm": 0.6951770782470703,
"learning_rate": 0.0003462450080949811,
"loss": 3.3953,
"step": 39300
},
{
"epoch": 4.242587601078167,
"grad_norm": 0.6330463290214539,
"learning_rate": 0.0003459212088505127,
"loss": 3.3945,
"step": 39350
},
{
"epoch": 4.247978436657682,
"grad_norm": 0.6417586803436279,
"learning_rate": 0.0003455974096060442,
"loss": 3.3972,
"step": 39400
},
{
"epoch": 4.2533692722371965,
"grad_norm": 0.6526570320129395,
"learning_rate": 0.00034528008634646514,
"loss": 3.3888,
"step": 39450
},
{
"epoch": 4.258760107816712,
"grad_norm": 0.6432264447212219,
"learning_rate": 0.00034495628710199674,
"loss": 3.3796,
"step": 39500
},
{
"epoch": 4.264150943396227,
"grad_norm": 0.67496657371521,
"learning_rate": 0.0003446324878575283,
"loss": 3.3853,
"step": 39550
},
{
"epoch": 4.269541778975741,
"grad_norm": 0.6567350625991821,
"learning_rate": 0.00034430868861305984,
"loss": 3.3848,
"step": 39600
},
{
"epoch": 4.274932614555256,
"grad_norm": 0.6509039998054504,
"learning_rate": 0.00034398488936859145,
"loss": 3.4107,
"step": 39650
},
{
"epoch": 4.280323450134771,
"grad_norm": 0.7118595838546753,
"learning_rate": 0.000343661090124123,
"loss": 3.3953,
"step": 39700
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.655920684337616,
"learning_rate": 0.0003433372908796546,
"loss": 3.3909,
"step": 39750
},
{
"epoch": 4.291105121293801,
"grad_norm": 0.6276910305023193,
"learning_rate": 0.00034301349163518615,
"loss": 3.4053,
"step": 39800
},
{
"epoch": 4.296495956873315,
"grad_norm": 0.6661441922187805,
"learning_rate": 0.00034268969239071776,
"loss": 3.4073,
"step": 39850
},
{
"epoch": 4.30188679245283,
"grad_norm": 0.6385535001754761,
"learning_rate": 0.0003423658931462493,
"loss": 3.3961,
"step": 39900
},
{
"epoch": 4.307277628032345,
"grad_norm": 0.6921438574790955,
"learning_rate": 0.00034204209390178086,
"loss": 3.4005,
"step": 39950
},
{
"epoch": 4.3126684636118595,
"grad_norm": 0.6449606418609619,
"learning_rate": 0.00034171829465731246,
"loss": 3.3917,
"step": 40000
},
{
"epoch": 4.3126684636118595,
"eval_accuracy": 0.3773982001214522,
"eval_loss": 3.443068265914917,
"eval_runtime": 144.5482,
"eval_samples_per_second": 124.602,
"eval_steps_per_second": 7.79,
"step": 40000
},
{
"epoch": 4.318059299191375,
"grad_norm": 0.6605200171470642,
"learning_rate": 0.00034139449541284396,
"loss": 3.3852,
"step": 40050
},
{
"epoch": 4.32345013477089,
"grad_norm": 0.6481444239616394,
"learning_rate": 0.00034107069616837556,
"loss": 3.4133,
"step": 40100
},
{
"epoch": 4.328840970350404,
"grad_norm": 0.6764447093009949,
"learning_rate": 0.0003407468969239071,
"loss": 3.4224,
"step": 40150
},
{
"epoch": 4.334231805929919,
"grad_norm": 0.697698175907135,
"learning_rate": 0.0003404230976794387,
"loss": 3.3895,
"step": 40200
},
{
"epoch": 4.339622641509434,
"grad_norm": 0.6654530763626099,
"learning_rate": 0.00034009929843497027,
"loss": 3.3957,
"step": 40250
},
{
"epoch": 4.345013477088949,
"grad_norm": 0.6817287802696228,
"learning_rate": 0.0003397754991905019,
"loss": 3.4012,
"step": 40300
},
{
"epoch": 4.350404312668464,
"grad_norm": 0.6639880537986755,
"learning_rate": 0.0003394516999460334,
"loss": 3.3964,
"step": 40350
},
{
"epoch": 4.355795148247978,
"grad_norm": 0.7102729678153992,
"learning_rate": 0.000339127900701565,
"loss": 3.4083,
"step": 40400
},
{
"epoch": 4.361185983827493,
"grad_norm": 0.6927199959754944,
"learning_rate": 0.0003388041014570966,
"loss": 3.3772,
"step": 40450
},
{
"epoch": 4.366576819407008,
"grad_norm": 0.7088574171066284,
"learning_rate": 0.00033848030221262813,
"loss": 3.3837,
"step": 40500
},
{
"epoch": 4.3719676549865225,
"grad_norm": 0.6638116240501404,
"learning_rate": 0.00033815650296815974,
"loss": 3.4081,
"step": 40550
},
{
"epoch": 4.377358490566038,
"grad_norm": 0.6668712496757507,
"learning_rate": 0.0003378327037236913,
"loss": 3.3914,
"step": 40600
},
{
"epoch": 4.382749326145553,
"grad_norm": 0.6364918351173401,
"learning_rate": 0.0003375089044792229,
"loss": 3.3817,
"step": 40650
},
{
"epoch": 4.388140161725067,
"grad_norm": 0.6079948544502258,
"learning_rate": 0.00033718510523475444,
"loss": 3.4127,
"step": 40700
},
{
"epoch": 4.393530997304582,
"grad_norm": 0.668003261089325,
"learning_rate": 0.00033686130599028605,
"loss": 3.4093,
"step": 40750
},
{
"epoch": 4.398921832884097,
"grad_norm": 0.7348698377609253,
"learning_rate": 0.00033653750674581754,
"loss": 3.3802,
"step": 40800
},
{
"epoch": 4.404312668463612,
"grad_norm": 0.6434510350227356,
"learning_rate": 0.0003362137075013491,
"loss": 3.4029,
"step": 40850
},
{
"epoch": 4.409703504043127,
"grad_norm": 0.6313856244087219,
"learning_rate": 0.0003358899082568807,
"loss": 3.3884,
"step": 40900
},
{
"epoch": 4.415094339622642,
"grad_norm": 0.6700965762138367,
"learning_rate": 0.00033556610901241225,
"loss": 3.3958,
"step": 40950
},
{
"epoch": 4.420485175202156,
"grad_norm": 0.6766634583473206,
"learning_rate": 0.00033524230976794385,
"loss": 3.4098,
"step": 41000
},
{
"epoch": 4.420485175202156,
"eval_accuracy": 0.37805739719182263,
"eval_loss": 3.4349136352539062,
"eval_runtime": 144.174,
"eval_samples_per_second": 124.925,
"eval_steps_per_second": 7.81,
"step": 41000
},
{
"epoch": 4.425876010781671,
"grad_norm": 0.6245938539505005,
"learning_rate": 0.0003349185105234754,
"loss": 3.4083,
"step": 41050
},
{
"epoch": 4.431266846361186,
"grad_norm": 0.648089587688446,
"learning_rate": 0.000334594711279007,
"loss": 3.3911,
"step": 41100
},
{
"epoch": 4.436657681940701,
"grad_norm": 0.6679409742355347,
"learning_rate": 0.00033427091203453856,
"loss": 3.3715,
"step": 41150
},
{
"epoch": 4.442048517520216,
"grad_norm": 0.6518722176551819,
"learning_rate": 0.00033394711279007016,
"loss": 3.4074,
"step": 41200
},
{
"epoch": 4.44743935309973,
"grad_norm": 0.6937799453735352,
"learning_rate": 0.0003336233135456017,
"loss": 3.3949,
"step": 41250
},
{
"epoch": 4.452830188679245,
"grad_norm": 0.6212307214736938,
"learning_rate": 0.00033329951430113326,
"loss": 3.3916,
"step": 41300
},
{
"epoch": 4.45822102425876,
"grad_norm": 0.6860057711601257,
"learning_rate": 0.00033297571505666487,
"loss": 3.3834,
"step": 41350
},
{
"epoch": 4.463611859838275,
"grad_norm": 0.6513189673423767,
"learning_rate": 0.00033265191581219637,
"loss": 3.3937,
"step": 41400
},
{
"epoch": 4.46900269541779,
"grad_norm": 0.7178239226341248,
"learning_rate": 0.00033232811656772797,
"loss": 3.3812,
"step": 41450
},
{
"epoch": 4.474393530997305,
"grad_norm": 0.6064930558204651,
"learning_rate": 0.0003320043173232595,
"loss": 3.3859,
"step": 41500
},
{
"epoch": 4.479784366576819,
"grad_norm": 0.6735703945159912,
"learning_rate": 0.0003316869940636805,
"loss": 3.4139,
"step": 41550
},
{
"epoch": 4.485175202156334,
"grad_norm": 0.6741956472396851,
"learning_rate": 0.000331363194819212,
"loss": 3.4056,
"step": 41600
},
{
"epoch": 4.490566037735849,
"grad_norm": 0.6844803094863892,
"learning_rate": 0.00033103939557474363,
"loss": 3.4093,
"step": 41650
},
{
"epoch": 4.495956873315364,
"grad_norm": 0.6774608492851257,
"learning_rate": 0.0003307155963302752,
"loss": 3.4104,
"step": 41700
},
{
"epoch": 4.501347708894879,
"grad_norm": 0.7241947054862976,
"learning_rate": 0.0003303917970858068,
"loss": 3.394,
"step": 41750
},
{
"epoch": 4.506738544474393,
"grad_norm": 0.6362870335578918,
"learning_rate": 0.00033006799784133833,
"loss": 3.407,
"step": 41800
},
{
"epoch": 4.512129380053908,
"grad_norm": 0.6904320120811462,
"learning_rate": 0.00032974419859686994,
"loss": 3.3856,
"step": 41850
},
{
"epoch": 4.517520215633423,
"grad_norm": 0.6738048195838928,
"learning_rate": 0.0003294203993524015,
"loss": 3.4067,
"step": 41900
},
{
"epoch": 4.5229110512129385,
"grad_norm": 0.7088378071784973,
"learning_rate": 0.0003290966001079331,
"loss": 3.4089,
"step": 41950
},
{
"epoch": 4.528301886792453,
"grad_norm": 0.6381910443305969,
"learning_rate": 0.00032877280086346465,
"loss": 3.4073,
"step": 42000
},
{
"epoch": 4.528301886792453,
"eval_accuracy": 0.3784368130760708,
"eval_loss": 3.433210849761963,
"eval_runtime": 144.4135,
"eval_samples_per_second": 124.718,
"eval_steps_per_second": 7.797,
"step": 42000
},
{
"epoch": 4.533692722371968,
"grad_norm": 0.6477466821670532,
"learning_rate": 0.00032844900161899614,
"loss": 3.3838,
"step": 42050
},
{
"epoch": 4.539083557951482,
"grad_norm": 0.6849921345710754,
"learning_rate": 0.0003281252023745278,
"loss": 3.3943,
"step": 42100
},
{
"epoch": 4.544474393530997,
"grad_norm": 0.7039716243743896,
"learning_rate": 0.0003278014031300593,
"loss": 3.3973,
"step": 42150
},
{
"epoch": 4.549865229110512,
"grad_norm": 0.6716494560241699,
"learning_rate": 0.0003274776038855909,
"loss": 3.4107,
"step": 42200
},
{
"epoch": 4.555256064690027,
"grad_norm": 0.6577488780021667,
"learning_rate": 0.00032715380464112245,
"loss": 3.4032,
"step": 42250
},
{
"epoch": 4.560646900269542,
"grad_norm": 0.6571736931800842,
"learning_rate": 0.00032683000539665406,
"loss": 3.3956,
"step": 42300
},
{
"epoch": 4.566037735849057,
"grad_norm": 0.6686569452285767,
"learning_rate": 0.0003265062061521856,
"loss": 3.4182,
"step": 42350
},
{
"epoch": 4.571428571428571,
"grad_norm": 0.664892315864563,
"learning_rate": 0.0003261824069077172,
"loss": 3.3942,
"step": 42400
},
{
"epoch": 4.576819407008086,
"grad_norm": 0.624660849571228,
"learning_rate": 0.00032585860766324876,
"loss": 3.4074,
"step": 42450
},
{
"epoch": 4.5822102425876015,
"grad_norm": 0.7108879685401917,
"learning_rate": 0.0003255348084187803,
"loss": 3.391,
"step": 42500
},
{
"epoch": 4.587601078167116,
"grad_norm": 0.6596403121948242,
"learning_rate": 0.0003252110091743119,
"loss": 3.4013,
"step": 42550
},
{
"epoch": 4.592991913746631,
"grad_norm": 0.6906297206878662,
"learning_rate": 0.00032488720992984347,
"loss": 3.3821,
"step": 42600
},
{
"epoch": 4.598382749326145,
"grad_norm": 0.6205872893333435,
"learning_rate": 0.0003245634106853751,
"loss": 3.4077,
"step": 42650
},
{
"epoch": 4.60377358490566,
"grad_norm": 0.623426616191864,
"learning_rate": 0.0003242396114409066,
"loss": 3.4026,
"step": 42700
},
{
"epoch": 4.609164420485175,
"grad_norm": 0.6449417471885681,
"learning_rate": 0.00032391581219643823,
"loss": 3.3922,
"step": 42750
},
{
"epoch": 4.6145552560646905,
"grad_norm": 0.6269782781600952,
"learning_rate": 0.0003235920129519697,
"loss": 3.3923,
"step": 42800
},
{
"epoch": 4.619946091644205,
"grad_norm": 0.6648560762405396,
"learning_rate": 0.0003232682137075013,
"loss": 3.4036,
"step": 42850
},
{
"epoch": 4.62533692722372,
"grad_norm": 0.6681641340255737,
"learning_rate": 0.0003229444144630329,
"loss": 3.4127,
"step": 42900
},
{
"epoch": 4.630727762803234,
"grad_norm": 0.6480531692504883,
"learning_rate": 0.00032262061521856443,
"loss": 3.4226,
"step": 42950
},
{
"epoch": 4.636118598382749,
"grad_norm": 0.690486490726471,
"learning_rate": 0.00032229681597409604,
"loss": 3.3993,
"step": 43000
},
{
"epoch": 4.636118598382749,
"eval_accuracy": 0.3786213056795798,
"eval_loss": 3.4269800186157227,
"eval_runtime": 144.4596,
"eval_samples_per_second": 124.678,
"eval_steps_per_second": 7.795,
"step": 43000
},
{
"epoch": 4.6415094339622645,
"grad_norm": 0.631764829158783,
"learning_rate": 0.0003219730167296276,
"loss": 3.4012,
"step": 43050
},
{
"epoch": 4.646900269541779,
"grad_norm": 0.6853381991386414,
"learning_rate": 0.0003216492174851592,
"loss": 3.3943,
"step": 43100
},
{
"epoch": 4.652291105121294,
"grad_norm": 0.7007822394371033,
"learning_rate": 0.00032132541824069074,
"loss": 3.4042,
"step": 43150
},
{
"epoch": 4.657681940700809,
"grad_norm": 0.6877723932266235,
"learning_rate": 0.00032100161899622235,
"loss": 3.4013,
"step": 43200
},
{
"epoch": 4.663072776280323,
"grad_norm": 0.6408644914627075,
"learning_rate": 0.0003206778197517539,
"loss": 3.39,
"step": 43250
},
{
"epoch": 4.668463611859838,
"grad_norm": 0.6747402548789978,
"learning_rate": 0.00032035402050728545,
"loss": 3.3905,
"step": 43300
},
{
"epoch": 4.6738544474393535,
"grad_norm": 0.6702984571456909,
"learning_rate": 0.00032003022126281705,
"loss": 3.388,
"step": 43350
},
{
"epoch": 4.679245283018868,
"grad_norm": 0.6272915005683899,
"learning_rate": 0.00031970642201834855,
"loss": 3.4139,
"step": 43400
},
{
"epoch": 4.684636118598383,
"grad_norm": 0.6993704438209534,
"learning_rate": 0.0003193826227738802,
"loss": 3.406,
"step": 43450
},
{
"epoch": 4.690026954177897,
"grad_norm": 0.6674578189849854,
"learning_rate": 0.0003190588235294117,
"loss": 3.4172,
"step": 43500
},
{
"epoch": 4.695417789757412,
"grad_norm": 0.6447967886924744,
"learning_rate": 0.0003187350242849433,
"loss": 3.3835,
"step": 43550
},
{
"epoch": 4.7008086253369274,
"grad_norm": 0.6298507452011108,
"learning_rate": 0.00031841122504047486,
"loss": 3.3945,
"step": 43600
},
{
"epoch": 4.706199460916442,
"grad_norm": 0.6774371862411499,
"learning_rate": 0.00031808742579600646,
"loss": 3.4136,
"step": 43650
},
{
"epoch": 4.711590296495957,
"grad_norm": 0.6548194289207458,
"learning_rate": 0.000317763626551538,
"loss": 3.4013,
"step": 43700
},
{
"epoch": 4.716981132075472,
"grad_norm": 0.65519779920578,
"learning_rate": 0.00031743982730706957,
"loss": 3.3831,
"step": 43750
},
{
"epoch": 4.722371967654986,
"grad_norm": 0.6379764080047607,
"learning_rate": 0.00031711602806260117,
"loss": 3.3949,
"step": 43800
},
{
"epoch": 4.727762803234501,
"grad_norm": 0.6646135449409485,
"learning_rate": 0.0003167922288181327,
"loss": 3.411,
"step": 43850
},
{
"epoch": 4.7331536388140165,
"grad_norm": 0.6977700591087341,
"learning_rate": 0.0003164684295736643,
"loss": 3.3905,
"step": 43900
},
{
"epoch": 4.738544474393531,
"grad_norm": 0.6893843412399292,
"learning_rate": 0.0003161446303291959,
"loss": 3.3993,
"step": 43950
},
{
"epoch": 4.743935309973046,
"grad_norm": 0.626663327217102,
"learning_rate": 0.0003158208310847275,
"loss": 3.4026,
"step": 44000
},
{
"epoch": 4.743935309973046,
"eval_accuracy": 0.3796951221698503,
"eval_loss": 3.420562744140625,
"eval_runtime": 144.3885,
"eval_samples_per_second": 124.74,
"eval_steps_per_second": 7.798,
"step": 44000
},
{
"epoch": 4.74932614555256,
"grad_norm": 0.6197607517242432,
"learning_rate": 0.00031549703184025903,
"loss": 3.418,
"step": 44050
},
{
"epoch": 4.754716981132075,
"grad_norm": 0.7079957723617554,
"learning_rate": 0.00031517323259579064,
"loss": 3.3875,
"step": 44100
},
{
"epoch": 4.7601078167115904,
"grad_norm": 0.7121281027793884,
"learning_rate": 0.00031484943335132213,
"loss": 3.3957,
"step": 44150
},
{
"epoch": 4.765498652291106,
"grad_norm": 0.6488174796104431,
"learning_rate": 0.0003145256341068537,
"loss": 3.3873,
"step": 44200
},
{
"epoch": 4.77088948787062,
"grad_norm": 0.6334786415100098,
"learning_rate": 0.0003142018348623853,
"loss": 3.4079,
"step": 44250
},
{
"epoch": 4.776280323450135,
"grad_norm": 0.6670863628387451,
"learning_rate": 0.00031387803561791684,
"loss": 3.3928,
"step": 44300
},
{
"epoch": 4.781671159029649,
"grad_norm": 0.7258300185203552,
"learning_rate": 0.00031355423637344844,
"loss": 3.3898,
"step": 44350
},
{
"epoch": 4.787061994609164,
"grad_norm": 0.6830951571464539,
"learning_rate": 0.00031323043712898,
"loss": 3.3975,
"step": 44400
},
{
"epoch": 4.7924528301886795,
"grad_norm": 0.6534838080406189,
"learning_rate": 0.0003129066378845116,
"loss": 3.4116,
"step": 44450
},
{
"epoch": 4.797843665768194,
"grad_norm": 0.6513312458992004,
"learning_rate": 0.0003125893146249325,
"loss": 3.4059,
"step": 44500
},
{
"epoch": 4.803234501347709,
"grad_norm": 0.6737047433853149,
"learning_rate": 0.0003122655153804641,
"loss": 3.3976,
"step": 44550
},
{
"epoch": 4.808625336927224,
"grad_norm": 0.6408705711364746,
"learning_rate": 0.00031194171613599565,
"loss": 3.4072,
"step": 44600
},
{
"epoch": 4.814016172506738,
"grad_norm": 0.6722981333732605,
"learning_rate": 0.00031161791689152726,
"loss": 3.4035,
"step": 44650
},
{
"epoch": 4.819407008086253,
"grad_norm": 0.6444264650344849,
"learning_rate": 0.0003112941176470588,
"loss": 3.3919,
"step": 44700
},
{
"epoch": 4.824797843665769,
"grad_norm": 0.6497697830200195,
"learning_rate": 0.0003109703184025904,
"loss": 3.4102,
"step": 44750
},
{
"epoch": 4.830188679245283,
"grad_norm": 0.6528656482696533,
"learning_rate": 0.0003106465191581219,
"loss": 3.4189,
"step": 44800
},
{
"epoch": 4.835579514824798,
"grad_norm": 0.694046676158905,
"learning_rate": 0.0003103227199136535,
"loss": 3.4056,
"step": 44850
},
{
"epoch": 4.840970350404312,
"grad_norm": 0.690146803855896,
"learning_rate": 0.00030999892066918506,
"loss": 3.4129,
"step": 44900
},
{
"epoch": 4.846361185983827,
"grad_norm": 0.6869221329689026,
"learning_rate": 0.0003096751214247166,
"loss": 3.4142,
"step": 44950
},
{
"epoch": 4.8517520215633425,
"grad_norm": 0.6895796656608582,
"learning_rate": 0.0003093513221802482,
"loss": 3.407,
"step": 45000
},
{
"epoch": 4.8517520215633425,
"eval_accuracy": 0.3797722657201986,
"eval_loss": 3.4162397384643555,
"eval_runtime": 144.4662,
"eval_samples_per_second": 124.673,
"eval_steps_per_second": 7.794,
"step": 45000
},
{
"epoch": 4.857142857142857,
"grad_norm": 0.6864706873893738,
"learning_rate": 0.00030902752293577977,
"loss": 3.397,
"step": 45050
},
{
"epoch": 4.862533692722372,
"grad_norm": 0.6451402306556702,
"learning_rate": 0.0003087037236913114,
"loss": 3.4013,
"step": 45100
},
{
"epoch": 4.867924528301887,
"grad_norm": 0.6563948392868042,
"learning_rate": 0.0003083799244468429,
"loss": 3.4058,
"step": 45150
},
{
"epoch": 4.873315363881401,
"grad_norm": 0.72186279296875,
"learning_rate": 0.00030805612520237453,
"loss": 3.3864,
"step": 45200
},
{
"epoch": 4.878706199460916,
"grad_norm": 0.6799795031547546,
"learning_rate": 0.0003077323259579061,
"loss": 3.3967,
"step": 45250
},
{
"epoch": 4.884097035040432,
"grad_norm": 0.6401630640029907,
"learning_rate": 0.00030740852671343763,
"loss": 3.3831,
"step": 45300
},
{
"epoch": 4.889487870619946,
"grad_norm": 0.6507147550582886,
"learning_rate": 0.00030708472746896924,
"loss": 3.3956,
"step": 45350
},
{
"epoch": 4.894878706199461,
"grad_norm": 0.6847612857818604,
"learning_rate": 0.00030676092822450073,
"loss": 3.4008,
"step": 45400
},
{
"epoch": 4.900269541778976,
"grad_norm": 0.6450076103210449,
"learning_rate": 0.0003064371289800324,
"loss": 3.4016,
"step": 45450
},
{
"epoch": 4.90566037735849,
"grad_norm": 0.7253491282463074,
"learning_rate": 0.0003061133297355639,
"loss": 3.3926,
"step": 45500
},
{
"epoch": 4.9110512129380055,
"grad_norm": 0.6734742522239685,
"learning_rate": 0.0003057895304910955,
"loss": 3.3791,
"step": 45550
},
{
"epoch": 4.916442048517521,
"grad_norm": 0.6890952587127686,
"learning_rate": 0.00030546573124662704,
"loss": 3.3887,
"step": 45600
},
{
"epoch": 4.921832884097035,
"grad_norm": 0.6757829189300537,
"learning_rate": 0.00030514193200215865,
"loss": 3.3902,
"step": 45650
},
{
"epoch": 4.92722371967655,
"grad_norm": 0.6162055730819702,
"learning_rate": 0.0003048181327576902,
"loss": 3.4045,
"step": 45700
},
{
"epoch": 4.932614555256064,
"grad_norm": 0.6603294610977173,
"learning_rate": 0.00030449433351322175,
"loss": 3.4003,
"step": 45750
},
{
"epoch": 4.938005390835579,
"grad_norm": 0.7538667321205139,
"learning_rate": 0.00030417053426875335,
"loss": 3.3879,
"step": 45800
},
{
"epoch": 4.943396226415095,
"grad_norm": 0.6716868281364441,
"learning_rate": 0.0003038467350242849,
"loss": 3.3925,
"step": 45850
},
{
"epoch": 4.948787061994609,
"grad_norm": 0.6670491099357605,
"learning_rate": 0.0003035229357798165,
"loss": 3.3971,
"step": 45900
},
{
"epoch": 4.954177897574124,
"grad_norm": 0.6961699724197388,
"learning_rate": 0.00030319913653534806,
"loss": 3.396,
"step": 45950
},
{
"epoch": 4.959568733153639,
"grad_norm": 0.6553487777709961,
"learning_rate": 0.00030287533729087966,
"loss": 3.3882,
"step": 46000
},
{
"epoch": 4.959568733153639,
"eval_accuracy": 0.3807090709189351,
"eval_loss": 3.412442445755005,
"eval_runtime": 144.2702,
"eval_samples_per_second": 124.842,
"eval_steps_per_second": 7.805,
"step": 46000
},
{
"epoch": 4.964959568733153,
"grad_norm": 0.6806796789169312,
"learning_rate": 0.0003025515380464112,
"loss": 3.4042,
"step": 46050
},
{
"epoch": 4.9703504043126685,
"grad_norm": 0.6539183259010315,
"learning_rate": 0.0003022277388019428,
"loss": 3.3863,
"step": 46100
},
{
"epoch": 4.975741239892184,
"grad_norm": 0.6914280652999878,
"learning_rate": 0.0003019039395574743,
"loss": 3.4058,
"step": 46150
},
{
"epoch": 4.981132075471698,
"grad_norm": 0.6996748447418213,
"learning_rate": 0.00030158014031300587,
"loss": 3.3922,
"step": 46200
},
{
"epoch": 4.986522911051213,
"grad_norm": 0.6464667320251465,
"learning_rate": 0.00030125634106853747,
"loss": 3.4021,
"step": 46250
},
{
"epoch": 4.991913746630727,
"grad_norm": 0.6513360738754272,
"learning_rate": 0.000300932541824069,
"loss": 3.4054,
"step": 46300
},
{
"epoch": 4.997304582210242,
"grad_norm": 0.6710907816886902,
"learning_rate": 0.0003006087425796006,
"loss": 3.389,
"step": 46350
},
{
"epoch": 5.002695417789758,
"grad_norm": 0.6305559873580933,
"learning_rate": 0.0003002849433351322,
"loss": 3.3599,
"step": 46400
},
{
"epoch": 5.008086253369272,
"grad_norm": 0.6296229362487793,
"learning_rate": 0.00029996114409066373,
"loss": 3.2968,
"step": 46450
},
{
"epoch": 5.013477088948787,
"grad_norm": 0.6661510467529297,
"learning_rate": 0.00029963734484619533,
"loss": 3.2995,
"step": 46500
},
{
"epoch": 5.018867924528302,
"grad_norm": 0.7171527147293091,
"learning_rate": 0.0002993135456017269,
"loss": 3.2957,
"step": 46550
},
{
"epoch": 5.024258760107816,
"grad_norm": 0.69913250207901,
"learning_rate": 0.0002989897463572585,
"loss": 3.3066,
"step": 46600
},
{
"epoch": 5.0296495956873315,
"grad_norm": 0.6664648652076721,
"learning_rate": 0.00029866594711279004,
"loss": 3.3058,
"step": 46650
},
{
"epoch": 5.035040431266847,
"grad_norm": 0.6796714067459106,
"learning_rate": 0.00029834214786832164,
"loss": 3.3204,
"step": 46700
},
{
"epoch": 5.040431266846361,
"grad_norm": 0.6862428784370422,
"learning_rate": 0.0002980183486238532,
"loss": 3.3077,
"step": 46750
},
{
"epoch": 5.045822102425876,
"grad_norm": 0.642417311668396,
"learning_rate": 0.00029769454937938474,
"loss": 3.3257,
"step": 46800
},
{
"epoch": 5.051212938005391,
"grad_norm": 0.7176947593688965,
"learning_rate": 0.0002973707501349163,
"loss": 3.307,
"step": 46850
},
{
"epoch": 5.056603773584905,
"grad_norm": 0.7516077756881714,
"learning_rate": 0.0002970469508904479,
"loss": 3.3188,
"step": 46900
},
{
"epoch": 5.061994609164421,
"grad_norm": 0.6804366111755371,
"learning_rate": 0.00029672962763086885,
"loss": 3.3267,
"step": 46950
},
{
"epoch": 5.067385444743936,
"grad_norm": 0.7159265875816345,
"learning_rate": 0.0002964058283864004,
"loss": 3.3153,
"step": 47000
},
{
"epoch": 5.067385444743936,
"eval_accuracy": 0.380938111206448,
"eval_loss": 3.4160373210906982,
"eval_runtime": 144.5322,
"eval_samples_per_second": 124.616,
"eval_steps_per_second": 7.791,
"step": 47000
},
{
"epoch": 5.07277628032345,
"grad_norm": 0.6850479245185852,
"learning_rate": 0.000296082029141932,
"loss": 3.3179,
"step": 47050
},
{
"epoch": 5.078167115902965,
"grad_norm": 0.7106557488441467,
"learning_rate": 0.00029575822989746356,
"loss": 3.3213,
"step": 47100
},
{
"epoch": 5.083557951482479,
"grad_norm": 0.6948096752166748,
"learning_rate": 0.0002954344306529951,
"loss": 3.3084,
"step": 47150
},
{
"epoch": 5.0889487870619945,
"grad_norm": 0.6754189729690552,
"learning_rate": 0.00029511063140852666,
"loss": 3.3179,
"step": 47200
},
{
"epoch": 5.09433962264151,
"grad_norm": 0.6741254925727844,
"learning_rate": 0.00029478683216405826,
"loss": 3.309,
"step": 47250
},
{
"epoch": 5.099730458221024,
"grad_norm": 0.6865102648735046,
"learning_rate": 0.0002944630329195898,
"loss": 3.3228,
"step": 47300
},
{
"epoch": 5.105121293800539,
"grad_norm": 0.7147253751754761,
"learning_rate": 0.0002941392336751214,
"loss": 3.3286,
"step": 47350
},
{
"epoch": 5.110512129380054,
"grad_norm": 0.6572174429893494,
"learning_rate": 0.00029381543443065297,
"loss": 3.337,
"step": 47400
},
{
"epoch": 5.115902964959568,
"grad_norm": 0.7195749878883362,
"learning_rate": 0.0002934916351861846,
"loss": 3.3268,
"step": 47450
},
{
"epoch": 5.121293800539084,
"grad_norm": 0.6609562039375305,
"learning_rate": 0.0002931678359417161,
"loss": 3.3286,
"step": 47500
},
{
"epoch": 5.126684636118599,
"grad_norm": 0.7119935750961304,
"learning_rate": 0.0002928440366972477,
"loss": 3.3268,
"step": 47550
},
{
"epoch": 5.132075471698113,
"grad_norm": 0.7080712914466858,
"learning_rate": 0.0002925202374527792,
"loss": 3.3288,
"step": 47600
},
{
"epoch": 5.137466307277628,
"grad_norm": 0.7142196893692017,
"learning_rate": 0.00029219643820831083,
"loss": 3.32,
"step": 47650
},
{
"epoch": 5.142857142857143,
"grad_norm": 0.6926892995834351,
"learning_rate": 0.0002918726389638424,
"loss": 3.3151,
"step": 47700
},
{
"epoch": 5.1482479784366575,
"grad_norm": 0.6689636707305908,
"learning_rate": 0.000291548839719374,
"loss": 3.3159,
"step": 47750
},
{
"epoch": 5.153638814016173,
"grad_norm": 0.665522575378418,
"learning_rate": 0.00029122504047490554,
"loss": 3.3321,
"step": 47800
},
{
"epoch": 5.159029649595688,
"grad_norm": 0.6729112267494202,
"learning_rate": 0.0002909012412304371,
"loss": 3.3053,
"step": 47850
},
{
"epoch": 5.164420485175202,
"grad_norm": 0.7552754282951355,
"learning_rate": 0.0002905774419859687,
"loss": 3.3429,
"step": 47900
},
{
"epoch": 5.169811320754717,
"grad_norm": 0.6447635889053345,
"learning_rate": 0.00029025364274150024,
"loss": 3.3259,
"step": 47950
},
{
"epoch": 5.175202156334231,
"grad_norm": 0.6801449656486511,
"learning_rate": 0.0002899298434970318,
"loss": 3.3206,
"step": 48000
},
{
"epoch": 5.175202156334231,
"eval_accuracy": 0.38110282898437475,
"eval_loss": 3.411052703857422,
"eval_runtime": 145.5236,
"eval_samples_per_second": 123.767,
"eval_steps_per_second": 7.738,
"step": 48000
},
{
"epoch": 5.180592991913747,
"grad_norm": 0.706554114818573,
"learning_rate": 0.0002896060442525634,
"loss": 3.3333,
"step": 48050
},
{
"epoch": 5.185983827493262,
"grad_norm": 0.6805872917175293,
"learning_rate": 0.00028928224500809495,
"loss": 3.3201,
"step": 48100
},
{
"epoch": 5.191374663072776,
"grad_norm": 0.6964337825775146,
"learning_rate": 0.0002889584457636265,
"loss": 3.3337,
"step": 48150
},
{
"epoch": 5.196765498652291,
"grad_norm": 0.6783593893051147,
"learning_rate": 0.0002886346465191581,
"loss": 3.3354,
"step": 48200
},
{
"epoch": 5.202156334231806,
"grad_norm": 0.7004501819610596,
"learning_rate": 0.00028831084727468965,
"loss": 3.3268,
"step": 48250
},
{
"epoch": 5.2075471698113205,
"grad_norm": 0.6932908296585083,
"learning_rate": 0.00028798704803022126,
"loss": 3.328,
"step": 48300
},
{
"epoch": 5.212938005390836,
"grad_norm": 0.6647869944572449,
"learning_rate": 0.0002876632487857528,
"loss": 3.3303,
"step": 48350
},
{
"epoch": 5.218328840970351,
"grad_norm": 0.6336517333984375,
"learning_rate": 0.0002873394495412844,
"loss": 3.342,
"step": 48400
},
{
"epoch": 5.223719676549865,
"grad_norm": 0.7249319553375244,
"learning_rate": 0.0002870156502968159,
"loss": 3.3318,
"step": 48450
},
{
"epoch": 5.22911051212938,
"grad_norm": 0.6921640634536743,
"learning_rate": 0.0002866918510523475,
"loss": 3.3209,
"step": 48500
},
{
"epoch": 5.234501347708895,
"grad_norm": 0.70216304063797,
"learning_rate": 0.00028636805180787907,
"loss": 3.3307,
"step": 48550
},
{
"epoch": 5.2398921832884096,
"grad_norm": 0.6812149286270142,
"learning_rate": 0.00028604425256341067,
"loss": 3.3543,
"step": 48600
},
{
"epoch": 5.245283018867925,
"grad_norm": 0.6724953055381775,
"learning_rate": 0.0002857204533189422,
"loss": 3.3339,
"step": 48650
},
{
"epoch": 5.250673854447439,
"grad_norm": 0.6912009119987488,
"learning_rate": 0.0002853966540744738,
"loss": 3.3355,
"step": 48700
},
{
"epoch": 5.256064690026954,
"grad_norm": 0.7083752751350403,
"learning_rate": 0.0002850728548300054,
"loss": 3.3435,
"step": 48750
},
{
"epoch": 5.261455525606469,
"grad_norm": 0.7092831134796143,
"learning_rate": 0.000284749055585537,
"loss": 3.3268,
"step": 48800
},
{
"epoch": 5.2668463611859835,
"grad_norm": 0.6768770813941956,
"learning_rate": 0.0002844252563410685,
"loss": 3.321,
"step": 48850
},
{
"epoch": 5.272237196765499,
"grad_norm": 0.7014017701148987,
"learning_rate": 0.0002841014570966001,
"loss": 3.336,
"step": 48900
},
{
"epoch": 5.277628032345014,
"grad_norm": 0.7322688102722168,
"learning_rate": 0.00028377765785213163,
"loss": 3.3385,
"step": 48950
},
{
"epoch": 5.283018867924528,
"grad_norm": 0.7685863971710205,
"learning_rate": 0.00028345385860766324,
"loss": 3.3424,
"step": 49000
},
{
"epoch": 5.283018867924528,
"eval_accuracy": 0.3814316126229014,
"eval_loss": 3.4073944091796875,
"eval_runtime": 144.7837,
"eval_samples_per_second": 124.399,
"eval_steps_per_second": 7.777,
"step": 49000
},
{
"epoch": 5.288409703504043,
"grad_norm": 0.6961640119552612,
"learning_rate": 0.0002831300593631948,
"loss": 3.3409,
"step": 49050
},
{
"epoch": 5.293800539083558,
"grad_norm": 0.6957879066467285,
"learning_rate": 0.0002828062601187264,
"loss": 3.3524,
"step": 49100
},
{
"epoch": 5.2991913746630726,
"grad_norm": 0.6970066428184509,
"learning_rate": 0.00028248246087425794,
"loss": 3.3484,
"step": 49150
},
{
"epoch": 5.304582210242588,
"grad_norm": 0.7614482045173645,
"learning_rate": 0.0002821586616297895,
"loss": 3.3549,
"step": 49200
},
{
"epoch": 5.309973045822103,
"grad_norm": 0.7144331336021423,
"learning_rate": 0.0002818348623853211,
"loss": 3.3365,
"step": 49250
},
{
"epoch": 5.315363881401617,
"grad_norm": 0.6948640942573547,
"learning_rate": 0.00028151106314085265,
"loss": 3.3399,
"step": 49300
},
{
"epoch": 5.320754716981132,
"grad_norm": 0.7086396217346191,
"learning_rate": 0.0002811872638963842,
"loss": 3.3396,
"step": 49350
},
{
"epoch": 5.3261455525606465,
"grad_norm": 0.7019725441932678,
"learning_rate": 0.0002808634646519158,
"loss": 3.3322,
"step": 49400
},
{
"epoch": 5.331536388140162,
"grad_norm": 0.672603189945221,
"learning_rate": 0.00028053966540744736,
"loss": 3.3151,
"step": 49450
},
{
"epoch": 5.336927223719677,
"grad_norm": 0.6889719367027283,
"learning_rate": 0.0002802158661629789,
"loss": 3.3461,
"step": 49500
},
{
"epoch": 5.342318059299191,
"grad_norm": 0.6748621463775635,
"learning_rate": 0.0002798920669185105,
"loss": 3.3413,
"step": 49550
},
{
"epoch": 5.347708894878706,
"grad_norm": 0.7129660844802856,
"learning_rate": 0.00027956826767404206,
"loss": 3.3341,
"step": 49600
},
{
"epoch": 5.353099730458221,
"grad_norm": 0.7002583742141724,
"learning_rate": 0.00027924446842957367,
"loss": 3.3368,
"step": 49650
},
{
"epoch": 5.3584905660377355,
"grad_norm": 0.7028433084487915,
"learning_rate": 0.0002789206691851052,
"loss": 3.3228,
"step": 49700
},
{
"epoch": 5.363881401617251,
"grad_norm": 0.6843782067298889,
"learning_rate": 0.00027859686994063677,
"loss": 3.36,
"step": 49750
},
{
"epoch": 5.369272237196766,
"grad_norm": 0.6877606511116028,
"learning_rate": 0.0002782730706961683,
"loss": 3.338,
"step": 49800
},
{
"epoch": 5.37466307277628,
"grad_norm": 0.7371047735214233,
"learning_rate": 0.0002779492714516999,
"loss": 3.3458,
"step": 49850
},
{
"epoch": 5.380053908355795,
"grad_norm": 0.6934622526168823,
"learning_rate": 0.00027762547220723147,
"loss": 3.3383,
"step": 49900
},
{
"epoch": 5.38544474393531,
"grad_norm": 0.6579985022544861,
"learning_rate": 0.0002773016729627631,
"loss": 3.3524,
"step": 49950
},
{
"epoch": 5.390835579514825,
"grad_norm": 0.6802271008491516,
"learning_rate": 0.00027697787371829463,
"loss": 3.3458,
"step": 50000
},
{
"epoch": 5.390835579514825,
"eval_accuracy": 0.3819480397986836,
"eval_loss": 3.4055545330047607,
"eval_runtime": 144.3918,
"eval_samples_per_second": 124.737,
"eval_steps_per_second": 7.798,
"step": 50000
},
{
"epoch": 5.39622641509434,
"grad_norm": 0.6987673044204712,
"learning_rate": 0.00027665407447382623,
"loss": 3.3288,
"step": 50050
},
{
"epoch": 5.401617250673855,
"grad_norm": 0.7340160608291626,
"learning_rate": 0.0002763302752293578,
"loss": 3.3532,
"step": 50100
},
{
"epoch": 5.407008086253369,
"grad_norm": 0.6872392892837524,
"learning_rate": 0.00027600647598488933,
"loss": 3.3324,
"step": 50150
},
{
"epoch": 5.412398921832884,
"grad_norm": 0.6771326661109924,
"learning_rate": 0.0002756826767404209,
"loss": 3.3355,
"step": 50200
},
{
"epoch": 5.4177897574123985,
"grad_norm": 0.6996474862098694,
"learning_rate": 0.0002753588774959525,
"loss": 3.357,
"step": 50250
},
{
"epoch": 5.423180592991914,
"grad_norm": 0.7138876914978027,
"learning_rate": 0.00027503507825148404,
"loss": 3.3214,
"step": 50300
},
{
"epoch": 5.428571428571429,
"grad_norm": 0.755319356918335,
"learning_rate": 0.00027471127900701564,
"loss": 3.3383,
"step": 50350
},
{
"epoch": 5.433962264150943,
"grad_norm": 0.6721000671386719,
"learning_rate": 0.0002743874797625472,
"loss": 3.3523,
"step": 50400
},
{
"epoch": 5.439353099730458,
"grad_norm": 0.730150580406189,
"learning_rate": 0.0002740636805180788,
"loss": 3.3376,
"step": 50450
},
{
"epoch": 5.444743935309973,
"grad_norm": 0.7079633474349976,
"learning_rate": 0.00027373988127361035,
"loss": 3.3507,
"step": 50500
},
{
"epoch": 5.450134770889488,
"grad_norm": 0.7010264992713928,
"learning_rate": 0.0002734160820291419,
"loss": 3.3462,
"step": 50550
},
{
"epoch": 5.455525606469003,
"grad_norm": 0.6866102814674377,
"learning_rate": 0.00027309228278467345,
"loss": 3.3319,
"step": 50600
},
{
"epoch": 5.460916442048518,
"grad_norm": 0.6866927146911621,
"learning_rate": 0.00027276848354020506,
"loss": 3.3463,
"step": 50650
},
{
"epoch": 5.466307277628032,
"grad_norm": 0.7239250540733337,
"learning_rate": 0.0002724446842957366,
"loss": 3.362,
"step": 50700
},
{
"epoch": 5.471698113207547,
"grad_norm": 0.7562030553817749,
"learning_rate": 0.0002721208850512682,
"loss": 3.3648,
"step": 50750
},
{
"epoch": 5.4770889487870615,
"grad_norm": 0.7127982974052429,
"learning_rate": 0.00027179708580679976,
"loss": 3.3432,
"step": 50800
},
{
"epoch": 5.482479784366577,
"grad_norm": 0.7013341784477234,
"learning_rate": 0.0002714732865623313,
"loss": 3.3444,
"step": 50850
},
{
"epoch": 5.487870619946092,
"grad_norm": 0.6669678092002869,
"learning_rate": 0.0002711494873178629,
"loss": 3.3592,
"step": 50900
},
{
"epoch": 5.493261455525606,
"grad_norm": 0.7303927540779114,
"learning_rate": 0.0002708321640582838,
"loss": 3.3454,
"step": 50950
},
{
"epoch": 5.498652291105121,
"grad_norm": 0.6964647769927979,
"learning_rate": 0.0002705083648138154,
"loss": 3.3476,
"step": 51000
},
{
"epoch": 5.498652291105121,
"eval_accuracy": 0.3824629458340364,
"eval_loss": 3.4016828536987305,
"eval_runtime": 144.3883,
"eval_samples_per_second": 124.74,
"eval_steps_per_second": 7.798,
"step": 51000
},
{
"epoch": 5.504043126684636,
"grad_norm": 0.7446438670158386,
"learning_rate": 0.00027018456556934697,
"loss": 3.339,
"step": 51050
},
{
"epoch": 5.509433962264151,
"grad_norm": 0.7122694849967957,
"learning_rate": 0.0002698607663248786,
"loss": 3.3297,
"step": 51100
},
{
"epoch": 5.514824797843666,
"grad_norm": 0.7483970522880554,
"learning_rate": 0.0002695369670804101,
"loss": 3.3296,
"step": 51150
},
{
"epoch": 5.520215633423181,
"grad_norm": 0.6955212354660034,
"learning_rate": 0.0002692131678359417,
"loss": 3.3312,
"step": 51200
},
{
"epoch": 5.525606469002695,
"grad_norm": 0.6897768974304199,
"learning_rate": 0.0002688893685914733,
"loss": 3.3589,
"step": 51250
},
{
"epoch": 5.53099730458221,
"grad_norm": 0.6922851204872131,
"learning_rate": 0.00026856556934700483,
"loss": 3.3508,
"step": 51300
},
{
"epoch": 5.536388140161725,
"grad_norm": 0.7360769510269165,
"learning_rate": 0.0002682417701025364,
"loss": 3.3515,
"step": 51350
},
{
"epoch": 5.54177897574124,
"grad_norm": 0.6909701824188232,
"learning_rate": 0.000267917970858068,
"loss": 3.3279,
"step": 51400
},
{
"epoch": 5.547169811320755,
"grad_norm": 0.6974009275436401,
"learning_rate": 0.00026759417161359954,
"loss": 3.3313,
"step": 51450
},
{
"epoch": 5.55256064690027,
"grad_norm": 0.7308899760246277,
"learning_rate": 0.0002672703723691311,
"loss": 3.3725,
"step": 51500
},
{
"epoch": 5.557951482479784,
"grad_norm": 0.7055568099021912,
"learning_rate": 0.0002669465731246627,
"loss": 3.3351,
"step": 51550
},
{
"epoch": 5.563342318059299,
"grad_norm": 0.7265756130218506,
"learning_rate": 0.00026662277388019424,
"loss": 3.3488,
"step": 51600
},
{
"epoch": 5.568733153638814,
"grad_norm": 0.6935765743255615,
"learning_rate": 0.00026629897463572585,
"loss": 3.3351,
"step": 51650
},
{
"epoch": 5.574123989218329,
"grad_norm": 0.7005050778388977,
"learning_rate": 0.0002659751753912574,
"loss": 3.3393,
"step": 51700
},
{
"epoch": 5.579514824797844,
"grad_norm": 0.676528811454773,
"learning_rate": 0.00026565137614678895,
"loss": 3.352,
"step": 51750
},
{
"epoch": 5.584905660377358,
"grad_norm": 0.7144435048103333,
"learning_rate": 0.0002653275769023205,
"loss": 3.3258,
"step": 51800
},
{
"epoch": 5.590296495956873,
"grad_norm": 0.6825423240661621,
"learning_rate": 0.0002650037776578521,
"loss": 3.323,
"step": 51850
},
{
"epoch": 5.595687331536388,
"grad_norm": 0.6950230598449707,
"learning_rate": 0.00026467997841338366,
"loss": 3.3775,
"step": 51900
},
{
"epoch": 5.601078167115903,
"grad_norm": 0.7042683959007263,
"learning_rate": 0.00026435617916891526,
"loss": 3.3395,
"step": 51950
},
{
"epoch": 5.606469002695418,
"grad_norm": 0.687217116355896,
"learning_rate": 0.0002640323799244468,
"loss": 3.3541,
"step": 52000
},
{
"epoch": 5.606469002695418,
"eval_accuracy": 0.38270795809605807,
"eval_loss": 3.3957579135894775,
"eval_runtime": 144.4258,
"eval_samples_per_second": 124.708,
"eval_steps_per_second": 7.796,
"step": 52000
},
{
"epoch": 5.611859838274933,
"grad_norm": 0.7226871252059937,
"learning_rate": 0.0002637085806799784,
"loss": 3.33,
"step": 52050
},
{
"epoch": 5.617250673854447,
"grad_norm": 0.7055585980415344,
"learning_rate": 0.00026338478143550997,
"loss": 3.3534,
"step": 52100
},
{
"epoch": 5.622641509433962,
"grad_norm": 0.7270804643630981,
"learning_rate": 0.00026306098219104157,
"loss": 3.3246,
"step": 52150
},
{
"epoch": 5.628032345013477,
"grad_norm": 0.7429816722869873,
"learning_rate": 0.00026273718294657307,
"loss": 3.3227,
"step": 52200
},
{
"epoch": 5.633423180592992,
"grad_norm": 0.6981556415557861,
"learning_rate": 0.00026241338370210467,
"loss": 3.342,
"step": 52250
},
{
"epoch": 5.638814016172507,
"grad_norm": 0.695846676826477,
"learning_rate": 0.0002620895844576362,
"loss": 3.358,
"step": 52300
},
{
"epoch": 5.644204851752022,
"grad_norm": 0.7060867547988892,
"learning_rate": 0.00026176578521316783,
"loss": 3.3285,
"step": 52350
},
{
"epoch": 5.649595687331536,
"grad_norm": 0.6964941620826721,
"learning_rate": 0.0002614419859686994,
"loss": 3.3238,
"step": 52400
},
{
"epoch": 5.654986522911051,
"grad_norm": 0.8142193555831909,
"learning_rate": 0.000261118186724231,
"loss": 3.3334,
"step": 52450
},
{
"epoch": 5.660377358490566,
"grad_norm": 0.7683114409446716,
"learning_rate": 0.00026079438747976253,
"loss": 3.349,
"step": 52500
},
{
"epoch": 5.665768194070081,
"grad_norm": 0.7481276392936707,
"learning_rate": 0.0002604705882352941,
"loss": 3.3428,
"step": 52550
},
{
"epoch": 5.671159029649596,
"grad_norm": 0.7752293944358826,
"learning_rate": 0.00026014678899082563,
"loss": 3.3439,
"step": 52600
},
{
"epoch": 5.67654986522911,
"grad_norm": 0.7023149728775024,
"learning_rate": 0.00025982298974635724,
"loss": 3.3571,
"step": 52650
},
{
"epoch": 5.681940700808625,
"grad_norm": 0.7217230200767517,
"learning_rate": 0.0002594991905018888,
"loss": 3.358,
"step": 52700
},
{
"epoch": 5.6873315363881405,
"grad_norm": 0.7547357082366943,
"learning_rate": 0.0002591753912574204,
"loss": 3.3512,
"step": 52750
},
{
"epoch": 5.692722371967655,
"grad_norm": 0.762870728969574,
"learning_rate": 0.00025885159201295195,
"loss": 3.3294,
"step": 52800
},
{
"epoch": 5.69811320754717,
"grad_norm": 0.7588973045349121,
"learning_rate": 0.0002585277927684835,
"loss": 3.334,
"step": 52850
},
{
"epoch": 5.703504043126685,
"grad_norm": 0.7037749886512756,
"learning_rate": 0.0002582039935240151,
"loss": 3.3558,
"step": 52900
},
{
"epoch": 5.708894878706199,
"grad_norm": 0.696289598941803,
"learning_rate": 0.00025788019427954665,
"loss": 3.3524,
"step": 52950
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.7137579917907715,
"learning_rate": 0.00025755639503507826,
"loss": 3.347,
"step": 53000
},
{
"epoch": 5.714285714285714,
"eval_accuracy": 0.3831239900034997,
"eval_loss": 3.3917641639709473,
"eval_runtime": 144.3442,
"eval_samples_per_second": 124.778,
"eval_steps_per_second": 7.801,
"step": 53000
},
{
"epoch": 5.719676549865229,
"grad_norm": 0.6979992389678955,
"learning_rate": 0.0002572325957906098,
"loss": 3.3288,
"step": 53050
},
{
"epoch": 5.725067385444744,
"grad_norm": 0.7089249491691589,
"learning_rate": 0.00025690879654614136,
"loss": 3.3518,
"step": 53100
},
{
"epoch": 5.730458221024259,
"grad_norm": 0.7526663541793823,
"learning_rate": 0.0002565849973016729,
"loss": 3.3515,
"step": 53150
},
{
"epoch": 5.735849056603773,
"grad_norm": 0.7317113280296326,
"learning_rate": 0.0002562611980572045,
"loss": 3.3492,
"step": 53200
},
{
"epoch": 5.741239892183288,
"grad_norm": 0.7221770286560059,
"learning_rate": 0.00025593739881273606,
"loss": 3.3405,
"step": 53250
},
{
"epoch": 5.7466307277628035,
"grad_norm": 0.688494086265564,
"learning_rate": 0.00025561359956826767,
"loss": 3.3615,
"step": 53300
},
{
"epoch": 5.752021563342318,
"grad_norm": 0.7138562798500061,
"learning_rate": 0.0002552898003237992,
"loss": 3.3267,
"step": 53350
},
{
"epoch": 5.757412398921833,
"grad_norm": 0.7028967142105103,
"learning_rate": 0.0002549660010793308,
"loss": 3.3464,
"step": 53400
},
{
"epoch": 5.762803234501348,
"grad_norm": 0.8505129814147949,
"learning_rate": 0.0002546422018348624,
"loss": 3.3365,
"step": 53450
},
{
"epoch": 5.768194070080862,
"grad_norm": 0.7250940799713135,
"learning_rate": 0.0002543184025903939,
"loss": 3.3477,
"step": 53500
},
{
"epoch": 5.773584905660377,
"grad_norm": 0.6903457641601562,
"learning_rate": 0.0002539946033459255,
"loss": 3.3318,
"step": 53550
},
{
"epoch": 5.7789757412398925,
"grad_norm": 0.7478795051574707,
"learning_rate": 0.0002536708041014571,
"loss": 3.3295,
"step": 53600
},
{
"epoch": 5.784366576819407,
"grad_norm": 0.6617110371589661,
"learning_rate": 0.00025334700485698863,
"loss": 3.3269,
"step": 53650
},
{
"epoch": 5.789757412398922,
"grad_norm": 0.7265610098838806,
"learning_rate": 0.0002530296815974096,
"loss": 3.3387,
"step": 53700
},
{
"epoch": 5.795148247978437,
"grad_norm": 0.7066839337348938,
"learning_rate": 0.0002527058823529412,
"loss": 3.3429,
"step": 53750
},
{
"epoch": 5.800539083557951,
"grad_norm": 0.7426128387451172,
"learning_rate": 0.0002523820831084727,
"loss": 3.3513,
"step": 53800
},
{
"epoch": 5.8059299191374665,
"grad_norm": 0.6835800409317017,
"learning_rate": 0.0002520582838640043,
"loss": 3.338,
"step": 53850
},
{
"epoch": 5.811320754716981,
"grad_norm": 0.7507166266441345,
"learning_rate": 0.00025173448461953584,
"loss": 3.3408,
"step": 53900
},
{
"epoch": 5.816711590296496,
"grad_norm": 0.7032175064086914,
"learning_rate": 0.00025141068537506744,
"loss": 3.339,
"step": 53950
},
{
"epoch": 5.822102425876011,
"grad_norm": 0.6539283990859985,
"learning_rate": 0.000251086886130599,
"loss": 3.3347,
"step": 54000
},
{
"epoch": 5.822102425876011,
"eval_accuracy": 0.38358424363628185,
"eval_loss": 3.3862242698669434,
"eval_runtime": 144.5984,
"eval_samples_per_second": 124.559,
"eval_steps_per_second": 7.787,
"step": 54000
},
{
"epoch": 5.827493261455525,
"grad_norm": 0.7013541460037231,
"learning_rate": 0.0002507630868861306,
"loss": 3.3546,
"step": 54050
},
{
"epoch": 5.83288409703504,
"grad_norm": 0.7572037577629089,
"learning_rate": 0.00025043928764166215,
"loss": 3.3372,
"step": 54100
},
{
"epoch": 5.8382749326145555,
"grad_norm": 0.6840663552284241,
"learning_rate": 0.00025011548839719375,
"loss": 3.3602,
"step": 54150
},
{
"epoch": 5.84366576819407,
"grad_norm": 0.7282071709632874,
"learning_rate": 0.0002497916891527253,
"loss": 3.3645,
"step": 54200
},
{
"epoch": 5.849056603773585,
"grad_norm": 0.691611111164093,
"learning_rate": 0.00024946788990825686,
"loss": 3.3393,
"step": 54250
},
{
"epoch": 5.8544474393531,
"grad_norm": 0.7439831495285034,
"learning_rate": 0.0002491440906637884,
"loss": 3.347,
"step": 54300
},
{
"epoch": 5.859838274932614,
"grad_norm": 0.7478641271591187,
"learning_rate": 0.00024882029141932,
"loss": 3.3495,
"step": 54350
},
{
"epoch": 5.8652291105121295,
"grad_norm": 0.6928412914276123,
"learning_rate": 0.00024849649217485156,
"loss": 3.3451,
"step": 54400
},
{
"epoch": 5.870619946091644,
"grad_norm": 0.7091377973556519,
"learning_rate": 0.00024817269293038317,
"loss": 3.3328,
"step": 54450
},
{
"epoch": 5.876010781671159,
"grad_norm": 0.7480646967887878,
"learning_rate": 0.0002478488936859147,
"loss": 3.3442,
"step": 54500
},
{
"epoch": 5.881401617250674,
"grad_norm": 0.7517173290252686,
"learning_rate": 0.00024752509444144627,
"loss": 3.3458,
"step": 54550
},
{
"epoch": 5.886792452830189,
"grad_norm": 0.7140108942985535,
"learning_rate": 0.00024720129519697787,
"loss": 3.3454,
"step": 54600
},
{
"epoch": 5.892183288409703,
"grad_norm": 0.749847948551178,
"learning_rate": 0.0002468774959525094,
"loss": 3.3509,
"step": 54650
},
{
"epoch": 5.8975741239892185,
"grad_norm": 0.7366791367530823,
"learning_rate": 0.00024655369670804097,
"loss": 3.3389,
"step": 54700
},
{
"epoch": 5.902964959568733,
"grad_norm": 0.776725709438324,
"learning_rate": 0.0002462363734484619,
"loss": 3.3402,
"step": 54750
},
{
"epoch": 5.908355795148248,
"grad_norm": 0.7790849208831787,
"learning_rate": 0.00024591257420399353,
"loss": 3.3374,
"step": 54800
},
{
"epoch": 5.913746630727763,
"grad_norm": 0.6976567506790161,
"learning_rate": 0.0002455887749595251,
"loss": 3.341,
"step": 54850
},
{
"epoch": 5.919137466307277,
"grad_norm": 0.7408641576766968,
"learning_rate": 0.00024526497571505663,
"loss": 3.3418,
"step": 54900
},
{
"epoch": 5.9245283018867925,
"grad_norm": 0.8166471719741821,
"learning_rate": 0.00024494117647058824,
"loss": 3.3486,
"step": 54950
},
{
"epoch": 5.929919137466308,
"grad_norm": 0.7015873789787292,
"learning_rate": 0.0002446173772261198,
"loss": 3.3565,
"step": 55000
},
{
"epoch": 5.929919137466308,
"eval_accuracy": 0.3844023998815249,
"eval_loss": 3.38118839263916,
"eval_runtime": 144.4609,
"eval_samples_per_second": 124.677,
"eval_steps_per_second": 7.794,
"step": 55000
},
{
"epoch": 5.935309973045822,
"grad_norm": 0.772317111492157,
"learning_rate": 0.00024429357798165134,
"loss": 3.3602,
"step": 55050
},
{
"epoch": 5.940700808625337,
"grad_norm": 0.7168392539024353,
"learning_rate": 0.00024396977873718291,
"loss": 3.3497,
"step": 55100
},
{
"epoch": 5.946091644204852,
"grad_norm": 0.7470517754554749,
"learning_rate": 0.0002436459794927145,
"loss": 3.3451,
"step": 55150
},
{
"epoch": 5.951482479784366,
"grad_norm": 0.7503311634063721,
"learning_rate": 0.00024332218024824607,
"loss": 3.3545,
"step": 55200
},
{
"epoch": 5.9568733153638815,
"grad_norm": 0.7465424537658691,
"learning_rate": 0.00024299838100377765,
"loss": 3.3588,
"step": 55250
},
{
"epoch": 5.962264150943396,
"grad_norm": 0.6964436769485474,
"learning_rate": 0.00024267458175930922,
"loss": 3.3242,
"step": 55300
},
{
"epoch": 5.967654986522911,
"grad_norm": 0.7544224858283997,
"learning_rate": 0.0002423507825148408,
"loss": 3.334,
"step": 55350
},
{
"epoch": 5.973045822102426,
"grad_norm": 0.6809358596801758,
"learning_rate": 0.00024202698327037233,
"loss": 3.3367,
"step": 55400
},
{
"epoch": 5.97843665768194,
"grad_norm": 0.7538425922393799,
"learning_rate": 0.0002417031840259039,
"loss": 3.3287,
"step": 55450
},
{
"epoch": 5.9838274932614555,
"grad_norm": 0.7212376594543457,
"learning_rate": 0.00024137938478143548,
"loss": 3.3497,
"step": 55500
},
{
"epoch": 5.989218328840971,
"grad_norm": 0.7525458931922913,
"learning_rate": 0.00024105558553696706,
"loss": 3.3425,
"step": 55550
},
{
"epoch": 5.994609164420485,
"grad_norm": 0.7261298894882202,
"learning_rate": 0.00024073178629249864,
"loss": 3.3237,
"step": 55600
},
{
"epoch": 6.0,
"grad_norm": 1.5976569652557373,
"learning_rate": 0.00024040798704803021,
"loss": 3.3372,
"step": 55650
},
{
"epoch": 6.005390835579515,
"grad_norm": 0.7560473680496216,
"learning_rate": 0.00024008418780356176,
"loss": 3.2451,
"step": 55700
},
{
"epoch": 6.010781671159029,
"grad_norm": 0.7089710235595703,
"learning_rate": 0.00023976038855909334,
"loss": 3.2499,
"step": 55750
},
{
"epoch": 6.0161725067385445,
"grad_norm": 0.727899432182312,
"learning_rate": 0.00023943658931462492,
"loss": 3.2603,
"step": 55800
},
{
"epoch": 6.02156334231806,
"grad_norm": 0.7405480146408081,
"learning_rate": 0.00023911279007015647,
"loss": 3.2515,
"step": 55850
},
{
"epoch": 6.026954177897574,
"grad_norm": 0.7307968735694885,
"learning_rate": 0.00023878899082568805,
"loss": 3.2599,
"step": 55900
},
{
"epoch": 6.032345013477089,
"grad_norm": 0.7412907481193542,
"learning_rate": 0.00023846519158121963,
"loss": 3.2536,
"step": 55950
},
{
"epoch": 6.037735849056604,
"grad_norm": 0.7412428855895996,
"learning_rate": 0.0002381413923367512,
"loss": 3.2551,
"step": 56000
},
{
"epoch": 6.037735849056604,
"eval_accuracy": 0.3845462563049913,
"eval_loss": 3.3851492404937744,
"eval_runtime": 144.3583,
"eval_samples_per_second": 124.766,
"eval_steps_per_second": 7.8,
"step": 56000
},
{
"epoch": 6.0431266846361185,
"grad_norm": 0.7608617544174194,
"learning_rate": 0.00023781759309228275,
"loss": 3.2543,
"step": 56050
},
{
"epoch": 6.048517520215634,
"grad_norm": 0.7123560905456543,
"learning_rate": 0.00023749379384781433,
"loss": 3.2758,
"step": 56100
},
{
"epoch": 6.053908355795148,
"grad_norm": 0.6870105266571045,
"learning_rate": 0.0002371699946033459,
"loss": 3.2504,
"step": 56150
},
{
"epoch": 6.059299191374663,
"grad_norm": 0.6946421265602112,
"learning_rate": 0.0002368461953588775,
"loss": 3.2571,
"step": 56200
},
{
"epoch": 6.064690026954178,
"grad_norm": 0.6991093754768372,
"learning_rate": 0.00023652239611440904,
"loss": 3.2747,
"step": 56250
},
{
"epoch": 6.070080862533692,
"grad_norm": 0.7521548867225647,
"learning_rate": 0.00023619859686994062,
"loss": 3.2576,
"step": 56300
},
{
"epoch": 6.0754716981132075,
"grad_norm": 0.7925395965576172,
"learning_rate": 0.00023587479762547217,
"loss": 3.2711,
"step": 56350
},
{
"epoch": 6.080862533692723,
"grad_norm": 0.734521746635437,
"learning_rate": 0.00023555099838100374,
"loss": 3.2722,
"step": 56400
},
{
"epoch": 6.086253369272237,
"grad_norm": 0.6941896080970764,
"learning_rate": 0.00023522719913653532,
"loss": 3.265,
"step": 56450
},
{
"epoch": 6.091644204851752,
"grad_norm": 0.7175527811050415,
"learning_rate": 0.0002349033998920669,
"loss": 3.2713,
"step": 56500
},
{
"epoch": 6.097035040431267,
"grad_norm": 0.7173822522163391,
"learning_rate": 0.00023457960064759848,
"loss": 3.2923,
"step": 56550
},
{
"epoch": 6.1024258760107815,
"grad_norm": 0.7203914523124695,
"learning_rate": 0.00023425580140313005,
"loss": 3.2881,
"step": 56600
},
{
"epoch": 6.107816711590297,
"grad_norm": 0.7852560877799988,
"learning_rate": 0.00023393200215866163,
"loss": 3.2643,
"step": 56650
},
{
"epoch": 6.113207547169812,
"grad_norm": 0.7339677810668945,
"learning_rate": 0.00023360820291419316,
"loss": 3.284,
"step": 56700
},
{
"epoch": 6.118598382749326,
"grad_norm": 0.7083250284194946,
"learning_rate": 0.00023328440366972473,
"loss": 3.2735,
"step": 56750
},
{
"epoch": 6.123989218328841,
"grad_norm": 0.7083043456077576,
"learning_rate": 0.0002329606044252563,
"loss": 3.2641,
"step": 56800
},
{
"epoch": 6.129380053908355,
"grad_norm": 0.7478223443031311,
"learning_rate": 0.0002326368051807879,
"loss": 3.2636,
"step": 56850
},
{
"epoch": 6.1347708894878705,
"grad_norm": 0.7465955018997192,
"learning_rate": 0.00023231300593631947,
"loss": 3.2659,
"step": 56900
},
{
"epoch": 6.140161725067386,
"grad_norm": 0.7111784815788269,
"learning_rate": 0.00023198920669185104,
"loss": 3.2787,
"step": 56950
},
{
"epoch": 6.1455525606469,
"grad_norm": 0.7591080665588379,
"learning_rate": 0.00023166540744738262,
"loss": 3.2716,
"step": 57000
},
{
"epoch": 6.1455525606469,
"eval_accuracy": 0.3843093930095557,
"eval_loss": 3.3840153217315674,
"eval_runtime": 144.383,
"eval_samples_per_second": 124.745,
"eval_steps_per_second": 7.799,
"step": 57000
},
{
"epoch": 6.150943396226415,
"grad_norm": 0.7147782444953918,
"learning_rate": 0.0002313416082029142,
"loss": 3.2843,
"step": 57050
},
{
"epoch": 6.15633423180593,
"grad_norm": 0.7511909008026123,
"learning_rate": 0.00023101780895844572,
"loss": 3.2651,
"step": 57100
},
{
"epoch": 6.1617250673854445,
"grad_norm": 0.8061673045158386,
"learning_rate": 0.0002306940097139773,
"loss": 3.2749,
"step": 57150
},
{
"epoch": 6.16711590296496,
"grad_norm": 0.7521399259567261,
"learning_rate": 0.00023037021046950888,
"loss": 3.2692,
"step": 57200
},
{
"epoch": 6.172506738544475,
"grad_norm": 0.7413508296012878,
"learning_rate": 0.00023004641122504046,
"loss": 3.273,
"step": 57250
},
{
"epoch": 6.177897574123989,
"grad_norm": 0.7605106830596924,
"learning_rate": 0.00022972261198057203,
"loss": 3.2724,
"step": 57300
},
{
"epoch": 6.183288409703504,
"grad_norm": 0.7825957536697388,
"learning_rate": 0.0002293988127361036,
"loss": 3.2799,
"step": 57350
},
{
"epoch": 6.188679245283019,
"grad_norm": 0.7622434496879578,
"learning_rate": 0.00022907501349163516,
"loss": 3.2856,
"step": 57400
},
{
"epoch": 6.1940700808625335,
"grad_norm": 0.7651059627532959,
"learning_rate": 0.00022875121424716674,
"loss": 3.3013,
"step": 57450
},
{
"epoch": 6.199460916442049,
"grad_norm": 0.7417478561401367,
"learning_rate": 0.00022842741500269832,
"loss": 3.2535,
"step": 57500
},
{
"epoch": 6.204851752021563,
"grad_norm": 0.7812780737876892,
"learning_rate": 0.00022810361575822987,
"loss": 3.3047,
"step": 57550
},
{
"epoch": 6.210242587601078,
"grad_norm": 0.7450320720672607,
"learning_rate": 0.00022777981651376145,
"loss": 3.2724,
"step": 57600
},
{
"epoch": 6.215633423180593,
"grad_norm": 0.704971432685852,
"learning_rate": 0.00022745601726929302,
"loss": 3.2779,
"step": 57650
},
{
"epoch": 6.2210242587601075,
"grad_norm": 0.7325422167778015,
"learning_rate": 0.00022713221802482457,
"loss": 3.2972,
"step": 57700
},
{
"epoch": 6.226415094339623,
"grad_norm": 0.7226464748382568,
"learning_rate": 0.00022680841878035615,
"loss": 3.2812,
"step": 57750
},
{
"epoch": 6.231805929919138,
"grad_norm": 0.7323125004768372,
"learning_rate": 0.00022648461953588773,
"loss": 3.2871,
"step": 57800
},
{
"epoch": 6.237196765498652,
"grad_norm": 0.7361631989479065,
"learning_rate": 0.0002261608202914193,
"loss": 3.2881,
"step": 57850
},
{
"epoch": 6.242587601078167,
"grad_norm": 0.7684759497642517,
"learning_rate": 0.00022583702104695088,
"loss": 3.2876,
"step": 57900
},
{
"epoch": 6.247978436657682,
"grad_norm": 0.7734968662261963,
"learning_rate": 0.00022551322180248246,
"loss": 3.2693,
"step": 57950
},
{
"epoch": 6.2533692722371965,
"grad_norm": 0.7811702489852905,
"learning_rate": 0.00022518942255801399,
"loss": 3.2895,
"step": 58000
},
{
"epoch": 6.2533692722371965,
"eval_accuracy": 0.3848163673840981,
"eval_loss": 3.381255626678467,
"eval_runtime": 144.6854,
"eval_samples_per_second": 124.484,
"eval_steps_per_second": 7.782,
"step": 58000
},
{
"epoch": 6.258760107816712,
"grad_norm": 0.7119957804679871,
"learning_rate": 0.00022486562331354556,
"loss": 3.2806,
"step": 58050
},
{
"epoch": 6.264150943396227,
"grad_norm": 0.7434617877006531,
"learning_rate": 0.00022454182406907714,
"loss": 3.2857,
"step": 58100
},
{
"epoch": 6.269541778975741,
"grad_norm": 0.7248224020004272,
"learning_rate": 0.00022421802482460872,
"loss": 3.2801,
"step": 58150
},
{
"epoch": 6.274932614555256,
"grad_norm": 0.7341305017471313,
"learning_rate": 0.0002238942255801403,
"loss": 3.2775,
"step": 58200
},
{
"epoch": 6.280323450134771,
"grad_norm": 0.8089809417724609,
"learning_rate": 0.00022357042633567187,
"loss": 3.286,
"step": 58250
},
{
"epoch": 6.285714285714286,
"grad_norm": 0.7188864350318909,
"learning_rate": 0.00022324662709120345,
"loss": 3.2696,
"step": 58300
},
{
"epoch": 6.291105121293801,
"grad_norm": 0.7826777696609497,
"learning_rate": 0.00022292282784673503,
"loss": 3.3009,
"step": 58350
},
{
"epoch": 6.296495956873315,
"grad_norm": 0.7758088707923889,
"learning_rate": 0.00022259902860226655,
"loss": 3.2809,
"step": 58400
},
{
"epoch": 6.30188679245283,
"grad_norm": 0.8148401379585266,
"learning_rate": 0.00022227522935779813,
"loss": 3.2821,
"step": 58450
},
{
"epoch": 6.307277628032345,
"grad_norm": 0.7606106996536255,
"learning_rate": 0.0002219514301133297,
"loss": 3.2742,
"step": 58500
},
{
"epoch": 6.3126684636118595,
"grad_norm": 0.7480827569961548,
"learning_rate": 0.00022162763086886129,
"loss": 3.2842,
"step": 58550
},
{
"epoch": 6.318059299191375,
"grad_norm": 0.7575658559799194,
"learning_rate": 0.00022130383162439286,
"loss": 3.3005,
"step": 58600
},
{
"epoch": 6.32345013477089,
"grad_norm": 0.7664201855659485,
"learning_rate": 0.00022098003237992444,
"loss": 3.2767,
"step": 58650
},
{
"epoch": 6.328840970350404,
"grad_norm": 0.7767562866210938,
"learning_rate": 0.00022065623313545602,
"loss": 3.291,
"step": 58700
},
{
"epoch": 6.334231805929919,
"grad_norm": 0.7493231296539307,
"learning_rate": 0.00022033243389098757,
"loss": 3.2951,
"step": 58750
},
{
"epoch": 6.339622641509434,
"grad_norm": 0.7707173228263855,
"learning_rate": 0.00022000863464651915,
"loss": 3.2937,
"step": 58800
},
{
"epoch": 6.345013477088949,
"grad_norm": 0.72657310962677,
"learning_rate": 0.0002196848354020507,
"loss": 3.2795,
"step": 58850
},
{
"epoch": 6.350404312668464,
"grad_norm": 0.7847123742103577,
"learning_rate": 0.00021936103615758227,
"loss": 3.2807,
"step": 58900
},
{
"epoch": 6.355795148247978,
"grad_norm": 0.7235740423202515,
"learning_rate": 0.00021903723691311385,
"loss": 3.2834,
"step": 58950
},
{
"epoch": 6.361185983827493,
"grad_norm": 0.746766209602356,
"learning_rate": 0.0002187199136535348,
"loss": 3.2759,
"step": 59000
},
{
"epoch": 6.361185983827493,
"eval_accuracy": 0.38557791547478976,
"eval_loss": 3.3767313957214355,
"eval_runtime": 144.3707,
"eval_samples_per_second": 124.755,
"eval_steps_per_second": 7.799,
"step": 59000
},
{
"epoch": 6.366576819407008,
"grad_norm": 0.7392002940177917,
"learning_rate": 0.00021839611440906638,
"loss": 3.2819,
"step": 59050
},
{
"epoch": 6.3719676549865225,
"grad_norm": 0.7772441506385803,
"learning_rate": 0.00021807231516459793,
"loss": 3.2792,
"step": 59100
},
{
"epoch": 6.377358490566038,
"grad_norm": 0.7302360534667969,
"learning_rate": 0.00021774851592012948,
"loss": 3.2906,
"step": 59150
},
{
"epoch": 6.382749326145553,
"grad_norm": 0.7985934019088745,
"learning_rate": 0.00021742471667566106,
"loss": 3.295,
"step": 59200
},
{
"epoch": 6.388140161725067,
"grad_norm": 0.7689845561981201,
"learning_rate": 0.00021710091743119264,
"loss": 3.2852,
"step": 59250
},
{
"epoch": 6.393530997304582,
"grad_norm": 0.7513689994812012,
"learning_rate": 0.00021677711818672422,
"loss": 3.304,
"step": 59300
},
{
"epoch": 6.398921832884097,
"grad_norm": 0.7443680763244629,
"learning_rate": 0.0002164533189422558,
"loss": 3.2819,
"step": 59350
},
{
"epoch": 6.404312668463612,
"grad_norm": 0.7067058682441711,
"learning_rate": 0.00021612951969778734,
"loss": 3.2802,
"step": 59400
},
{
"epoch": 6.409703504043127,
"grad_norm": 0.7308558821678162,
"learning_rate": 0.00021580572045331892,
"loss": 3.2935,
"step": 59450
},
{
"epoch": 6.415094339622642,
"grad_norm": 0.7577722072601318,
"learning_rate": 0.0002154819212088505,
"loss": 3.2836,
"step": 59500
},
{
"epoch": 6.420485175202156,
"grad_norm": 0.7250074744224548,
"learning_rate": 0.00021515812196438208,
"loss": 3.2736,
"step": 59550
},
{
"epoch": 6.425876010781671,
"grad_norm": 0.7675480842590332,
"learning_rate": 0.00021483432271991363,
"loss": 3.2841,
"step": 59600
},
{
"epoch": 6.431266846361186,
"grad_norm": 0.7345036268234253,
"learning_rate": 0.0002145105234754452,
"loss": 3.2902,
"step": 59650
},
{
"epoch": 6.436657681940701,
"grad_norm": 0.7689955830574036,
"learning_rate": 0.00021418672423097676,
"loss": 3.2837,
"step": 59700
},
{
"epoch": 6.442048517520216,
"grad_norm": 0.7610852122306824,
"learning_rate": 0.00021386292498650833,
"loss": 3.296,
"step": 59750
},
{
"epoch": 6.44743935309973,
"grad_norm": 0.7398020029067993,
"learning_rate": 0.0002135391257420399,
"loss": 3.2849,
"step": 59800
},
{
"epoch": 6.452830188679245,
"grad_norm": 0.802862286567688,
"learning_rate": 0.0002132153264975715,
"loss": 3.2935,
"step": 59850
},
{
"epoch": 6.45822102425876,
"grad_norm": 0.7436445951461792,
"learning_rate": 0.00021289152725310307,
"loss": 3.2808,
"step": 59900
},
{
"epoch": 6.463611859838275,
"grad_norm": 0.7063605189323425,
"learning_rate": 0.00021256772800863464,
"loss": 3.2951,
"step": 59950
},
{
"epoch": 6.46900269541779,
"grad_norm": 0.7640798091888428,
"learning_rate": 0.0002122439287641662,
"loss": 3.2909,
"step": 60000
},
{
"epoch": 6.46900269541779,
"eval_accuracy": 0.3852879209172129,
"eval_loss": 3.3732810020446777,
"eval_runtime": 144.6215,
"eval_samples_per_second": 124.539,
"eval_steps_per_second": 7.786,
"step": 60000
},
{
"epoch": 6.474393530997305,
"grad_norm": 0.7530982494354248,
"learning_rate": 0.00021192012951969775,
"loss": 3.2986,
"step": 60050
},
{
"epoch": 6.479784366576819,
"grad_norm": 0.7675153017044067,
"learning_rate": 0.00021159633027522932,
"loss": 3.2966,
"step": 60100
},
{
"epoch": 6.485175202156334,
"grad_norm": 0.7828149199485779,
"learning_rate": 0.0002112725310307609,
"loss": 3.2864,
"step": 60150
},
{
"epoch": 6.490566037735849,
"grad_norm": 0.8154207468032837,
"learning_rate": 0.00021094873178629248,
"loss": 3.3123,
"step": 60200
},
{
"epoch": 6.495956873315364,
"grad_norm": 0.7550340294837952,
"learning_rate": 0.00021062493254182406,
"loss": 3.2852,
"step": 60250
},
{
"epoch": 6.501347708894879,
"grad_norm": 0.7429071664810181,
"learning_rate": 0.00021030113329735563,
"loss": 3.2918,
"step": 60300
},
{
"epoch": 6.506738544474393,
"grad_norm": 0.7747331857681274,
"learning_rate": 0.0002099773340528872,
"loss": 3.2918,
"step": 60350
},
{
"epoch": 6.512129380053908,
"grad_norm": 0.7545101642608643,
"learning_rate": 0.0002096535348084188,
"loss": 3.2888,
"step": 60400
},
{
"epoch": 6.517520215633423,
"grad_norm": 0.7542675733566284,
"learning_rate": 0.0002093297355639503,
"loss": 3.305,
"step": 60450
},
{
"epoch": 6.5229110512129385,
"grad_norm": 0.756078839302063,
"learning_rate": 0.0002090059363194819,
"loss": 3.2923,
"step": 60500
},
{
"epoch": 6.528301886792453,
"grad_norm": 0.7515999674797058,
"learning_rate": 0.00020868213707501347,
"loss": 3.3115,
"step": 60550
},
{
"epoch": 6.533692722371968,
"grad_norm": 0.7256953716278076,
"learning_rate": 0.00020835833783054505,
"loss": 3.3078,
"step": 60600
},
{
"epoch": 6.539083557951482,
"grad_norm": 0.7823131084442139,
"learning_rate": 0.00020803453858607662,
"loss": 3.2934,
"step": 60650
},
{
"epoch": 6.544474393530997,
"grad_norm": 0.7566144466400146,
"learning_rate": 0.0002077107393416082,
"loss": 3.3089,
"step": 60700
},
{
"epoch": 6.549865229110512,
"grad_norm": 0.7706971764564514,
"learning_rate": 0.00020738694009713975,
"loss": 3.2953,
"step": 60750
},
{
"epoch": 6.555256064690027,
"grad_norm": 0.736495316028595,
"learning_rate": 0.00020706314085267133,
"loss": 3.2867,
"step": 60800
},
{
"epoch": 6.560646900269542,
"grad_norm": 0.8221260905265808,
"learning_rate": 0.00020673934160820288,
"loss": 3.3029,
"step": 60850
},
{
"epoch": 6.566037735849057,
"grad_norm": 0.8052125573158264,
"learning_rate": 0.00020641554236373446,
"loss": 3.2873,
"step": 60900
},
{
"epoch": 6.571428571428571,
"grad_norm": 0.7632663249969482,
"learning_rate": 0.00020609174311926604,
"loss": 3.2892,
"step": 60950
},
{
"epoch": 6.576819407008086,
"grad_norm": 0.7742993831634521,
"learning_rate": 0.0002057679438747976,
"loss": 3.2835,
"step": 61000
},
{
"epoch": 6.576819407008086,
"eval_accuracy": 0.3859351661699238,
"eval_loss": 3.3691229820251465,
"eval_runtime": 144.5776,
"eval_samples_per_second": 124.577,
"eval_steps_per_second": 7.788,
"step": 61000
},
{
"epoch": 6.5822102425876015,
"grad_norm": 0.7621163129806519,
"learning_rate": 0.00020544414463032916,
"loss": 3.282,
"step": 61050
},
{
"epoch": 6.587601078167116,
"grad_norm": 0.7276231646537781,
"learning_rate": 0.00020512034538586074,
"loss": 3.2727,
"step": 61100
},
{
"epoch": 6.592991913746631,
"grad_norm": 0.7370527982711792,
"learning_rate": 0.00020479654614139232,
"loss": 3.2849,
"step": 61150
},
{
"epoch": 6.598382749326145,
"grad_norm": 0.769776463508606,
"learning_rate": 0.0002044727468969239,
"loss": 3.2873,
"step": 61200
},
{
"epoch": 6.60377358490566,
"grad_norm": 0.7550665140151978,
"learning_rate": 0.00020414894765245547,
"loss": 3.287,
"step": 61250
},
{
"epoch": 6.609164420485175,
"grad_norm": 0.730098307132721,
"learning_rate": 0.00020382514840798702,
"loss": 3.294,
"step": 61300
},
{
"epoch": 6.6145552560646905,
"grad_norm": 0.7400188446044922,
"learning_rate": 0.0002035013491635186,
"loss": 3.2924,
"step": 61350
},
{
"epoch": 6.619946091644205,
"grad_norm": 0.8200154900550842,
"learning_rate": 0.00020317754991905015,
"loss": 3.2754,
"step": 61400
},
{
"epoch": 6.62533692722372,
"grad_norm": 0.7727325558662415,
"learning_rate": 0.00020285375067458173,
"loss": 3.3058,
"step": 61450
},
{
"epoch": 6.630727762803234,
"grad_norm": 0.7708116769790649,
"learning_rate": 0.0002025299514301133,
"loss": 3.2838,
"step": 61500
},
{
"epoch": 6.636118598382749,
"grad_norm": 0.799055278301239,
"learning_rate": 0.00020220615218564489,
"loss": 3.2998,
"step": 61550
},
{
"epoch": 6.6415094339622645,
"grad_norm": 0.7551531195640564,
"learning_rate": 0.00020188235294117646,
"loss": 3.2843,
"step": 61600
},
{
"epoch": 6.646900269541779,
"grad_norm": 0.7552589774131775,
"learning_rate": 0.00020155855369670804,
"loss": 3.2813,
"step": 61650
},
{
"epoch": 6.652291105121294,
"grad_norm": 0.783363401889801,
"learning_rate": 0.00020123475445223956,
"loss": 3.3085,
"step": 61700
},
{
"epoch": 6.657681940700809,
"grad_norm": 0.741226851940155,
"learning_rate": 0.00020091743119266052,
"loss": 3.2997,
"step": 61750
},
{
"epoch": 6.663072776280323,
"grad_norm": 0.8101751208305359,
"learning_rate": 0.0002005936319481921,
"loss": 3.2885,
"step": 61800
},
{
"epoch": 6.668463611859838,
"grad_norm": 0.743066132068634,
"learning_rate": 0.00020026983270372367,
"loss": 3.2971,
"step": 61850
},
{
"epoch": 6.6738544474393535,
"grad_norm": 0.7548127770423889,
"learning_rate": 0.00019994603345925525,
"loss": 3.2919,
"step": 61900
},
{
"epoch": 6.679245283018868,
"grad_norm": 0.7908535003662109,
"learning_rate": 0.00019962223421478683,
"loss": 3.2903,
"step": 61950
},
{
"epoch": 6.684636118598383,
"grad_norm": 0.724936306476593,
"learning_rate": 0.0001992984349703184,
"loss": 3.3046,
"step": 62000
},
{
"epoch": 6.684636118598383,
"eval_accuracy": 0.38654590405232897,
"eval_loss": 3.365043878555298,
"eval_runtime": 144.3625,
"eval_samples_per_second": 124.762,
"eval_steps_per_second": 7.8,
"step": 62000
},
{
"epoch": 6.690026954177897,
"grad_norm": 0.7651430368423462,
"learning_rate": 0.00019897463572584993,
"loss": 3.2898,
"step": 62050
},
{
"epoch": 6.695417789757412,
"grad_norm": 0.7043150663375854,
"learning_rate": 0.0001986508364813815,
"loss": 3.2864,
"step": 62100
},
{
"epoch": 6.7008086253369274,
"grad_norm": 0.7986223101615906,
"learning_rate": 0.00019832703723691308,
"loss": 3.2932,
"step": 62150
},
{
"epoch": 6.706199460916442,
"grad_norm": 0.8105931282043457,
"learning_rate": 0.00019800323799244466,
"loss": 3.2967,
"step": 62200
},
{
"epoch": 6.711590296495957,
"grad_norm": 0.7135691046714783,
"learning_rate": 0.00019767943874797624,
"loss": 3.2969,
"step": 62250
},
{
"epoch": 6.716981132075472,
"grad_norm": 0.7264803647994995,
"learning_rate": 0.00019735563950350782,
"loss": 3.2891,
"step": 62300
},
{
"epoch": 6.722371967654986,
"grad_norm": 0.7567557096481323,
"learning_rate": 0.0001970318402590394,
"loss": 3.2933,
"step": 62350
},
{
"epoch": 6.727762803234501,
"grad_norm": 0.7331606149673462,
"learning_rate": 0.00019670804101457097,
"loss": 3.2959,
"step": 62400
},
{
"epoch": 6.7331536388140165,
"grad_norm": 0.779417097568512,
"learning_rate": 0.00019638424177010252,
"loss": 3.2915,
"step": 62450
},
{
"epoch": 6.738544474393531,
"grad_norm": 0.7825338244438171,
"learning_rate": 0.00019606044252563407,
"loss": 3.2879,
"step": 62500
},
{
"epoch": 6.743935309973046,
"grad_norm": 0.7534582614898682,
"learning_rate": 0.00019573664328116565,
"loss": 3.3049,
"step": 62550
},
{
"epoch": 6.74932614555256,
"grad_norm": 0.7738751173019409,
"learning_rate": 0.00019541284403669723,
"loss": 3.2839,
"step": 62600
},
{
"epoch": 6.754716981132075,
"grad_norm": 0.7626593112945557,
"learning_rate": 0.0001950890447922288,
"loss": 3.2903,
"step": 62650
},
{
"epoch": 6.7601078167115904,
"grad_norm": 0.7758113145828247,
"learning_rate": 0.00019476524554776038,
"loss": 3.2867,
"step": 62700
},
{
"epoch": 6.765498652291106,
"grad_norm": 0.7640058398246765,
"learning_rate": 0.00019444144630329193,
"loss": 3.2852,
"step": 62750
},
{
"epoch": 6.77088948787062,
"grad_norm": 0.7460499405860901,
"learning_rate": 0.0001941176470588235,
"loss": 3.2869,
"step": 62800
},
{
"epoch": 6.776280323450135,
"grad_norm": 0.7564590573310852,
"learning_rate": 0.0001937938478143551,
"loss": 3.2819,
"step": 62850
},
{
"epoch": 6.781671159029649,
"grad_norm": 0.7723701000213623,
"learning_rate": 0.00019347004856988664,
"loss": 3.309,
"step": 62900
},
{
"epoch": 6.787061994609164,
"grad_norm": 0.7954161167144775,
"learning_rate": 0.00019314624932541822,
"loss": 3.2894,
"step": 62950
},
{
"epoch": 6.7924528301886795,
"grad_norm": 0.7797737121582031,
"learning_rate": 0.0001928224500809498,
"loss": 3.2794,
"step": 63000
},
{
"epoch": 6.7924528301886795,
"eval_accuracy": 0.3867218130777006,
"eval_loss": 3.3618416786193848,
"eval_runtime": 144.5911,
"eval_samples_per_second": 124.565,
"eval_steps_per_second": 7.787,
"step": 63000
},
{
"epoch": 6.797843665768194,
"grad_norm": 0.7578842043876648,
"learning_rate": 0.00019249865083648137,
"loss": 3.3,
"step": 63050
},
{
"epoch": 6.803234501347709,
"grad_norm": 0.7800530195236206,
"learning_rate": 0.00019217485159201292,
"loss": 3.2958,
"step": 63100
},
{
"epoch": 6.808625336927224,
"grad_norm": 0.7879495024681091,
"learning_rate": 0.0001918510523475445,
"loss": 3.2885,
"step": 63150
},
{
"epoch": 6.814016172506738,
"grad_norm": 0.7620939612388611,
"learning_rate": 0.00019152725310307608,
"loss": 3.3088,
"step": 63200
},
{
"epoch": 6.819407008086253,
"grad_norm": 1.0602725744247437,
"learning_rate": 0.00019120345385860766,
"loss": 3.2894,
"step": 63250
},
{
"epoch": 6.824797843665769,
"grad_norm": 0.7571866512298584,
"learning_rate": 0.00019087965461413923,
"loss": 3.2947,
"step": 63300
},
{
"epoch": 6.830188679245283,
"grad_norm": 0.775108814239502,
"learning_rate": 0.00019055585536967079,
"loss": 3.2748,
"step": 63350
},
{
"epoch": 6.835579514824798,
"grad_norm": 0.7376972436904907,
"learning_rate": 0.00019023205612520234,
"loss": 3.2901,
"step": 63400
},
{
"epoch": 6.840970350404312,
"grad_norm": 0.7462282776832581,
"learning_rate": 0.00018990825688073391,
"loss": 3.2836,
"step": 63450
},
{
"epoch": 6.846361185983827,
"grad_norm": 0.7361283302307129,
"learning_rate": 0.0001895844576362655,
"loss": 3.3086,
"step": 63500
},
{
"epoch": 6.8517520215633425,
"grad_norm": 0.8018236756324768,
"learning_rate": 0.00018926065839179707,
"loss": 3.2844,
"step": 63550
},
{
"epoch": 6.857142857142857,
"grad_norm": 0.7797961831092834,
"learning_rate": 0.00018893685914732865,
"loss": 3.3116,
"step": 63600
},
{
"epoch": 6.862533692722372,
"grad_norm": 0.775970458984375,
"learning_rate": 0.00018861305990286022,
"loss": 3.2958,
"step": 63650
},
{
"epoch": 6.867924528301887,
"grad_norm": 0.8205097913742065,
"learning_rate": 0.0001882892606583918,
"loss": 3.2961,
"step": 63700
},
{
"epoch": 6.873315363881401,
"grad_norm": 0.7419096827507019,
"learning_rate": 0.00018796546141392333,
"loss": 3.3136,
"step": 63750
},
{
"epoch": 6.878706199460916,
"grad_norm": 0.8057942390441895,
"learning_rate": 0.0001876416621694549,
"loss": 3.3051,
"step": 63800
},
{
"epoch": 6.884097035040432,
"grad_norm": 0.76636803150177,
"learning_rate": 0.00018731786292498648,
"loss": 3.2925,
"step": 63850
},
{
"epoch": 6.889487870619946,
"grad_norm": 0.7636591792106628,
"learning_rate": 0.00018699406368051806,
"loss": 3.2883,
"step": 63900
},
{
"epoch": 6.894878706199461,
"grad_norm": 0.7943454384803772,
"learning_rate": 0.00018667026443604964,
"loss": 3.2975,
"step": 63950
},
{
"epoch": 6.900269541778976,
"grad_norm": 0.7841466069221497,
"learning_rate": 0.00018634646519158121,
"loss": 3.2831,
"step": 64000
},
{
"epoch": 6.900269541778976,
"eval_accuracy": 0.38728007161529143,
"eval_loss": 3.355625867843628,
"eval_runtime": 144.5744,
"eval_samples_per_second": 124.579,
"eval_steps_per_second": 7.788,
"step": 64000
},
{
"epoch": 6.90566037735849,
"grad_norm": 0.7233343124389648,
"learning_rate": 0.0001860226659471128,
"loss": 3.2908,
"step": 64050
},
{
"epoch": 6.9110512129380055,
"grad_norm": 0.7608718276023865,
"learning_rate": 0.00018569886670264434,
"loss": 3.2898,
"step": 64100
},
{
"epoch": 6.916442048517521,
"grad_norm": 0.8043278455734253,
"learning_rate": 0.00018537506745817592,
"loss": 3.3015,
"step": 64150
},
{
"epoch": 6.921832884097035,
"grad_norm": 0.791998565196991,
"learning_rate": 0.00018505126821370747,
"loss": 3.2864,
"step": 64200
},
{
"epoch": 6.92722371967655,
"grad_norm": 0.757971465587616,
"learning_rate": 0.00018472746896923905,
"loss": 3.3027,
"step": 64250
},
{
"epoch": 6.932614555256064,
"grad_norm": 0.7807918190956116,
"learning_rate": 0.00018440366972477063,
"loss": 3.2941,
"step": 64300
},
{
"epoch": 6.938005390835579,
"grad_norm": 0.7496076226234436,
"learning_rate": 0.0001840798704803022,
"loss": 3.2869,
"step": 64350
},
{
"epoch": 6.943396226415095,
"grad_norm": 0.762690544128418,
"learning_rate": 0.00018375607123583378,
"loss": 3.3001,
"step": 64400
},
{
"epoch": 6.948787061994609,
"grad_norm": 0.7572615742683411,
"learning_rate": 0.00018343227199136533,
"loss": 3.2748,
"step": 64450
},
{
"epoch": 6.954177897574124,
"grad_norm": 0.7597922682762146,
"learning_rate": 0.0001831084727468969,
"loss": 3.3052,
"step": 64500
},
{
"epoch": 6.959568733153639,
"grad_norm": 0.7559393644332886,
"learning_rate": 0.0001827846735024285,
"loss": 3.2943,
"step": 64550
},
{
"epoch": 6.964959568733153,
"grad_norm": 0.788176417350769,
"learning_rate": 0.00018246087425796004,
"loss": 3.2798,
"step": 64600
},
{
"epoch": 6.9703504043126685,
"grad_norm": 0.777248740196228,
"learning_rate": 0.00018213707501349162,
"loss": 3.2945,
"step": 64650
},
{
"epoch": 6.975741239892184,
"grad_norm": 0.7345637679100037,
"learning_rate": 0.0001818132757690232,
"loss": 3.3001,
"step": 64700
},
{
"epoch": 6.981132075471698,
"grad_norm": 0.7891983985900879,
"learning_rate": 0.00018148947652455474,
"loss": 3.285,
"step": 64750
},
{
"epoch": 6.986522911051213,
"grad_norm": 0.8329722881317139,
"learning_rate": 0.00018116567728008632,
"loss": 3.2956,
"step": 64800
},
{
"epoch": 6.991913746630727,
"grad_norm": 0.7928962707519531,
"learning_rate": 0.0001808418780356179,
"loss": 3.268,
"step": 64850
},
{
"epoch": 6.997304582210242,
"grad_norm": 0.7622669339179993,
"learning_rate": 0.00018051807879114948,
"loss": 3.3115,
"step": 64900
},
{
"epoch": 7.002695417789758,
"grad_norm": 0.7296371459960938,
"learning_rate": 0.00018019427954668105,
"loss": 3.2624,
"step": 64950
},
{
"epoch": 7.008086253369272,
"grad_norm": 0.8181959390640259,
"learning_rate": 0.00017987048030221263,
"loss": 3.2134,
"step": 65000
},
{
"epoch": 7.008086253369272,
"eval_accuracy": 0.38744533265765724,
"eval_loss": 3.3601229190826416,
"eval_runtime": 145.6904,
"eval_samples_per_second": 123.625,
"eval_steps_per_second": 7.729,
"step": 65000
},
{
"epoch": 7.013477088948787,
"grad_norm": 0.8222470283508301,
"learning_rate": 0.00017954668105774416,
"loss": 3.2144,
"step": 65050
},
{
"epoch": 7.018867924528302,
"grad_norm": 0.7402681708335876,
"learning_rate": 0.00017922288181327573,
"loss": 3.215,
"step": 65100
},
{
"epoch": 7.024258760107816,
"grad_norm": 0.8450102806091309,
"learning_rate": 0.0001788990825688073,
"loss": 3.2204,
"step": 65150
},
{
"epoch": 7.0296495956873315,
"grad_norm": 0.7753925323486328,
"learning_rate": 0.0001785752833243389,
"loss": 3.2281,
"step": 65200
},
{
"epoch": 7.035040431266847,
"grad_norm": 0.775593638420105,
"learning_rate": 0.00017825148407987047,
"loss": 3.2031,
"step": 65250
},
{
"epoch": 7.040431266846361,
"grad_norm": 0.7500091195106506,
"learning_rate": 0.00017793416082029142,
"loss": 3.2181,
"step": 65300
},
{
"epoch": 7.045822102425876,
"grad_norm": 0.7804417014122009,
"learning_rate": 0.000177610361575823,
"loss": 3.2131,
"step": 65350
},
{
"epoch": 7.051212938005391,
"grad_norm": 0.7608639597892761,
"learning_rate": 0.00017728656233135452,
"loss": 3.2145,
"step": 65400
},
{
"epoch": 7.056603773584905,
"grad_norm": 0.7375009059906006,
"learning_rate": 0.0001769627630868861,
"loss": 3.2039,
"step": 65450
},
{
"epoch": 7.061994609164421,
"grad_norm": 0.7771769762039185,
"learning_rate": 0.00017663896384241767,
"loss": 3.215,
"step": 65500
},
{
"epoch": 7.067385444743936,
"grad_norm": 0.8169140219688416,
"learning_rate": 0.00017631516459794925,
"loss": 3.2009,
"step": 65550
},
{
"epoch": 7.07277628032345,
"grad_norm": 0.7878122329711914,
"learning_rate": 0.00017599136535348083,
"loss": 3.1953,
"step": 65600
},
{
"epoch": 7.078167115902965,
"grad_norm": 0.7984527349472046,
"learning_rate": 0.0001756675661090124,
"loss": 3.2114,
"step": 65650
},
{
"epoch": 7.083557951482479,
"grad_norm": 0.7757741808891296,
"learning_rate": 0.00017534376686454398,
"loss": 3.2395,
"step": 65700
},
{
"epoch": 7.0889487870619945,
"grad_norm": 0.7992537617683411,
"learning_rate": 0.00017501996762007556,
"loss": 3.2246,
"step": 65750
},
{
"epoch": 7.09433962264151,
"grad_norm": 0.7660715579986572,
"learning_rate": 0.00017469616837560709,
"loss": 3.2147,
"step": 65800
},
{
"epoch": 7.099730458221024,
"grad_norm": 0.7683412432670593,
"learning_rate": 0.00017437236913113866,
"loss": 3.2256,
"step": 65850
},
{
"epoch": 7.105121293800539,
"grad_norm": 0.7562675476074219,
"learning_rate": 0.00017404856988667024,
"loss": 3.2372,
"step": 65900
},
{
"epoch": 7.110512129380054,
"grad_norm": 0.7684675455093384,
"learning_rate": 0.00017372477064220182,
"loss": 3.2343,
"step": 65950
},
{
"epoch": 7.115902964959568,
"grad_norm": 0.7754034996032715,
"learning_rate": 0.0001734009713977334,
"loss": 3.2066,
"step": 66000
},
{
"epoch": 7.115902964959568,
"eval_accuracy": 0.3872903936396338,
"eval_loss": 3.3606462478637695,
"eval_runtime": 145.4852,
"eval_samples_per_second": 123.8,
"eval_steps_per_second": 7.74,
"step": 66000
},
{
"epoch": 7.121293800539084,
"grad_norm": 0.8015467524528503,
"learning_rate": 0.00017307717215326497,
"loss": 3.259,
"step": 66050
},
{
"epoch": 7.126684636118599,
"grad_norm": 0.8027785420417786,
"learning_rate": 0.00017275337290879655,
"loss": 3.215,
"step": 66100
},
{
"epoch": 7.132075471698113,
"grad_norm": 0.8227463364601135,
"learning_rate": 0.0001724295736643281,
"loss": 3.2174,
"step": 66150
},
{
"epoch": 7.137466307277628,
"grad_norm": 0.8033183217048645,
"learning_rate": 0.00017210577441985968,
"loss": 3.2155,
"step": 66200
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.9108396768569946,
"learning_rate": 0.00017178197517539123,
"loss": 3.2324,
"step": 66250
},
{
"epoch": 7.1482479784366575,
"grad_norm": 0.7421971559524536,
"learning_rate": 0.0001714581759309228,
"loss": 3.2349,
"step": 66300
},
{
"epoch": 7.153638814016173,
"grad_norm": 0.7555287480354309,
"learning_rate": 0.00017113437668645439,
"loss": 3.233,
"step": 66350
},
{
"epoch": 7.159029649595688,
"grad_norm": 0.7612764835357666,
"learning_rate": 0.00017081057744198596,
"loss": 3.24,
"step": 66400
},
{
"epoch": 7.164420485175202,
"grad_norm": 0.8320484757423401,
"learning_rate": 0.00017048677819751751,
"loss": 3.2352,
"step": 66450
},
{
"epoch": 7.169811320754717,
"grad_norm": 0.7784168124198914,
"learning_rate": 0.0001701629789530491,
"loss": 3.223,
"step": 66500
},
{
"epoch": 7.175202156334231,
"grad_norm": 0.8041927814483643,
"learning_rate": 0.00016983917970858067,
"loss": 3.2248,
"step": 66550
},
{
"epoch": 7.180592991913747,
"grad_norm": 0.7509095072746277,
"learning_rate": 0.00016951538046411225,
"loss": 3.2273,
"step": 66600
},
{
"epoch": 7.185983827493262,
"grad_norm": 0.8264787197113037,
"learning_rate": 0.0001691915812196438,
"loss": 3.2287,
"step": 66650
},
{
"epoch": 7.191374663072776,
"grad_norm": 0.790759801864624,
"learning_rate": 0.00016886778197517538,
"loss": 3.2261,
"step": 66700
},
{
"epoch": 7.196765498652291,
"grad_norm": 0.8296477794647217,
"learning_rate": 0.00016854398273070693,
"loss": 3.2172,
"step": 66750
},
{
"epoch": 7.202156334231806,
"grad_norm": 0.8137305378913879,
"learning_rate": 0.0001682201834862385,
"loss": 3.2367,
"step": 66800
},
{
"epoch": 7.2075471698113205,
"grad_norm": 0.8298159837722778,
"learning_rate": 0.00016789638424177008,
"loss": 3.2433,
"step": 66850
},
{
"epoch": 7.212938005390836,
"grad_norm": 0.8120679259300232,
"learning_rate": 0.00016757258499730166,
"loss": 3.2214,
"step": 66900
},
{
"epoch": 7.218328840970351,
"grad_norm": 0.7779634594917297,
"learning_rate": 0.00016724878575283324,
"loss": 3.2443,
"step": 66950
},
{
"epoch": 7.223719676549865,
"grad_norm": 0.7775816321372986,
"learning_rate": 0.00016692498650836481,
"loss": 3.2313,
"step": 67000
},
{
"epoch": 7.223719676549865,
"eval_accuracy": 0.3881083325791012,
"eval_loss": 3.3569769859313965,
"eval_runtime": 145.8514,
"eval_samples_per_second": 123.489,
"eval_steps_per_second": 7.72,
"step": 67000
},
{
"epoch": 7.22911051212938,
"grad_norm": 0.9137212038040161,
"learning_rate": 0.0001666011872638964,
"loss": 3.2572,
"step": 67050
},
{
"epoch": 7.234501347708895,
"grad_norm": 0.7530747652053833,
"learning_rate": 0.00016627738801942792,
"loss": 3.2319,
"step": 67100
},
{
"epoch": 7.2398921832884096,
"grad_norm": 0.7923324704170227,
"learning_rate": 0.0001659535887749595,
"loss": 3.2455,
"step": 67150
},
{
"epoch": 7.245283018867925,
"grad_norm": 0.839113175868988,
"learning_rate": 0.00016562978953049107,
"loss": 3.2298,
"step": 67200
},
{
"epoch": 7.250673854447439,
"grad_norm": 0.8368660807609558,
"learning_rate": 0.00016530599028602265,
"loss": 3.2377,
"step": 67250
},
{
"epoch": 7.256064690026954,
"grad_norm": 0.8083794116973877,
"learning_rate": 0.00016498219104155423,
"loss": 3.2366,
"step": 67300
},
{
"epoch": 7.261455525606469,
"grad_norm": 0.7712308168411255,
"learning_rate": 0.0001646583917970858,
"loss": 3.2317,
"step": 67350
},
{
"epoch": 7.2668463611859835,
"grad_norm": 0.8151483535766602,
"learning_rate": 0.00016433459255261738,
"loss": 3.2363,
"step": 67400
},
{
"epoch": 7.272237196765499,
"grad_norm": 0.7958182692527771,
"learning_rate": 0.00016401079330814896,
"loss": 3.2325,
"step": 67450
},
{
"epoch": 7.277628032345014,
"grad_norm": 0.8089446425437927,
"learning_rate": 0.00016368699406368048,
"loss": 3.2437,
"step": 67500
},
{
"epoch": 7.283018867924528,
"grad_norm": 0.8065378069877625,
"learning_rate": 0.00016336319481921206,
"loss": 3.2462,
"step": 67550
},
{
"epoch": 7.288409703504043,
"grad_norm": 0.7744677662849426,
"learning_rate": 0.00016303939557474364,
"loss": 3.221,
"step": 67600
},
{
"epoch": 7.293800539083558,
"grad_norm": 0.7702655792236328,
"learning_rate": 0.00016271559633027522,
"loss": 3.2416,
"step": 67650
},
{
"epoch": 7.2991913746630726,
"grad_norm": 0.828003466129303,
"learning_rate": 0.00016239827307069617,
"loss": 3.2267,
"step": 67700
},
{
"epoch": 7.304582210242588,
"grad_norm": 0.7710825800895691,
"learning_rate": 0.00016207447382622775,
"loss": 3.2379,
"step": 67750
},
{
"epoch": 7.309973045822103,
"grad_norm": 0.8105592727661133,
"learning_rate": 0.00016175067458175932,
"loss": 3.2276,
"step": 67800
},
{
"epoch": 7.315363881401617,
"grad_norm": 0.7686548829078674,
"learning_rate": 0.00016142687533729085,
"loss": 3.2388,
"step": 67850
},
{
"epoch": 7.320754716981132,
"grad_norm": 0.8127673268318176,
"learning_rate": 0.00016110307609282242,
"loss": 3.2269,
"step": 67900
},
{
"epoch": 7.3261455525606465,
"grad_norm": 0.7651295065879822,
"learning_rate": 0.000160779276848354,
"loss": 3.2275,
"step": 67950
},
{
"epoch": 7.331536388140162,
"grad_norm": 0.8131924271583557,
"learning_rate": 0.00016045547760388558,
"loss": 3.2319,
"step": 68000
},
{
"epoch": 7.331536388140162,
"eval_accuracy": 0.3880722598203468,
"eval_loss": 3.353992462158203,
"eval_runtime": 145.7,
"eval_samples_per_second": 123.617,
"eval_steps_per_second": 7.728,
"step": 68000
},
{
"epoch": 7.336927223719677,
"grad_norm": 0.9014919400215149,
"learning_rate": 0.00016013167835941716,
"loss": 3.2378,
"step": 68050
},
{
"epoch": 7.342318059299191,
"grad_norm": 0.8048769235610962,
"learning_rate": 0.00015980787911494873,
"loss": 3.2657,
"step": 68100
},
{
"epoch": 7.347708894878706,
"grad_norm": 0.8662220239639282,
"learning_rate": 0.00015948407987048029,
"loss": 3.2301,
"step": 68150
},
{
"epoch": 7.353099730458221,
"grad_norm": 0.787700891494751,
"learning_rate": 0.00015916028062601186,
"loss": 3.2274,
"step": 68200
},
{
"epoch": 7.3584905660377355,
"grad_norm": 0.8394333720207214,
"learning_rate": 0.00015883648138154344,
"loss": 3.2574,
"step": 68250
},
{
"epoch": 7.363881401617251,
"grad_norm": 0.8151025772094727,
"learning_rate": 0.000158512682137075,
"loss": 3.2351,
"step": 68300
},
{
"epoch": 7.369272237196766,
"grad_norm": 0.7831388115882874,
"learning_rate": 0.00015818888289260657,
"loss": 3.2432,
"step": 68350
},
{
"epoch": 7.37466307277628,
"grad_norm": 0.853025496006012,
"learning_rate": 0.00015786508364813815,
"loss": 3.2455,
"step": 68400
},
{
"epoch": 7.380053908355795,
"grad_norm": 0.8126118779182434,
"learning_rate": 0.0001575412844036697,
"loss": 3.2292,
"step": 68450
},
{
"epoch": 7.38544474393531,
"grad_norm": 0.8116118311882019,
"learning_rate": 0.00015721748515920127,
"loss": 3.218,
"step": 68500
},
{
"epoch": 7.390835579514825,
"grad_norm": 0.7965927124023438,
"learning_rate": 0.00015689368591473285,
"loss": 3.2409,
"step": 68550
},
{
"epoch": 7.39622641509434,
"grad_norm": 0.8036773800849915,
"learning_rate": 0.00015656988667026443,
"loss": 3.2246,
"step": 68600
},
{
"epoch": 7.401617250673855,
"grad_norm": 0.8369459509849548,
"learning_rate": 0.000156246087425796,
"loss": 3.2465,
"step": 68650
},
{
"epoch": 7.407008086253369,
"grad_norm": 0.8160671591758728,
"learning_rate": 0.00015592228818132756,
"loss": 3.2222,
"step": 68700
},
{
"epoch": 7.412398921832884,
"grad_norm": 0.8571521043777466,
"learning_rate": 0.0001555984889368591,
"loss": 3.2302,
"step": 68750
},
{
"epoch": 7.4177897574123985,
"grad_norm": 0.8442217707633972,
"learning_rate": 0.0001552746896923907,
"loss": 3.2493,
"step": 68800
},
{
"epoch": 7.423180592991914,
"grad_norm": 0.7977373600006104,
"learning_rate": 0.00015495089044792226,
"loss": 3.2411,
"step": 68850
},
{
"epoch": 7.428571428571429,
"grad_norm": 0.8184266686439514,
"learning_rate": 0.00015462709120345384,
"loss": 3.2495,
"step": 68900
},
{
"epoch": 7.433962264150943,
"grad_norm": 0.8047189116477966,
"learning_rate": 0.00015430329195898542,
"loss": 3.2391,
"step": 68950
},
{
"epoch": 7.439353099730458,
"grad_norm": 0.7755230069160461,
"learning_rate": 0.000153979492714517,
"loss": 3.2282,
"step": 69000
},
{
"epoch": 7.439353099730458,
"eval_accuracy": 0.38815201104000263,
"eval_loss": 3.350517988204956,
"eval_runtime": 145.7673,
"eval_samples_per_second": 123.56,
"eval_steps_per_second": 7.725,
"step": 69000
},
{
"epoch": 7.444743935309973,
"grad_norm": 0.7657031416893005,
"learning_rate": 0.00015365569347004858,
"loss": 3.225,
"step": 69050
},
{
"epoch": 7.450134770889488,
"grad_norm": 0.8080745339393616,
"learning_rate": 0.00015333189422558015,
"loss": 3.2301,
"step": 69100
},
{
"epoch": 7.455525606469003,
"grad_norm": 0.8516858220100403,
"learning_rate": 0.00015300809498111168,
"loss": 3.2443,
"step": 69150
},
{
"epoch": 7.460916442048518,
"grad_norm": 0.9866870641708374,
"learning_rate": 0.00015268429573664325,
"loss": 3.2562,
"step": 69200
},
{
"epoch": 7.466307277628032,
"grad_norm": 0.8220759630203247,
"learning_rate": 0.00015236049649217483,
"loss": 3.2332,
"step": 69250
},
{
"epoch": 7.471698113207547,
"grad_norm": 0.9025027751922607,
"learning_rate": 0.0001520366972477064,
"loss": 3.2619,
"step": 69300
},
{
"epoch": 7.4770889487870615,
"grad_norm": 0.8095342516899109,
"learning_rate": 0.000151712898003238,
"loss": 3.2457,
"step": 69350
},
{
"epoch": 7.482479784366577,
"grad_norm": 0.8287063837051392,
"learning_rate": 0.00015138909875876956,
"loss": 3.2382,
"step": 69400
},
{
"epoch": 7.487870619946092,
"grad_norm": 0.8205540776252747,
"learning_rate": 0.00015106529951430114,
"loss": 3.2394,
"step": 69450
},
{
"epoch": 7.493261455525606,
"grad_norm": 0.8113540410995483,
"learning_rate": 0.0001507415002698327,
"loss": 3.252,
"step": 69500
},
{
"epoch": 7.498652291105121,
"grad_norm": 0.8273891806602478,
"learning_rate": 0.00015041770102536424,
"loss": 3.2314,
"step": 69550
},
{
"epoch": 7.504043126684636,
"grad_norm": 0.7961973547935486,
"learning_rate": 0.00015009390178089582,
"loss": 3.2401,
"step": 69600
},
{
"epoch": 7.509433962264151,
"grad_norm": 0.8073656558990479,
"learning_rate": 0.0001497701025364274,
"loss": 3.2553,
"step": 69650
},
{
"epoch": 7.514824797843666,
"grad_norm": 0.795110821723938,
"learning_rate": 0.00014944630329195898,
"loss": 3.2479,
"step": 69700
},
{
"epoch": 7.520215633423181,
"grad_norm": 0.790119469165802,
"learning_rate": 0.00014912250404749055,
"loss": 3.2681,
"step": 69750
},
{
"epoch": 7.525606469002695,
"grad_norm": 0.8171307444572449,
"learning_rate": 0.0001487987048030221,
"loss": 3.2537,
"step": 69800
},
{
"epoch": 7.53099730458221,
"grad_norm": 0.7953904271125793,
"learning_rate": 0.00014847490555855368,
"loss": 3.2338,
"step": 69850
},
{
"epoch": 7.536388140161725,
"grad_norm": 0.8385747671127319,
"learning_rate": 0.00014815110631408526,
"loss": 3.2212,
"step": 69900
},
{
"epoch": 7.54177897574124,
"grad_norm": 0.8413951396942139,
"learning_rate": 0.0001478273070696168,
"loss": 3.2573,
"step": 69950
},
{
"epoch": 7.547169811320755,
"grad_norm": 0.7891599535942078,
"learning_rate": 0.0001475035078251484,
"loss": 3.2386,
"step": 70000
},
{
"epoch": 7.547169811320755,
"eval_accuracy": 0.38888606995007724,
"eval_loss": 3.3455100059509277,
"eval_runtime": 145.5338,
"eval_samples_per_second": 123.758,
"eval_steps_per_second": 7.737,
"step": 70000
},
{
"epoch": 7.55256064690027,
"grad_norm": 0.7678188681602478,
"learning_rate": 0.00014717970858067997,
"loss": 3.2291,
"step": 70050
},
{
"epoch": 7.557951482479784,
"grad_norm": 0.8082351684570312,
"learning_rate": 0.00014685590933621154,
"loss": 3.2463,
"step": 70100
},
{
"epoch": 7.563342318059299,
"grad_norm": 0.7960216999053955,
"learning_rate": 0.0001465321100917431,
"loss": 3.2447,
"step": 70150
},
{
"epoch": 7.568733153638814,
"grad_norm": 0.8490812182426453,
"learning_rate": 0.00014620831084727467,
"loss": 3.238,
"step": 70200
},
{
"epoch": 7.574123989218329,
"grad_norm": 0.810519814491272,
"learning_rate": 0.00014588451160280625,
"loss": 3.249,
"step": 70250
},
{
"epoch": 7.579514824797844,
"grad_norm": 0.7496234178543091,
"learning_rate": 0.0001455607123583378,
"loss": 3.2416,
"step": 70300
},
{
"epoch": 7.584905660377358,
"grad_norm": 0.8409221172332764,
"learning_rate": 0.00014523691311386938,
"loss": 3.2436,
"step": 70350
},
{
"epoch": 7.590296495956873,
"grad_norm": 0.7896668910980225,
"learning_rate": 0.00014491311386940096,
"loss": 3.2539,
"step": 70400
},
{
"epoch": 7.595687331536388,
"grad_norm": 0.8397171497344971,
"learning_rate": 0.00014458931462493253,
"loss": 3.2556,
"step": 70450
},
{
"epoch": 7.601078167115903,
"grad_norm": 0.7945877909660339,
"learning_rate": 0.00014426551538046408,
"loss": 3.2471,
"step": 70500
},
{
"epoch": 7.606469002695418,
"grad_norm": 0.7988942861557007,
"learning_rate": 0.00014394171613599566,
"loss": 3.2303,
"step": 70550
},
{
"epoch": 7.611859838274933,
"grad_norm": 0.7489826083183289,
"learning_rate": 0.00014361791689152724,
"loss": 3.2461,
"step": 70600
},
{
"epoch": 7.617250673854447,
"grad_norm": 0.7894483208656311,
"learning_rate": 0.00014329411764705882,
"loss": 3.2269,
"step": 70650
},
{
"epoch": 7.622641509433962,
"grad_norm": 0.7881432771682739,
"learning_rate": 0.0001429703184025904,
"loss": 3.2276,
"step": 70700
},
{
"epoch": 7.628032345013477,
"grad_norm": 0.7781510353088379,
"learning_rate": 0.00014264651915812194,
"loss": 3.2452,
"step": 70750
},
{
"epoch": 7.633423180592992,
"grad_norm": 0.7770166397094727,
"learning_rate": 0.00014232271991365352,
"loss": 3.2514,
"step": 70800
},
{
"epoch": 7.638814016172507,
"grad_norm": 0.788841724395752,
"learning_rate": 0.0001419989206691851,
"loss": 3.2447,
"step": 70850
},
{
"epoch": 7.644204851752022,
"grad_norm": 0.8239420652389526,
"learning_rate": 0.00014167512142471668,
"loss": 3.2494,
"step": 70900
},
{
"epoch": 7.649595687331536,
"grad_norm": 0.9228202700614929,
"learning_rate": 0.00014135132218024823,
"loss": 3.2437,
"step": 70950
},
{
"epoch": 7.654986522911051,
"grad_norm": 0.7943241596221924,
"learning_rate": 0.0001410275229357798,
"loss": 3.233,
"step": 71000
},
{
"epoch": 7.654986522911051,
"eval_accuracy": 0.38909696520532516,
"eval_loss": 3.342852830886841,
"eval_runtime": 145.7317,
"eval_samples_per_second": 123.59,
"eval_steps_per_second": 7.727,
"step": 71000
},
{
"epoch": 7.660377358490566,
"grad_norm": 0.8611652851104736,
"learning_rate": 0.00014070372369131138,
"loss": 3.255,
"step": 71050
},
{
"epoch": 7.665768194070081,
"grad_norm": 0.8180993795394897,
"learning_rate": 0.00014037992444684296,
"loss": 3.2475,
"step": 71100
},
{
"epoch": 7.671159029649596,
"grad_norm": 0.8241807818412781,
"learning_rate": 0.0001400561252023745,
"loss": 3.2518,
"step": 71150
},
{
"epoch": 7.67654986522911,
"grad_norm": 0.8034350872039795,
"learning_rate": 0.0001397323259579061,
"loss": 3.2353,
"step": 71200
},
{
"epoch": 7.681940700808625,
"grad_norm": 0.796752393245697,
"learning_rate": 0.00013940852671343767,
"loss": 3.234,
"step": 71250
},
{
"epoch": 7.6873315363881405,
"grad_norm": 0.7918026447296143,
"learning_rate": 0.00013908472746896924,
"loss": 3.2394,
"step": 71300
},
{
"epoch": 7.692722371967655,
"grad_norm": 0.8091316819190979,
"learning_rate": 0.0001387609282245008,
"loss": 3.2258,
"step": 71350
},
{
"epoch": 7.69811320754717,
"grad_norm": 0.7816873788833618,
"learning_rate": 0.00013843712898003237,
"loss": 3.2449,
"step": 71400
},
{
"epoch": 7.703504043126685,
"grad_norm": 0.7878933548927307,
"learning_rate": 0.00013811332973556395,
"loss": 3.2466,
"step": 71450
},
{
"epoch": 7.708894878706199,
"grad_norm": 0.7926165461540222,
"learning_rate": 0.0001377895304910955,
"loss": 3.2307,
"step": 71500
},
{
"epoch": 7.714285714285714,
"grad_norm": 0.8380984663963318,
"learning_rate": 0.00013746573124662708,
"loss": 3.2601,
"step": 71550
},
{
"epoch": 7.719676549865229,
"grad_norm": 0.80472731590271,
"learning_rate": 0.00013714193200215866,
"loss": 3.2394,
"step": 71600
},
{
"epoch": 7.725067385444744,
"grad_norm": 0.8318880200386047,
"learning_rate": 0.0001368181327576902,
"loss": 3.2225,
"step": 71650
},
{
"epoch": 7.730458221024259,
"grad_norm": 0.8377485275268555,
"learning_rate": 0.00013650080949811116,
"loss": 3.2499,
"step": 71700
},
{
"epoch": 7.735849056603773,
"grad_norm": 0.8369395136833191,
"learning_rate": 0.00013617701025364274,
"loss": 3.2494,
"step": 71750
},
{
"epoch": 7.741239892183288,
"grad_norm": 0.8334224224090576,
"learning_rate": 0.0001358532110091743,
"loss": 3.2524,
"step": 71800
},
{
"epoch": 7.7466307277628035,
"grad_norm": 0.8204824924468994,
"learning_rate": 0.00013553588774959524,
"loss": 3.2437,
"step": 71850
},
{
"epoch": 7.752021563342318,
"grad_norm": 0.8162007927894592,
"learning_rate": 0.00013521208850512682,
"loss": 3.213,
"step": 71900
},
{
"epoch": 7.757412398921833,
"grad_norm": 0.8048626780509949,
"learning_rate": 0.0001348882892606584,
"loss": 3.2554,
"step": 71950
},
{
"epoch": 7.762803234501348,
"grad_norm": 0.8684512972831726,
"learning_rate": 0.00013456449001618995,
"loss": 3.2386,
"step": 72000
},
{
"epoch": 7.762803234501348,
"eval_accuracy": 0.389787562960274,
"eval_loss": 3.3377344608306885,
"eval_runtime": 146.2901,
"eval_samples_per_second": 123.118,
"eval_steps_per_second": 7.697,
"step": 72000
},
{
"epoch": 7.768194070080862,
"grad_norm": 0.8167480230331421,
"learning_rate": 0.00013424069077172152,
"loss": 3.2601,
"step": 72050
},
{
"epoch": 7.773584905660377,
"grad_norm": 0.8325658440589905,
"learning_rate": 0.0001339168915272531,
"loss": 3.2449,
"step": 72100
},
{
"epoch": 7.7789757412398925,
"grad_norm": 0.8947717547416687,
"learning_rate": 0.00013359309228278465,
"loss": 3.2361,
"step": 72150
},
{
"epoch": 7.784366576819407,
"grad_norm": 0.7974467873573303,
"learning_rate": 0.00013326929303831623,
"loss": 3.2321,
"step": 72200
},
{
"epoch": 7.789757412398922,
"grad_norm": 0.7896796464920044,
"learning_rate": 0.0001329454937938478,
"loss": 3.2473,
"step": 72250
},
{
"epoch": 7.795148247978437,
"grad_norm": 0.8590373396873474,
"learning_rate": 0.00013262169454937936,
"loss": 3.2422,
"step": 72300
},
{
"epoch": 7.800539083557951,
"grad_norm": 0.8200933337211609,
"learning_rate": 0.00013229789530491093,
"loss": 3.2284,
"step": 72350
},
{
"epoch": 7.8059299191374665,
"grad_norm": 0.8711622953414917,
"learning_rate": 0.0001319740960604425,
"loss": 3.2369,
"step": 72400
},
{
"epoch": 7.811320754716981,
"grad_norm": 0.7932628393173218,
"learning_rate": 0.0001316502968159741,
"loss": 3.2496,
"step": 72450
},
{
"epoch": 7.816711590296496,
"grad_norm": 0.8283679485321045,
"learning_rate": 0.00013132649757150564,
"loss": 3.2394,
"step": 72500
},
{
"epoch": 7.822102425876011,
"grad_norm": 0.8596921563148499,
"learning_rate": 0.00013100269832703722,
"loss": 3.2328,
"step": 72550
},
{
"epoch": 7.827493261455525,
"grad_norm": 0.8052673935890198,
"learning_rate": 0.0001306788990825688,
"loss": 3.226,
"step": 72600
},
{
"epoch": 7.83288409703504,
"grad_norm": 0.8220215439796448,
"learning_rate": 0.00013035509983810037,
"loss": 3.2267,
"step": 72650
},
{
"epoch": 7.8382749326145555,
"grad_norm": 0.8143494129180908,
"learning_rate": 0.00013003130059363192,
"loss": 3.2501,
"step": 72700
},
{
"epoch": 7.84366576819407,
"grad_norm": 0.8493731617927551,
"learning_rate": 0.0001297075013491635,
"loss": 3.2412,
"step": 72750
},
{
"epoch": 7.849056603773585,
"grad_norm": 0.8398842215538025,
"learning_rate": 0.00012938370210469508,
"loss": 3.2191,
"step": 72800
},
{
"epoch": 7.8544474393531,
"grad_norm": 0.8476974368095398,
"learning_rate": 0.00012905990286022666,
"loss": 3.2391,
"step": 72850
},
{
"epoch": 7.859838274932614,
"grad_norm": 0.8712428212165833,
"learning_rate": 0.0001287361036157582,
"loss": 3.238,
"step": 72900
},
{
"epoch": 7.8652291105121295,
"grad_norm": 0.8013792634010315,
"learning_rate": 0.00012841230437128979,
"loss": 3.2479,
"step": 72950
},
{
"epoch": 7.870619946091644,
"grad_norm": 0.8213805556297302,
"learning_rate": 0.00012808850512682136,
"loss": 3.247,
"step": 73000
},
{
"epoch": 7.870619946091644,
"eval_accuracy": 0.38990479942622586,
"eval_loss": 3.335577964782715,
"eval_runtime": 144.3715,
"eval_samples_per_second": 124.755,
"eval_steps_per_second": 7.799,
"step": 73000
},
{
"epoch": 7.876010781671159,
"grad_norm": 0.8413445353507996,
"learning_rate": 0.00012776470588235294,
"loss": 3.2344,
"step": 73050
},
{
"epoch": 7.881401617250674,
"grad_norm": 0.8258917927742004,
"learning_rate": 0.0001274409066378845,
"loss": 3.2385,
"step": 73100
},
{
"epoch": 7.886792452830189,
"grad_norm": 0.8316524028778076,
"learning_rate": 0.00012711710739341607,
"loss": 3.2509,
"step": 73150
},
{
"epoch": 7.892183288409703,
"grad_norm": 0.8293790221214294,
"learning_rate": 0.00012679330814894765,
"loss": 3.2158,
"step": 73200
},
{
"epoch": 7.8975741239892185,
"grad_norm": 0.8351441025733948,
"learning_rate": 0.00012646950890447922,
"loss": 3.2404,
"step": 73250
},
{
"epoch": 7.902964959568733,
"grad_norm": 0.8133177161216736,
"learning_rate": 0.00012614570966001077,
"loss": 3.2332,
"step": 73300
},
{
"epoch": 7.908355795148248,
"grad_norm": 0.8715577125549316,
"learning_rate": 0.00012582191041554235,
"loss": 3.2674,
"step": 73350
},
{
"epoch": 7.913746630727763,
"grad_norm": 0.8071417212486267,
"learning_rate": 0.00012549811117107393,
"loss": 3.2542,
"step": 73400
},
{
"epoch": 7.919137466307277,
"grad_norm": 0.811610758304596,
"learning_rate": 0.0001251743119266055,
"loss": 3.2546,
"step": 73450
},
{
"epoch": 7.9245283018867925,
"grad_norm": 0.8281325101852417,
"learning_rate": 0.00012485051268213706,
"loss": 3.2587,
"step": 73500
},
{
"epoch": 7.929919137466308,
"grad_norm": 0.8137043714523315,
"learning_rate": 0.00012452671343766864,
"loss": 3.2316,
"step": 73550
},
{
"epoch": 7.935309973045822,
"grad_norm": 0.8106021285057068,
"learning_rate": 0.00012420291419320021,
"loss": 3.2544,
"step": 73600
},
{
"epoch": 7.940700808625337,
"grad_norm": 0.8555049896240234,
"learning_rate": 0.0001238791149487318,
"loss": 3.2407,
"step": 73650
},
{
"epoch": 7.946091644204852,
"grad_norm": 0.8347945809364319,
"learning_rate": 0.00012355531570426334,
"loss": 3.2317,
"step": 73700
},
{
"epoch": 7.951482479784366,
"grad_norm": 0.8815454244613647,
"learning_rate": 0.00012323151645979492,
"loss": 3.2408,
"step": 73750
},
{
"epoch": 7.9568733153638815,
"grad_norm": 0.8233853578567505,
"learning_rate": 0.0001229077172153265,
"loss": 3.237,
"step": 73800
},
{
"epoch": 7.962264150943396,
"grad_norm": 0.7983092665672302,
"learning_rate": 0.00012258391797085805,
"loss": 3.2377,
"step": 73850
},
{
"epoch": 7.967654986522911,
"grad_norm": 0.834989607334137,
"learning_rate": 0.00012226011872638963,
"loss": 3.2338,
"step": 73900
},
{
"epoch": 7.973045822102426,
"grad_norm": 0.8054497241973877,
"learning_rate": 0.00012193631948192119,
"loss": 3.2545,
"step": 73950
},
{
"epoch": 7.97843665768194,
"grad_norm": 0.8481306433677673,
"learning_rate": 0.00012161899622234214,
"loss": 3.2571,
"step": 74000
},
{
"epoch": 7.97843665768194,
"eval_accuracy": 0.3907868435695038,
"eval_loss": 3.3311500549316406,
"eval_runtime": 144.658,
"eval_samples_per_second": 124.507,
"eval_steps_per_second": 7.784,
"step": 74000
},
{
"epoch": 7.9838274932614555,
"grad_norm": 0.8332591652870178,
"learning_rate": 0.00012129519697787372,
"loss": 3.2535,
"step": 74050
},
{
"epoch": 7.989218328840971,
"grad_norm": 0.861570417881012,
"learning_rate": 0.00012097139773340527,
"loss": 3.2489,
"step": 74100
},
{
"epoch": 7.994609164420485,
"grad_norm": 0.8170042037963867,
"learning_rate": 0.00012064759848893685,
"loss": 3.2502,
"step": 74150
},
{
"epoch": 8.0,
"grad_norm": 1.6789113283157349,
"learning_rate": 0.00012032379924446843,
"loss": 3.2217,
"step": 74200
},
{
"epoch": 8.005390835579515,
"grad_norm": 0.8028563261032104,
"learning_rate": 0.00011999999999999999,
"loss": 3.1772,
"step": 74250
},
{
"epoch": 8.01078167115903,
"grad_norm": 0.8054965734481812,
"learning_rate": 0.00011967620075553155,
"loss": 3.1594,
"step": 74300
},
{
"epoch": 8.016172506738544,
"grad_norm": 0.8452708125114441,
"learning_rate": 0.00011935240151106313,
"loss": 3.1603,
"step": 74350
},
{
"epoch": 8.021563342318059,
"grad_norm": 0.8379788398742676,
"learning_rate": 0.00011902860226659471,
"loss": 3.1634,
"step": 74400
},
{
"epoch": 8.026954177897574,
"grad_norm": 0.8776307702064514,
"learning_rate": 0.00011870480302212627,
"loss": 3.1622,
"step": 74450
},
{
"epoch": 8.032345013477089,
"grad_norm": 0.8483014106750488,
"learning_rate": 0.00011838100377765784,
"loss": 3.1671,
"step": 74500
},
{
"epoch": 8.037735849056604,
"grad_norm": 0.8685150742530823,
"learning_rate": 0.00011805720453318941,
"loss": 3.1845,
"step": 74550
},
{
"epoch": 8.04312668463612,
"grad_norm": 0.8260387778282166,
"learning_rate": 0.00011773340528872098,
"loss": 3.18,
"step": 74600
},
{
"epoch": 8.048517520215633,
"grad_norm": 0.8391966819763184,
"learning_rate": 0.00011740960604425256,
"loss": 3.1739,
"step": 74650
},
{
"epoch": 8.053908355795148,
"grad_norm": 0.8282108902931213,
"learning_rate": 0.00011708580679978412,
"loss": 3.1652,
"step": 74700
},
{
"epoch": 8.059299191374663,
"grad_norm": 0.8193286061286926,
"learning_rate": 0.00011676200755531568,
"loss": 3.1945,
"step": 74750
},
{
"epoch": 8.064690026954178,
"grad_norm": 0.8471682667732239,
"learning_rate": 0.00011643820831084726,
"loss": 3.1715,
"step": 74800
},
{
"epoch": 8.070080862533693,
"grad_norm": 0.8014234304428101,
"learning_rate": 0.00011611440906637884,
"loss": 3.1752,
"step": 74850
},
{
"epoch": 8.075471698113208,
"grad_norm": 0.840215802192688,
"learning_rate": 0.00011579060982191042,
"loss": 3.1704,
"step": 74900
},
{
"epoch": 8.080862533692722,
"grad_norm": 0.8199812173843384,
"learning_rate": 0.00011546681057744197,
"loss": 3.1716,
"step": 74950
},
{
"epoch": 8.086253369272237,
"grad_norm": 0.810865581035614,
"learning_rate": 0.00011514301133297355,
"loss": 3.1695,
"step": 75000
},
{
"epoch": 8.086253369272237,
"eval_accuracy": 0.39048239817784763,
"eval_loss": 3.3377249240875244,
"eval_runtime": 144.4091,
"eval_samples_per_second": 124.722,
"eval_steps_per_second": 7.797,
"step": 75000
},
{
"epoch": 8.091644204851752,
"grad_norm": 0.8431777954101562,
"learning_rate": 0.00011481921208850512,
"loss": 3.1765,
"step": 75050
},
{
"epoch": 8.097035040431267,
"grad_norm": 0.8121163249015808,
"learning_rate": 0.00011449541284403669,
"loss": 3.1908,
"step": 75100
},
{
"epoch": 8.102425876010782,
"grad_norm": 0.8409184217453003,
"learning_rate": 0.00011417161359956825,
"loss": 3.1766,
"step": 75150
},
{
"epoch": 8.107816711590296,
"grad_norm": 0.8380523920059204,
"learning_rate": 0.00011384781435509983,
"loss": 3.1783,
"step": 75200
},
{
"epoch": 8.11320754716981,
"grad_norm": 0.8611305356025696,
"learning_rate": 0.0001135240151106314,
"loss": 3.1792,
"step": 75250
},
{
"epoch": 8.118598382749326,
"grad_norm": 0.8434821963310242,
"learning_rate": 0.00011320021586616297,
"loss": 3.1825,
"step": 75300
},
{
"epoch": 8.123989218328841,
"grad_norm": 0.8211022019386292,
"learning_rate": 0.00011287641662169454,
"loss": 3.1659,
"step": 75350
},
{
"epoch": 8.129380053908356,
"grad_norm": 0.8678478002548218,
"learning_rate": 0.0001125526173772261,
"loss": 3.1744,
"step": 75400
},
{
"epoch": 8.134770889487871,
"grad_norm": 0.825543224811554,
"learning_rate": 0.00011222881813275768,
"loss": 3.1756,
"step": 75450
},
{
"epoch": 8.140161725067385,
"grad_norm": 0.8677042722702026,
"learning_rate": 0.00011190501888828925,
"loss": 3.1792,
"step": 75500
},
{
"epoch": 8.1455525606469,
"grad_norm": 0.8366581201553345,
"learning_rate": 0.00011158121964382083,
"loss": 3.1813,
"step": 75550
},
{
"epoch": 8.150943396226415,
"grad_norm": 0.84686678647995,
"learning_rate": 0.00011125742039935238,
"loss": 3.1831,
"step": 75600
},
{
"epoch": 8.15633423180593,
"grad_norm": 0.8925803303718567,
"learning_rate": 0.00011093362115488396,
"loss": 3.1653,
"step": 75650
},
{
"epoch": 8.161725067385445,
"grad_norm": 0.821288526058197,
"learning_rate": 0.00011060982191041554,
"loss": 3.1753,
"step": 75700
},
{
"epoch": 8.167115902964959,
"grad_norm": 0.8067049980163574,
"learning_rate": 0.00011028602266594712,
"loss": 3.1959,
"step": 75750
},
{
"epoch": 8.172506738544474,
"grad_norm": 0.8495302200317383,
"learning_rate": 0.00010996222342147867,
"loss": 3.2006,
"step": 75800
},
{
"epoch": 8.177897574123989,
"grad_norm": 0.8403761982917786,
"learning_rate": 0.00010963842417701024,
"loss": 3.1888,
"step": 75850
},
{
"epoch": 8.183288409703504,
"grad_norm": 0.8572573661804199,
"learning_rate": 0.00010931462493254182,
"loss": 3.1722,
"step": 75900
},
{
"epoch": 8.18867924528302,
"grad_norm": 0.8850594758987427,
"learning_rate": 0.00010899082568807339,
"loss": 3.1801,
"step": 75950
},
{
"epoch": 8.194070080862534,
"grad_norm": 0.8388485908508301,
"learning_rate": 0.00010866702644360495,
"loss": 3.1903,
"step": 76000
},
{
"epoch": 8.194070080862534,
"eval_accuracy": 0.39056584359568913,
"eval_loss": 3.335054636001587,
"eval_runtime": 144.6208,
"eval_samples_per_second": 124.539,
"eval_steps_per_second": 7.786,
"step": 76000
},
{
"epoch": 8.199460916442048,
"grad_norm": 0.8369682431221008,
"learning_rate": 0.00010834322719913653,
"loss": 3.1824,
"step": 76050
},
{
"epoch": 8.204851752021563,
"grad_norm": 0.8276783227920532,
"learning_rate": 0.00010801942795466809,
"loss": 3.1894,
"step": 76100
},
{
"epoch": 8.210242587601078,
"grad_norm": 0.84661865234375,
"learning_rate": 0.00010769562871019967,
"loss": 3.1976,
"step": 76150
},
{
"epoch": 8.215633423180593,
"grad_norm": 0.8589726090431213,
"learning_rate": 0.00010737182946573123,
"loss": 3.1923,
"step": 76200
},
{
"epoch": 8.221024258760108,
"grad_norm": 0.8558250069618225,
"learning_rate": 0.0001070480302212628,
"loss": 3.1844,
"step": 76250
},
{
"epoch": 8.226415094339623,
"grad_norm": 0.8655318021774292,
"learning_rate": 0.00010673070696168375,
"loss": 3.183,
"step": 76300
},
{
"epoch": 8.231805929919137,
"grad_norm": 0.8613353371620178,
"learning_rate": 0.00010640690771721531,
"loss": 3.1881,
"step": 76350
},
{
"epoch": 8.237196765498652,
"grad_norm": 0.8411716818809509,
"learning_rate": 0.00010608310847274689,
"loss": 3.1974,
"step": 76400
},
{
"epoch": 8.242587601078167,
"grad_norm": 0.9113624691963196,
"learning_rate": 0.00010575930922827846,
"loss": 3.1991,
"step": 76450
},
{
"epoch": 8.247978436657682,
"grad_norm": 0.8439015746116638,
"learning_rate": 0.00010543550998381003,
"loss": 3.1769,
"step": 76500
},
{
"epoch": 8.253369272237197,
"grad_norm": 0.8146746158599854,
"learning_rate": 0.0001051117107393416,
"loss": 3.176,
"step": 76550
},
{
"epoch": 8.25876010781671,
"grad_norm": 0.8571010828018188,
"learning_rate": 0.00010478791149487316,
"loss": 3.1831,
"step": 76600
},
{
"epoch": 8.264150943396226,
"grad_norm": 0.8149386644363403,
"learning_rate": 0.00010446411225040474,
"loss": 3.1922,
"step": 76650
},
{
"epoch": 8.269541778975741,
"grad_norm": 0.8525885343551636,
"learning_rate": 0.00010414031300593632,
"loss": 3.1834,
"step": 76700
},
{
"epoch": 8.274932614555256,
"grad_norm": 0.8435251712799072,
"learning_rate": 0.00010381651376146787,
"loss": 3.1785,
"step": 76750
},
{
"epoch": 8.280323450134771,
"grad_norm": 0.8805217146873474,
"learning_rate": 0.00010349271451699945,
"loss": 3.1966,
"step": 76800
},
{
"epoch": 8.285714285714286,
"grad_norm": 0.8451163172721863,
"learning_rate": 0.00010316891527253102,
"loss": 3.1841,
"step": 76850
},
{
"epoch": 8.2911051212938,
"grad_norm": 0.857768714427948,
"learning_rate": 0.0001028451160280626,
"loss": 3.1886,
"step": 76900
},
{
"epoch": 8.296495956873315,
"grad_norm": 0.7757983207702637,
"learning_rate": 0.00010252131678359416,
"loss": 3.1946,
"step": 76950
},
{
"epoch": 8.30188679245283,
"grad_norm": 0.8731544613838196,
"learning_rate": 0.00010219751753912573,
"loss": 3.1892,
"step": 77000
},
{
"epoch": 8.30188679245283,
"eval_accuracy": 0.3908692024584672,
"eval_loss": 3.3317456245422363,
"eval_runtime": 144.5864,
"eval_samples_per_second": 124.569,
"eval_steps_per_second": 7.788,
"step": 77000
},
{
"epoch": 8.307277628032345,
"grad_norm": 0.8421865701675415,
"learning_rate": 0.0001018737182946573,
"loss": 3.204,
"step": 77050
},
{
"epoch": 8.31266846361186,
"grad_norm": 0.8552886843681335,
"learning_rate": 0.00010154991905018887,
"loss": 3.1945,
"step": 77100
},
{
"epoch": 8.318059299191376,
"grad_norm": 0.8377566337585449,
"learning_rate": 0.00010122611980572045,
"loss": 3.1763,
"step": 77150
},
{
"epoch": 8.323450134770889,
"grad_norm": 0.8450587391853333,
"learning_rate": 0.00010090232056125201,
"loss": 3.1859,
"step": 77200
},
{
"epoch": 8.328840970350404,
"grad_norm": 0.8754865527153015,
"learning_rate": 0.00010057852131678359,
"loss": 3.1952,
"step": 77250
},
{
"epoch": 8.33423180592992,
"grad_norm": 0.864917516708374,
"learning_rate": 0.00010025472207231515,
"loss": 3.1895,
"step": 77300
},
{
"epoch": 8.339622641509434,
"grad_norm": 0.8625168800354004,
"learning_rate": 9.993092282784673e-05,
"loss": 3.1927,
"step": 77350
},
{
"epoch": 8.34501347708895,
"grad_norm": 0.8321080803871155,
"learning_rate": 9.96071235833783e-05,
"loss": 3.1905,
"step": 77400
},
{
"epoch": 8.350404312668463,
"grad_norm": 0.8598225116729736,
"learning_rate": 9.928332433890986e-05,
"loss": 3.1889,
"step": 77450
},
{
"epoch": 8.355795148247978,
"grad_norm": 0.8627040386199951,
"learning_rate": 9.895952509444144e-05,
"loss": 3.206,
"step": 77500
},
{
"epoch": 8.361185983827493,
"grad_norm": 0.8766219019889832,
"learning_rate": 9.863572584997302e-05,
"loss": 3.1915,
"step": 77550
},
{
"epoch": 8.366576819407008,
"grad_norm": 0.8476821184158325,
"learning_rate": 9.831192660550457e-05,
"loss": 3.1989,
"step": 77600
},
{
"epoch": 8.371967654986523,
"grad_norm": 0.8090409636497498,
"learning_rate": 9.798812736103614e-05,
"loss": 3.181,
"step": 77650
},
{
"epoch": 8.377358490566039,
"grad_norm": 0.8444557189941406,
"learning_rate": 9.766432811656772e-05,
"loss": 3.1876,
"step": 77700
},
{
"epoch": 8.382749326145552,
"grad_norm": 0.8485190272331238,
"learning_rate": 9.73405288720993e-05,
"loss": 3.1766,
"step": 77750
},
{
"epoch": 8.388140161725067,
"grad_norm": 0.8302055597305298,
"learning_rate": 9.701672962763086e-05,
"loss": 3.1932,
"step": 77800
},
{
"epoch": 8.393530997304582,
"grad_norm": 0.8414790630340576,
"learning_rate": 9.669293038316243e-05,
"loss": 3.1906,
"step": 77850
},
{
"epoch": 8.398921832884097,
"grad_norm": 0.8987350463867188,
"learning_rate": 9.6369131138694e-05,
"loss": 3.1754,
"step": 77900
},
{
"epoch": 8.404312668463612,
"grad_norm": 0.8369892239570618,
"learning_rate": 9.604533189422557e-05,
"loss": 3.1942,
"step": 77950
},
{
"epoch": 8.409703504043126,
"grad_norm": 0.8732051253318787,
"learning_rate": 9.572153264975715e-05,
"loss": 3.1795,
"step": 78000
},
{
"epoch": 8.409703504043126,
"eval_accuracy": 0.3911585450987171,
"eval_loss": 3.3292782306671143,
"eval_runtime": 144.8275,
"eval_samples_per_second": 124.362,
"eval_steps_per_second": 7.775,
"step": 78000
},
{
"epoch": 8.415094339622641,
"grad_norm": 0.809229850769043,
"learning_rate": 9.539773340528871e-05,
"loss": 3.2037,
"step": 78050
},
{
"epoch": 8.420485175202156,
"grad_norm": 0.9127547144889832,
"learning_rate": 9.507393416082027e-05,
"loss": 3.1848,
"step": 78100
},
{
"epoch": 8.425876010781671,
"grad_norm": 0.8915658593177795,
"learning_rate": 9.475013491635185e-05,
"loss": 3.186,
"step": 78150
},
{
"epoch": 8.431266846361186,
"grad_norm": 0.8642625212669373,
"learning_rate": 9.442633567188343e-05,
"loss": 3.1958,
"step": 78200
},
{
"epoch": 8.436657681940702,
"grad_norm": 0.8737843036651611,
"learning_rate": 9.410253642741498e-05,
"loss": 3.2002,
"step": 78250
},
{
"epoch": 8.442048517520215,
"grad_norm": 0.8277490735054016,
"learning_rate": 9.377873718294656e-05,
"loss": 3.1863,
"step": 78300
},
{
"epoch": 8.44743935309973,
"grad_norm": 0.8157387971878052,
"learning_rate": 9.345493793847814e-05,
"loss": 3.2046,
"step": 78350
},
{
"epoch": 8.452830188679245,
"grad_norm": 0.8451686501502991,
"learning_rate": 9.313113869400971e-05,
"loss": 3.1796,
"step": 78400
},
{
"epoch": 8.45822102425876,
"grad_norm": 0.9138479828834534,
"learning_rate": 9.280733944954126e-05,
"loss": 3.1717,
"step": 78450
},
{
"epoch": 8.463611859838275,
"grad_norm": 0.8984581828117371,
"learning_rate": 9.248354020507284e-05,
"loss": 3.197,
"step": 78500
},
{
"epoch": 8.46900269541779,
"grad_norm": 0.8548042178153992,
"learning_rate": 9.215974096060442e-05,
"loss": 3.1831,
"step": 78550
},
{
"epoch": 8.474393530997304,
"grad_norm": 0.8356130123138428,
"learning_rate": 9.1835941716136e-05,
"loss": 3.1798,
"step": 78600
},
{
"epoch": 8.479784366576819,
"grad_norm": 0.8204013705253601,
"learning_rate": 9.151214247166756e-05,
"loss": 3.1918,
"step": 78650
},
{
"epoch": 8.485175202156334,
"grad_norm": 0.8625116944313049,
"learning_rate": 9.118834322719913e-05,
"loss": 3.2035,
"step": 78700
},
{
"epoch": 8.49056603773585,
"grad_norm": 0.8142582774162292,
"learning_rate": 9.08645439827307e-05,
"loss": 3.2203,
"step": 78750
},
{
"epoch": 8.495956873315365,
"grad_norm": 0.8834460973739624,
"learning_rate": 9.054074473826227e-05,
"loss": 3.1837,
"step": 78800
},
{
"epoch": 8.501347708894878,
"grad_norm": 0.8685952425003052,
"learning_rate": 9.021694549379385e-05,
"loss": 3.1928,
"step": 78850
},
{
"epoch": 8.506738544474393,
"grad_norm": 0.8517999649047852,
"learning_rate": 8.989314624932541e-05,
"loss": 3.1989,
"step": 78900
},
{
"epoch": 8.512129380053908,
"grad_norm": 0.8131477236747742,
"learning_rate": 8.956934700485697e-05,
"loss": 3.1803,
"step": 78950
},
{
"epoch": 8.517520215633423,
"grad_norm": 0.8599629402160645,
"learning_rate": 8.924554776038855e-05,
"loss": 3.179,
"step": 79000
},
{
"epoch": 8.517520215633423,
"eval_accuracy": 0.391562733841387,
"eval_loss": 3.325984239578247,
"eval_runtime": 144.6148,
"eval_samples_per_second": 124.545,
"eval_steps_per_second": 7.786,
"step": 79000
},
{
"epoch": 8.522911051212938,
"grad_norm": 0.8564680218696594,
"learning_rate": 8.892174851592013e-05,
"loss": 3.1729,
"step": 79050
},
{
"epoch": 8.528301886792454,
"grad_norm": 0.8181639313697815,
"learning_rate": 8.859794927145168e-05,
"loss": 3.1896,
"step": 79100
},
{
"epoch": 8.533692722371967,
"grad_norm": 0.8143757581710815,
"learning_rate": 8.827415002698326e-05,
"loss": 3.1927,
"step": 79150
},
{
"epoch": 8.539083557951482,
"grad_norm": 0.8497621417045593,
"learning_rate": 8.795035078251483e-05,
"loss": 3.1863,
"step": 79200
},
{
"epoch": 8.544474393530997,
"grad_norm": 0.8546693325042725,
"learning_rate": 8.762655153804641e-05,
"loss": 3.1824,
"step": 79250
},
{
"epoch": 8.549865229110512,
"grad_norm": 0.8754087686538696,
"learning_rate": 8.730275229357798e-05,
"loss": 3.1838,
"step": 79300
},
{
"epoch": 8.555256064690028,
"grad_norm": 0.8723635077476501,
"learning_rate": 8.697895304910954e-05,
"loss": 3.1863,
"step": 79350
},
{
"epoch": 8.560646900269543,
"grad_norm": 0.8661187887191772,
"learning_rate": 8.665515380464112e-05,
"loss": 3.1918,
"step": 79400
},
{
"epoch": 8.566037735849056,
"grad_norm": 0.86223965883255,
"learning_rate": 8.633135456017268e-05,
"loss": 3.2093,
"step": 79450
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.8359453082084656,
"learning_rate": 8.600755531570426e-05,
"loss": 3.1824,
"step": 79500
},
{
"epoch": 8.576819407008086,
"grad_norm": 0.8947768807411194,
"learning_rate": 8.568375607123582e-05,
"loss": 3.1772,
"step": 79550
},
{
"epoch": 8.582210242587601,
"grad_norm": 0.8508313298225403,
"learning_rate": 8.53599568267674e-05,
"loss": 3.1889,
"step": 79600
},
{
"epoch": 8.587601078167117,
"grad_norm": 0.8601868748664856,
"learning_rate": 8.503615758229897e-05,
"loss": 3.1874,
"step": 79650
},
{
"epoch": 8.59299191374663,
"grad_norm": 0.8759069442749023,
"learning_rate": 8.471235833783054e-05,
"loss": 3.1996,
"step": 79700
},
{
"epoch": 8.598382749326145,
"grad_norm": 0.8844007849693298,
"learning_rate": 8.438855909336211e-05,
"loss": 3.1962,
"step": 79750
},
{
"epoch": 8.60377358490566,
"grad_norm": 0.8556910753250122,
"learning_rate": 8.406475984889367e-05,
"loss": 3.2086,
"step": 79800
},
{
"epoch": 8.609164420485175,
"grad_norm": 0.831992506980896,
"learning_rate": 8.374096060442525e-05,
"loss": 3.1863,
"step": 79850
},
{
"epoch": 8.61455525606469,
"grad_norm": 0.8430582880973816,
"learning_rate": 8.341716135995683e-05,
"loss": 3.1847,
"step": 79900
},
{
"epoch": 8.619946091644206,
"grad_norm": 0.8734549283981323,
"learning_rate": 8.309336211548838e-05,
"loss": 3.2038,
"step": 79950
},
{
"epoch": 8.625336927223719,
"grad_norm": 0.9261046051979065,
"learning_rate": 8.276956287101996e-05,
"loss": 3.2147,
"step": 80000
},
{
"epoch": 8.625336927223719,
"eval_accuracy": 0.39186641866282845,
"eval_loss": 3.32257342338562,
"eval_runtime": 144.7379,
"eval_samples_per_second": 124.439,
"eval_steps_per_second": 7.78,
"step": 80000
},
{
"epoch": 8.630727762803234,
"grad_norm": 0.8802093863487244,
"learning_rate": 8.244576362655153e-05,
"loss": 3.1817,
"step": 80050
},
{
"epoch": 8.63611859838275,
"grad_norm": 0.8583772778511047,
"learning_rate": 8.212196438208311e-05,
"loss": 3.1909,
"step": 80100
},
{
"epoch": 8.641509433962264,
"grad_norm": 0.8642351627349854,
"learning_rate": 8.179816513761467e-05,
"loss": 3.1963,
"step": 80150
},
{
"epoch": 8.64690026954178,
"grad_norm": 0.8973760604858398,
"learning_rate": 8.147436589314624e-05,
"loss": 3.2099,
"step": 80200
},
{
"epoch": 8.652291105121293,
"grad_norm": 0.8562058210372925,
"learning_rate": 8.115056664867782e-05,
"loss": 3.1906,
"step": 80250
},
{
"epoch": 8.657681940700808,
"grad_norm": 0.8994555473327637,
"learning_rate": 8.083324338909874e-05,
"loss": 3.2017,
"step": 80300
},
{
"epoch": 8.663072776280323,
"grad_norm": 0.8898001313209534,
"learning_rate": 8.050944414463032e-05,
"loss": 3.1889,
"step": 80350
},
{
"epoch": 8.668463611859838,
"grad_norm": 0.9196224212646484,
"learning_rate": 8.01856449001619e-05,
"loss": 3.1986,
"step": 80400
},
{
"epoch": 8.673854447439354,
"grad_norm": 0.8797485828399658,
"learning_rate": 7.986184565569347e-05,
"loss": 3.1926,
"step": 80450
},
{
"epoch": 8.679245283018869,
"grad_norm": 0.8511892557144165,
"learning_rate": 7.953804641122503e-05,
"loss": 3.1938,
"step": 80500
},
{
"epoch": 8.684636118598382,
"grad_norm": 0.8300173282623291,
"learning_rate": 7.92142471667566e-05,
"loss": 3.1853,
"step": 80550
},
{
"epoch": 8.690026954177897,
"grad_norm": 0.8500287532806396,
"learning_rate": 7.889044792228818e-05,
"loss": 3.1736,
"step": 80600
},
{
"epoch": 8.695417789757412,
"grad_norm": 0.8612384796142578,
"learning_rate": 7.856664867781974e-05,
"loss": 3.1919,
"step": 80650
},
{
"epoch": 8.700808625336927,
"grad_norm": 0.8997857570648193,
"learning_rate": 7.824284943335132e-05,
"loss": 3.1807,
"step": 80700
},
{
"epoch": 8.706199460916443,
"grad_norm": 0.9068294167518616,
"learning_rate": 7.791905018888289e-05,
"loss": 3.1832,
"step": 80750
},
{
"epoch": 8.711590296495956,
"grad_norm": 0.8837656378746033,
"learning_rate": 7.760172692930382e-05,
"loss": 3.1801,
"step": 80800
},
{
"epoch": 8.716981132075471,
"grad_norm": 0.8144077658653259,
"learning_rate": 7.727792768483539e-05,
"loss": 3.1855,
"step": 80850
},
{
"epoch": 8.722371967654986,
"grad_norm": 0.8907399773597717,
"learning_rate": 7.695412844036697e-05,
"loss": 3.2069,
"step": 80900
},
{
"epoch": 8.727762803234501,
"grad_norm": 0.8766629695892334,
"learning_rate": 7.663032919589854e-05,
"loss": 3.185,
"step": 80950
},
{
"epoch": 8.733153638814017,
"grad_norm": 0.9268998503684998,
"learning_rate": 7.630652995143011e-05,
"loss": 3.1679,
"step": 81000
},
{
"epoch": 8.733153638814017,
"eval_accuracy": 0.3921787957152951,
"eval_loss": 3.319448947906494,
"eval_runtime": 144.5661,
"eval_samples_per_second": 124.587,
"eval_steps_per_second": 7.789,
"step": 81000
},
{
"epoch": 8.738544474393532,
"grad_norm": 0.8556450009346008,
"learning_rate": 7.598273070696167e-05,
"loss": 3.189,
"step": 81050
},
{
"epoch": 8.743935309973045,
"grad_norm": 0.8391192555427551,
"learning_rate": 7.565893146249325e-05,
"loss": 3.1888,
"step": 81100
},
{
"epoch": 8.74932614555256,
"grad_norm": 0.8569978475570679,
"learning_rate": 7.533513221802481e-05,
"loss": 3.1793,
"step": 81150
},
{
"epoch": 8.754716981132075,
"grad_norm": 0.83697509765625,
"learning_rate": 7.501133297355639e-05,
"loss": 3.2029,
"step": 81200
},
{
"epoch": 8.76010781671159,
"grad_norm": 0.9166164994239807,
"learning_rate": 7.468753372908796e-05,
"loss": 3.1904,
"step": 81250
},
{
"epoch": 8.765498652291106,
"grad_norm": 0.8388158082962036,
"learning_rate": 7.436373448461953e-05,
"loss": 3.1851,
"step": 81300
},
{
"epoch": 8.77088948787062,
"grad_norm": 0.8800873160362244,
"learning_rate": 7.40399352401511e-05,
"loss": 3.1813,
"step": 81350
},
{
"epoch": 8.776280323450134,
"grad_norm": 0.875529408454895,
"learning_rate": 7.371613599568268e-05,
"loss": 3.1869,
"step": 81400
},
{
"epoch": 8.78167115902965,
"grad_norm": 0.8746694922447205,
"learning_rate": 7.339233675121424e-05,
"loss": 3.1953,
"step": 81450
},
{
"epoch": 8.787061994609164,
"grad_norm": 0.8494973182678223,
"learning_rate": 7.306853750674582e-05,
"loss": 3.2106,
"step": 81500
},
{
"epoch": 8.79245283018868,
"grad_norm": 0.850772500038147,
"learning_rate": 7.274473826227738e-05,
"loss": 3.1906,
"step": 81550
},
{
"epoch": 8.797843665768195,
"grad_norm": 0.839963436126709,
"learning_rate": 7.242093901780896e-05,
"loss": 3.1795,
"step": 81600
},
{
"epoch": 8.80323450134771,
"grad_norm": 0.8491251468658447,
"learning_rate": 7.209713977334052e-05,
"loss": 3.192,
"step": 81650
},
{
"epoch": 8.808625336927223,
"grad_norm": 0.8784267902374268,
"learning_rate": 7.17733405288721e-05,
"loss": 3.1744,
"step": 81700
},
{
"epoch": 8.814016172506738,
"grad_norm": 0.9126667380332947,
"learning_rate": 7.144954128440366e-05,
"loss": 3.1796,
"step": 81750
},
{
"epoch": 8.819407008086253,
"grad_norm": 0.8692651987075806,
"learning_rate": 7.112574203993523e-05,
"loss": 3.1832,
"step": 81800
},
{
"epoch": 8.824797843665769,
"grad_norm": 0.8770309090614319,
"learning_rate": 7.08019427954668e-05,
"loss": 3.2016,
"step": 81850
},
{
"epoch": 8.830188679245284,
"grad_norm": 0.8823293447494507,
"learning_rate": 7.047814355099837e-05,
"loss": 3.1873,
"step": 81900
},
{
"epoch": 8.835579514824797,
"grad_norm": 0.8704630732536316,
"learning_rate": 7.015434430652993e-05,
"loss": 3.2026,
"step": 81950
},
{
"epoch": 8.840970350404312,
"grad_norm": 0.9046744704246521,
"learning_rate": 6.983054506206151e-05,
"loss": 3.1853,
"step": 82000
},
{
"epoch": 8.840970350404312,
"eval_accuracy": 0.3925066014778314,
"eval_loss": 3.3156585693359375,
"eval_runtime": 145.1073,
"eval_samples_per_second": 124.122,
"eval_steps_per_second": 7.76,
"step": 82000
},
{
"epoch": 8.846361185983827,
"grad_norm": 0.8452231884002686,
"learning_rate": 6.950674581759309e-05,
"loss": 3.1848,
"step": 82050
},
{
"epoch": 8.851752021563343,
"grad_norm": 0.8799333572387695,
"learning_rate": 6.918294657312465e-05,
"loss": 3.1834,
"step": 82100
},
{
"epoch": 8.857142857142858,
"grad_norm": 0.8334299325942993,
"learning_rate": 6.885914732865623e-05,
"loss": 3.1938,
"step": 82150
},
{
"epoch": 8.862533692722373,
"grad_norm": 0.8946378827095032,
"learning_rate": 6.85353480841878e-05,
"loss": 3.1897,
"step": 82200
},
{
"epoch": 8.867924528301886,
"grad_norm": 0.8897405862808228,
"learning_rate": 6.821154883971937e-05,
"loss": 3.2168,
"step": 82250
},
{
"epoch": 8.873315363881401,
"grad_norm": 0.8711948990821838,
"learning_rate": 6.788774959525094e-05,
"loss": 3.2046,
"step": 82300
},
{
"epoch": 8.878706199460916,
"grad_norm": 0.885848879814148,
"learning_rate": 6.756395035078252e-05,
"loss": 3.1739,
"step": 82350
},
{
"epoch": 8.884097035040432,
"grad_norm": 0.8410148620605469,
"learning_rate": 6.724015110631408e-05,
"loss": 3.1843,
"step": 82400
},
{
"epoch": 8.889487870619947,
"grad_norm": 0.8781217336654663,
"learning_rate": 6.691635186184566e-05,
"loss": 3.1943,
"step": 82450
},
{
"epoch": 8.89487870619946,
"grad_norm": 0.9249593615531921,
"learning_rate": 6.659255261737722e-05,
"loss": 3.1968,
"step": 82500
},
{
"epoch": 8.900269541778975,
"grad_norm": 0.8449723124504089,
"learning_rate": 6.626875337290879e-05,
"loss": 3.201,
"step": 82550
},
{
"epoch": 8.90566037735849,
"grad_norm": 0.8960906863212585,
"learning_rate": 6.594495412844036e-05,
"loss": 3.1813,
"step": 82600
},
{
"epoch": 8.911051212938006,
"grad_norm": 0.8776979446411133,
"learning_rate": 6.562115488397193e-05,
"loss": 3.195,
"step": 82650
},
{
"epoch": 8.91644204851752,
"grad_norm": 0.9270035028457642,
"learning_rate": 6.52973556395035e-05,
"loss": 3.1869,
"step": 82700
},
{
"epoch": 8.921832884097036,
"grad_norm": 0.8415541052818298,
"learning_rate": 6.497355639503507e-05,
"loss": 3.2007,
"step": 82750
},
{
"epoch": 8.92722371967655,
"grad_norm": 0.8638697862625122,
"learning_rate": 6.464975715056663e-05,
"loss": 3.196,
"step": 82800
},
{
"epoch": 8.932614555256064,
"grad_norm": 0.8706114888191223,
"learning_rate": 6.432595790609821e-05,
"loss": 3.2005,
"step": 82850
},
{
"epoch": 8.93800539083558,
"grad_norm": 0.8472486734390259,
"learning_rate": 6.400215866162979e-05,
"loss": 3.1974,
"step": 82900
},
{
"epoch": 8.943396226415095,
"grad_norm": 0.8397724032402039,
"learning_rate": 6.367835941716135e-05,
"loss": 3.1775,
"step": 82950
},
{
"epoch": 8.94878706199461,
"grad_norm": 0.8822309970855713,
"learning_rate": 6.335456017269293e-05,
"loss": 3.1983,
"step": 83000
},
{
"epoch": 8.94878706199461,
"eval_accuracy": 0.3929463197148166,
"eval_loss": 3.3126132488250732,
"eval_runtime": 144.3502,
"eval_samples_per_second": 124.773,
"eval_steps_per_second": 7.8,
"step": 83000
},
{
"epoch": 8.954177897574123,
"grad_norm": 0.8723492622375488,
"learning_rate": 6.30307609282245e-05,
"loss": 3.1766,
"step": 83050
},
{
"epoch": 8.959568733153638,
"grad_norm": 0.8709846138954163,
"learning_rate": 6.270696168375607e-05,
"loss": 3.175,
"step": 83100
},
{
"epoch": 8.964959568733153,
"grad_norm": 0.8878198862075806,
"learning_rate": 6.238316243928764e-05,
"loss": 3.1941,
"step": 83150
},
{
"epoch": 8.970350404312669,
"grad_norm": 0.8836817145347595,
"learning_rate": 6.205936319481921e-05,
"loss": 3.2027,
"step": 83200
},
{
"epoch": 8.975741239892184,
"grad_norm": 0.8428633809089661,
"learning_rate": 6.173556395035078e-05,
"loss": 3.1906,
"step": 83250
},
{
"epoch": 8.981132075471699,
"grad_norm": 0.9594284892082214,
"learning_rate": 6.141176470588236e-05,
"loss": 3.1918,
"step": 83300
},
{
"epoch": 8.986522911051212,
"grad_norm": 0.8987261056900024,
"learning_rate": 6.108796546141392e-05,
"loss": 3.173,
"step": 83350
},
{
"epoch": 8.991913746630727,
"grad_norm": 0.8823418617248535,
"learning_rate": 6.076416621694549e-05,
"loss": 3.1891,
"step": 83400
},
{
"epoch": 8.997304582210242,
"grad_norm": 0.8893031477928162,
"learning_rate": 6.0440366972477055e-05,
"loss": 3.2037,
"step": 83450
},
{
"epoch": 9.002695417789758,
"grad_norm": 0.9136379361152649,
"learning_rate": 6.011656772800863e-05,
"loss": 3.1651,
"step": 83500
},
{
"epoch": 9.008086253369273,
"grad_norm": 0.8600441217422485,
"learning_rate": 5.9792768483540197e-05,
"loss": 3.1415,
"step": 83550
},
{
"epoch": 9.013477088948788,
"grad_norm": 0.8468543887138367,
"learning_rate": 5.946896923907177e-05,
"loss": 3.1207,
"step": 83600
},
{
"epoch": 9.018867924528301,
"grad_norm": 0.9664818644523621,
"learning_rate": 5.9145169994603345e-05,
"loss": 3.1457,
"step": 83650
},
{
"epoch": 9.024258760107816,
"grad_norm": 0.8863913416862488,
"learning_rate": 5.882137075013491e-05,
"loss": 3.1286,
"step": 83700
},
{
"epoch": 9.029649595687331,
"grad_norm": 0.9203535914421082,
"learning_rate": 5.849757150566649e-05,
"loss": 3.1146,
"step": 83750
},
{
"epoch": 9.035040431266847,
"grad_norm": 0.9077261090278625,
"learning_rate": 5.817377226119805e-05,
"loss": 3.1489,
"step": 83800
},
{
"epoch": 9.040431266846362,
"grad_norm": 0.8649826645851135,
"learning_rate": 5.784997301672963e-05,
"loss": 3.1433,
"step": 83850
},
{
"epoch": 9.045822102425875,
"grad_norm": 0.8495699763298035,
"learning_rate": 5.752617377226119e-05,
"loss": 3.1463,
"step": 83900
},
{
"epoch": 9.05121293800539,
"grad_norm": 0.9024592638015747,
"learning_rate": 5.7202374527792764e-05,
"loss": 3.1301,
"step": 83950
},
{
"epoch": 9.056603773584905,
"grad_norm": 0.8416799902915955,
"learning_rate": 5.6878575283324335e-05,
"loss": 3.1342,
"step": 84000
},
{
"epoch": 9.056603773584905,
"eval_accuracy": 0.39281093821659974,
"eval_loss": 3.3160035610198975,
"eval_runtime": 144.3133,
"eval_samples_per_second": 124.805,
"eval_steps_per_second": 7.802,
"step": 84000
},
{
"epoch": 9.06199460916442,
"grad_norm": 0.9014863967895508,
"learning_rate": 5.6554776038855905e-05,
"loss": 3.1314,
"step": 84050
},
{
"epoch": 9.067385444743936,
"grad_norm": 0.8855228424072266,
"learning_rate": 5.623097679438747e-05,
"loss": 3.1385,
"step": 84100
},
{
"epoch": 9.07277628032345,
"grad_norm": 0.8690458536148071,
"learning_rate": 5.590717754991905e-05,
"loss": 3.1295,
"step": 84150
},
{
"epoch": 9.078167115902964,
"grad_norm": 0.8666431903839111,
"learning_rate": 5.558337830545061e-05,
"loss": 3.1376,
"step": 84200
},
{
"epoch": 9.08355795148248,
"grad_norm": 0.8776350617408752,
"learning_rate": 5.525957906098219e-05,
"loss": 3.1337,
"step": 84250
},
{
"epoch": 9.088948787061994,
"grad_norm": 0.8806595206260681,
"learning_rate": 5.493577981651375e-05,
"loss": 3.1243,
"step": 84300
},
{
"epoch": 9.09433962264151,
"grad_norm": 0.8908720016479492,
"learning_rate": 5.4611980572045324e-05,
"loss": 3.1214,
"step": 84350
},
{
"epoch": 9.099730458221025,
"grad_norm": 0.9047110676765442,
"learning_rate": 5.4288181327576895e-05,
"loss": 3.1415,
"step": 84400
},
{
"epoch": 9.10512129380054,
"grad_norm": 0.8715435862541199,
"learning_rate": 5.3964382083108466e-05,
"loss": 3.1467,
"step": 84450
},
{
"epoch": 9.110512129380053,
"grad_norm": 0.8695960640907288,
"learning_rate": 5.3640582838640043e-05,
"loss": 3.1282,
"step": 84500
},
{
"epoch": 9.115902964959568,
"grad_norm": 0.8799136281013489,
"learning_rate": 5.331678359417161e-05,
"loss": 3.1293,
"step": 84550
},
{
"epoch": 9.121293800539084,
"grad_norm": 0.9170258641242981,
"learning_rate": 5.2992984349703185e-05,
"loss": 3.1545,
"step": 84600
},
{
"epoch": 9.126684636118599,
"grad_norm": 0.8688546419143677,
"learning_rate": 5.266918510523475e-05,
"loss": 3.1321,
"step": 84650
},
{
"epoch": 9.132075471698114,
"grad_norm": 0.8524189591407776,
"learning_rate": 5.234538586076632e-05,
"loss": 3.1319,
"step": 84700
},
{
"epoch": 9.137466307277627,
"grad_norm": 0.8805209994316101,
"learning_rate": 5.202158661629789e-05,
"loss": 3.1293,
"step": 84750
},
{
"epoch": 9.142857142857142,
"grad_norm": 0.8989729881286621,
"learning_rate": 5.170426335671883e-05,
"loss": 3.1294,
"step": 84800
},
{
"epoch": 9.148247978436657,
"grad_norm": 0.8614277839660645,
"learning_rate": 5.1380464112250394e-05,
"loss": 3.1353,
"step": 84850
},
{
"epoch": 9.153638814016173,
"grad_norm": 0.9164130687713623,
"learning_rate": 5.105666486778197e-05,
"loss": 3.1406,
"step": 84900
},
{
"epoch": 9.159029649595688,
"grad_norm": 0.8775104880332947,
"learning_rate": 5.0732865623313536e-05,
"loss": 3.1511,
"step": 84950
},
{
"epoch": 9.164420485175203,
"grad_norm": 0.8897138833999634,
"learning_rate": 5.040906637884511e-05,
"loss": 3.1281,
"step": 85000
},
{
"epoch": 9.164420485175203,
"eval_accuracy": 0.39302259404206236,
"eval_loss": 3.314425230026245,
"eval_runtime": 144.4145,
"eval_samples_per_second": 124.717,
"eval_steps_per_second": 7.797,
"step": 85000
},
{
"epoch": 9.169811320754716,
"grad_norm": 0.879646897315979,
"learning_rate": 5.0085267134376684e-05,
"loss": 3.1298,
"step": 85050
},
{
"epoch": 9.175202156334231,
"grad_norm": 0.8806794881820679,
"learning_rate": 4.9761467889908255e-05,
"loss": 3.1558,
"step": 85100
},
{
"epoch": 9.180592991913747,
"grad_norm": 0.8712208867073059,
"learning_rate": 4.9437668645439826e-05,
"loss": 3.1331,
"step": 85150
},
{
"epoch": 9.185983827493262,
"grad_norm": 0.8732476830482483,
"learning_rate": 4.911386940097139e-05,
"loss": 3.151,
"step": 85200
},
{
"epoch": 9.191374663072777,
"grad_norm": 0.8525885939598083,
"learning_rate": 4.879007015650297e-05,
"loss": 3.1314,
"step": 85250
},
{
"epoch": 9.19676549865229,
"grad_norm": 0.8860559463500977,
"learning_rate": 4.846627091203453e-05,
"loss": 3.1299,
"step": 85300
},
{
"epoch": 9.202156334231805,
"grad_norm": 0.874052882194519,
"learning_rate": 4.814247166756611e-05,
"loss": 3.1431,
"step": 85350
},
{
"epoch": 9.20754716981132,
"grad_norm": 0.9141334891319275,
"learning_rate": 4.7818672423097674e-05,
"loss": 3.1152,
"step": 85400
},
{
"epoch": 9.212938005390836,
"grad_norm": 0.9071762561798096,
"learning_rate": 4.7494873178629244e-05,
"loss": 3.1379,
"step": 85450
},
{
"epoch": 9.21832884097035,
"grad_norm": 0.8646944761276245,
"learning_rate": 4.7171073934160815e-05,
"loss": 3.156,
"step": 85500
},
{
"epoch": 9.223719676549866,
"grad_norm": 0.9076562523841858,
"learning_rate": 4.6847274689692386e-05,
"loss": 3.137,
"step": 85550
},
{
"epoch": 9.22911051212938,
"grad_norm": 0.9522117376327515,
"learning_rate": 4.652347544522396e-05,
"loss": 3.1492,
"step": 85600
},
{
"epoch": 9.234501347708894,
"grad_norm": 0.8870116472244263,
"learning_rate": 4.619967620075553e-05,
"loss": 3.1478,
"step": 85650
},
{
"epoch": 9.23989218328841,
"grad_norm": 0.8715599775314331,
"learning_rate": 4.587587695628709e-05,
"loss": 3.1358,
"step": 85700
},
{
"epoch": 9.245283018867925,
"grad_norm": 0.9028836488723755,
"learning_rate": 4.555207771181867e-05,
"loss": 3.1411,
"step": 85750
},
{
"epoch": 9.25067385444744,
"grad_norm": 0.8923622965812683,
"learning_rate": 4.522827846735024e-05,
"loss": 3.1441,
"step": 85800
},
{
"epoch": 9.256064690026955,
"grad_norm": 0.8909099102020264,
"learning_rate": 4.490447922288181e-05,
"loss": 3.1425,
"step": 85850
},
{
"epoch": 9.261455525606468,
"grad_norm": 0.9339903593063354,
"learning_rate": 4.458067997841338e-05,
"loss": 3.1517,
"step": 85900
},
{
"epoch": 9.266846361185983,
"grad_norm": 0.9018579125404358,
"learning_rate": 4.4256880733944947e-05,
"loss": 3.1367,
"step": 85950
},
{
"epoch": 9.272237196765499,
"grad_norm": 0.8876761794090271,
"learning_rate": 4.3933081489476524e-05,
"loss": 3.129,
"step": 86000
},
{
"epoch": 9.272237196765499,
"eval_accuracy": 0.39344416724678255,
"eval_loss": 3.3115830421447754,
"eval_runtime": 145.8058,
"eval_samples_per_second": 123.527,
"eval_steps_per_second": 7.723,
"step": 86000
},
{
"epoch": 9.277628032345014,
"grad_norm": 0.8612155914306641,
"learning_rate": 4.360928224500809e-05,
"loss": 3.1131,
"step": 86050
},
{
"epoch": 9.283018867924529,
"grad_norm": 0.8957250118255615,
"learning_rate": 4.3285483000539666e-05,
"loss": 3.1292,
"step": 86100
},
{
"epoch": 9.288409703504042,
"grad_norm": 0.9094768166542053,
"learning_rate": 4.296168375607123e-05,
"loss": 3.1504,
"step": 86150
},
{
"epoch": 9.293800539083557,
"grad_norm": 0.8822762966156006,
"learning_rate": 4.263788451160281e-05,
"loss": 3.1272,
"step": 86200
},
{
"epoch": 9.299191374663073,
"grad_norm": 0.8798335194587708,
"learning_rate": 4.231408526713437e-05,
"loss": 3.1571,
"step": 86250
},
{
"epoch": 9.304582210242588,
"grad_norm": 0.9003534317016602,
"learning_rate": 4.199028602266594e-05,
"loss": 3.1374,
"step": 86300
},
{
"epoch": 9.309973045822103,
"grad_norm": 0.8739389777183533,
"learning_rate": 4.1666486778197514e-05,
"loss": 3.1306,
"step": 86350
},
{
"epoch": 9.315363881401618,
"grad_norm": 0.8704226613044739,
"learning_rate": 4.1342687533729085e-05,
"loss": 3.1398,
"step": 86400
},
{
"epoch": 9.320754716981131,
"grad_norm": 0.8752451539039612,
"learning_rate": 4.101888828926065e-05,
"loss": 3.1445,
"step": 86450
},
{
"epoch": 9.326145552560646,
"grad_norm": 0.9010379910469055,
"learning_rate": 4.0695089044792226e-05,
"loss": 3.1416,
"step": 86500
},
{
"epoch": 9.331536388140162,
"grad_norm": 0.893140435218811,
"learning_rate": 4.037128980032379e-05,
"loss": 3.1512,
"step": 86550
},
{
"epoch": 9.336927223719677,
"grad_norm": 0.8925532698631287,
"learning_rate": 4.004749055585537e-05,
"loss": 3.1407,
"step": 86600
},
{
"epoch": 9.342318059299192,
"grad_norm": 0.9114087820053101,
"learning_rate": 3.972369131138694e-05,
"loss": 3.1276,
"step": 86650
},
{
"epoch": 9.347708894878707,
"grad_norm": 0.8704652190208435,
"learning_rate": 3.93998920669185e-05,
"loss": 3.1325,
"step": 86700
},
{
"epoch": 9.35309973045822,
"grad_norm": 0.8590611815452576,
"learning_rate": 3.907609282245008e-05,
"loss": 3.1379,
"step": 86750
},
{
"epoch": 9.358490566037736,
"grad_norm": 0.8476240634918213,
"learning_rate": 3.875876956287101e-05,
"loss": 3.1505,
"step": 86800
},
{
"epoch": 9.36388140161725,
"grad_norm": 0.8863951563835144,
"learning_rate": 3.843497031840259e-05,
"loss": 3.1557,
"step": 86850
},
{
"epoch": 9.369272237196766,
"grad_norm": 0.8881932497024536,
"learning_rate": 3.8111171073934154e-05,
"loss": 3.1306,
"step": 86900
},
{
"epoch": 9.374663072776281,
"grad_norm": 0.9211652874946594,
"learning_rate": 3.778737182946573e-05,
"loss": 3.1398,
"step": 86950
},
{
"epoch": 9.380053908355794,
"grad_norm": 0.8532884120941162,
"learning_rate": 3.7463572584997296e-05,
"loss": 3.1315,
"step": 87000
},
{
"epoch": 9.380053908355794,
"eval_accuracy": 0.39368352955863783,
"eval_loss": 3.309629440307617,
"eval_runtime": 145.2816,
"eval_samples_per_second": 123.973,
"eval_steps_per_second": 7.75,
"step": 87000
},
{
"epoch": 9.38544474393531,
"grad_norm": 0.8766670227050781,
"learning_rate": 3.713977334052887e-05,
"loss": 3.1307,
"step": 87050
},
{
"epoch": 9.390835579514825,
"grad_norm": 0.8692664504051208,
"learning_rate": 3.681597409606044e-05,
"loss": 3.1387,
"step": 87100
},
{
"epoch": 9.39622641509434,
"grad_norm": 0.8868532180786133,
"learning_rate": 3.6498650836481376e-05,
"loss": 3.1427,
"step": 87150
},
{
"epoch": 9.401617250673855,
"grad_norm": 0.8694487810134888,
"learning_rate": 3.617485159201295e-05,
"loss": 3.1385,
"step": 87200
},
{
"epoch": 9.40700808625337,
"grad_norm": 0.8496524095535278,
"learning_rate": 3.585105234754452e-05,
"loss": 3.1259,
"step": 87250
},
{
"epoch": 9.412398921832883,
"grad_norm": 0.8991537094116211,
"learning_rate": 3.552725310307609e-05,
"loss": 3.154,
"step": 87300
},
{
"epoch": 9.417789757412399,
"grad_norm": 0.8673107624053955,
"learning_rate": 3.520345385860766e-05,
"loss": 3.1481,
"step": 87350
},
{
"epoch": 9.423180592991914,
"grad_norm": 0.8731547594070435,
"learning_rate": 3.487965461413923e-05,
"loss": 3.1545,
"step": 87400
},
{
"epoch": 9.428571428571429,
"grad_norm": 0.871950626373291,
"learning_rate": 3.45558553696708e-05,
"loss": 3.1449,
"step": 87450
},
{
"epoch": 9.433962264150944,
"grad_norm": 0.8890537023544312,
"learning_rate": 3.423205612520237e-05,
"loss": 3.1474,
"step": 87500
},
{
"epoch": 9.439353099730457,
"grad_norm": 0.9017044901847839,
"learning_rate": 3.3908256880733944e-05,
"loss": 3.1375,
"step": 87550
},
{
"epoch": 9.444743935309972,
"grad_norm": 0.9141184091567993,
"learning_rate": 3.3584457636265514e-05,
"loss": 3.1241,
"step": 87600
},
{
"epoch": 9.450134770889488,
"grad_norm": 0.8654083013534546,
"learning_rate": 3.3260658391797085e-05,
"loss": 3.1301,
"step": 87650
},
{
"epoch": 9.455525606469003,
"grad_norm": 0.8626784086227417,
"learning_rate": 3.2936859147328656e-05,
"loss": 3.1428,
"step": 87700
},
{
"epoch": 9.460916442048518,
"grad_norm": 0.9087199568748474,
"learning_rate": 3.261305990286023e-05,
"loss": 3.1462,
"step": 87750
},
{
"epoch": 9.466307277628033,
"grad_norm": 0.8533098101615906,
"learning_rate": 3.228926065839179e-05,
"loss": 3.1419,
"step": 87800
},
{
"epoch": 9.471698113207546,
"grad_norm": 0.8794834017753601,
"learning_rate": 3.196546141392336e-05,
"loss": 3.1435,
"step": 87850
},
{
"epoch": 9.477088948787062,
"grad_norm": 0.9012707471847534,
"learning_rate": 3.164166216945493e-05,
"loss": 3.1423,
"step": 87900
},
{
"epoch": 9.482479784366577,
"grad_norm": 0.8623753786087036,
"learning_rate": 3.1317862924986504e-05,
"loss": 3.1362,
"step": 87950
},
{
"epoch": 9.487870619946092,
"grad_norm": 0.9301363825798035,
"learning_rate": 3.0994063680518075e-05,
"loss": 3.1295,
"step": 88000
},
{
"epoch": 9.487870619946092,
"eval_accuracy": 0.3939586386705841,
"eval_loss": 3.307420253753662,
"eval_runtime": 149.154,
"eval_samples_per_second": 120.754,
"eval_steps_per_second": 7.549,
"step": 88000
},
{
"epoch": 9.493261455525607,
"grad_norm": 0.8840963840484619,
"learning_rate": 3.0670264436049646e-05,
"loss": 3.1311,
"step": 88050
},
{
"epoch": 9.498652291105122,
"grad_norm": 0.8954595923423767,
"learning_rate": 3.0346465191581217e-05,
"loss": 3.1296,
"step": 88100
},
{
"epoch": 9.504043126684635,
"grad_norm": 0.8655648827552795,
"learning_rate": 3.002266594711279e-05,
"loss": 3.1302,
"step": 88150
},
{
"epoch": 9.50943396226415,
"grad_norm": 0.8797849416732788,
"learning_rate": 2.969886670264436e-05,
"loss": 3.1371,
"step": 88200
},
{
"epoch": 9.514824797843666,
"grad_norm": 0.9118847846984863,
"learning_rate": 2.937506745817593e-05,
"loss": 3.1351,
"step": 88250
},
{
"epoch": 9.520215633423181,
"grad_norm": 0.9413871765136719,
"learning_rate": 2.90512682137075e-05,
"loss": 3.1271,
"step": 88300
},
{
"epoch": 9.525606469002696,
"grad_norm": 0.8877630233764648,
"learning_rate": 2.872746896923907e-05,
"loss": 3.1265,
"step": 88350
},
{
"epoch": 9.530997304582211,
"grad_norm": 0.9085790514945984,
"learning_rate": 2.8403669724770642e-05,
"loss": 3.1349,
"step": 88400
},
{
"epoch": 9.536388140161725,
"grad_norm": 0.8877115249633789,
"learning_rate": 2.807987048030221e-05,
"loss": 3.1435,
"step": 88450
},
{
"epoch": 9.54177897574124,
"grad_norm": 0.855800449848175,
"learning_rate": 2.775607123583378e-05,
"loss": 3.1391,
"step": 88500
},
{
"epoch": 9.547169811320755,
"grad_norm": 0.8799571394920349,
"learning_rate": 2.743227199136535e-05,
"loss": 3.1526,
"step": 88550
},
{
"epoch": 9.55256064690027,
"grad_norm": 0.8849266171455383,
"learning_rate": 2.7108472746896922e-05,
"loss": 3.1457,
"step": 88600
},
{
"epoch": 9.557951482479785,
"grad_norm": 0.9104188680648804,
"learning_rate": 2.6784673502428493e-05,
"loss": 3.1455,
"step": 88650
},
{
"epoch": 9.563342318059298,
"grad_norm": 0.8904628157615662,
"learning_rate": 2.646087425796006e-05,
"loss": 3.1416,
"step": 88700
},
{
"epoch": 9.568733153638814,
"grad_norm": 0.9009488821029663,
"learning_rate": 2.613707501349163e-05,
"loss": 3.1337,
"step": 88750
},
{
"epoch": 9.574123989218329,
"grad_norm": 0.8888774514198303,
"learning_rate": 2.5813275769023202e-05,
"loss": 3.1275,
"step": 88800
},
{
"epoch": 9.579514824797844,
"grad_norm": 0.8575297594070435,
"learning_rate": 2.5489476524554773e-05,
"loss": 3.147,
"step": 88850
},
{
"epoch": 9.584905660377359,
"grad_norm": 0.8705415725708008,
"learning_rate": 2.5165677280086344e-05,
"loss": 3.1359,
"step": 88900
},
{
"epoch": 9.590296495956874,
"grad_norm": 0.8735716342926025,
"learning_rate": 2.484187803561792e-05,
"loss": 3.1204,
"step": 88950
},
{
"epoch": 9.595687331536388,
"grad_norm": 0.8858790993690491,
"learning_rate": 2.4518078791149486e-05,
"loss": 3.1314,
"step": 89000
},
{
"epoch": 9.595687331536388,
"eval_accuracy": 0.3941762704048765,
"eval_loss": 3.3044450283050537,
"eval_runtime": 148.2284,
"eval_samples_per_second": 121.508,
"eval_steps_per_second": 7.596,
"step": 89000
},
{
"epoch": 9.601078167115903,
"grad_norm": 0.8914674520492554,
"learning_rate": 2.4194279546681057e-05,
"loss": 3.1446,
"step": 89050
},
{
"epoch": 9.606469002695418,
"grad_norm": 0.8769115805625916,
"learning_rate": 2.3870480302212628e-05,
"loss": 3.1347,
"step": 89100
},
{
"epoch": 9.611859838274933,
"grad_norm": 0.8797803521156311,
"learning_rate": 2.35466810577442e-05,
"loss": 3.1514,
"step": 89150
},
{
"epoch": 9.617250673854448,
"grad_norm": 0.9202283024787903,
"learning_rate": 2.3222881813275766e-05,
"loss": 3.1392,
"step": 89200
},
{
"epoch": 9.622641509433961,
"grad_norm": 0.9270514249801636,
"learning_rate": 2.2899082568807337e-05,
"loss": 3.1504,
"step": 89250
},
{
"epoch": 9.628032345013477,
"grad_norm": 0.8745276927947998,
"learning_rate": 2.2575283324338908e-05,
"loss": 3.134,
"step": 89300
},
{
"epoch": 9.633423180592992,
"grad_norm": 0.9120118618011475,
"learning_rate": 2.225148407987048e-05,
"loss": 3.1519,
"step": 89350
},
{
"epoch": 9.638814016172507,
"grad_norm": 0.8961083889007568,
"learning_rate": 2.192768483540205e-05,
"loss": 3.1427,
"step": 89400
},
{
"epoch": 9.644204851752022,
"grad_norm": 0.9004408717155457,
"learning_rate": 2.1603885590933617e-05,
"loss": 3.138,
"step": 89450
},
{
"epoch": 9.649595687331537,
"grad_norm": 0.9192554950714111,
"learning_rate": 2.1280086346465188e-05,
"loss": 3.138,
"step": 89500
},
{
"epoch": 9.65498652291105,
"grad_norm": 0.8526079058647156,
"learning_rate": 2.095628710199676e-05,
"loss": 3.1405,
"step": 89550
},
{
"epoch": 9.660377358490566,
"grad_norm": 0.8591670989990234,
"learning_rate": 2.063248785752833e-05,
"loss": 3.1186,
"step": 89600
},
{
"epoch": 9.66576819407008,
"grad_norm": 0.8714630007743835,
"learning_rate": 2.03086886130599e-05,
"loss": 3.1363,
"step": 89650
},
{
"epoch": 9.671159029649596,
"grad_norm": 0.884052038192749,
"learning_rate": 1.9984889368591468e-05,
"loss": 3.1389,
"step": 89700
},
{
"epoch": 9.676549865229111,
"grad_norm": 0.8649567365646362,
"learning_rate": 1.9661090124123042e-05,
"loss": 3.1361,
"step": 89750
},
{
"epoch": 9.681940700808624,
"grad_norm": 0.8387906551361084,
"learning_rate": 1.9337290879654613e-05,
"loss": 3.1336,
"step": 89800
},
{
"epoch": 9.68733153638814,
"grad_norm": 0.9404808878898621,
"learning_rate": 1.9013491635186184e-05,
"loss": 3.1533,
"step": 89850
},
{
"epoch": 9.692722371967655,
"grad_norm": 0.8834516406059265,
"learning_rate": 1.868969239071775e-05,
"loss": 3.1351,
"step": 89900
},
{
"epoch": 9.69811320754717,
"grad_norm": 0.8900846838951111,
"learning_rate": 1.8365893146249326e-05,
"loss": 3.1486,
"step": 89950
},
{
"epoch": 9.703504043126685,
"grad_norm": 0.8816651105880737,
"learning_rate": 1.804856988667026e-05,
"loss": 3.1393,
"step": 90000
},
{
"epoch": 9.703504043126685,
"eval_accuracy": 0.3945371066453084,
"eval_loss": 3.302194118499756,
"eval_runtime": 144.8797,
"eval_samples_per_second": 124.317,
"eval_steps_per_second": 7.772,
"step": 90000
}
],
"logging_steps": 50,
"max_steps": 92750,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.524622614528e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}