{
"best_metric": 3.493159770965576,
"best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M__495/checkpoint-30000",
"epoch": 3.234501347708895,
"eval_steps": 1000,
"global_step": 30000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005390835579514825,
"grad_norm": 0.9464320540428162,
"learning_rate": 0.000276,
"loss": 8.7571,
"step": 50
},
{
"epoch": 0.01078167115902965,
"grad_norm": 1.1756318807601929,
"learning_rate": 0.0005759999999999999,
"loss": 7.0078,
"step": 100
},
{
"epoch": 0.016172506738544475,
"grad_norm": 1.551442265510559,
"learning_rate": 0.000599702104695089,
"loss": 6.6048,
"step": 150
},
{
"epoch": 0.0215633423180593,
"grad_norm": 0.7034249901771545,
"learning_rate": 0.0005993783054506205,
"loss": 6.3332,
"step": 200
},
{
"epoch": 0.026954177897574125,
"grad_norm": 1.0177743434906006,
"learning_rate": 0.0005990545062061521,
"loss": 6.1389,
"step": 250
},
{
"epoch": 0.03234501347708895,
"grad_norm": 1.3281992673873901,
"learning_rate": 0.0005987307069616836,
"loss": 6.0274,
"step": 300
},
{
"epoch": 0.03773584905660377,
"grad_norm": 1.3769625425338745,
"learning_rate": 0.0005984069077172153,
"loss": 5.9227,
"step": 350
},
{
"epoch": 0.0431266846361186,
"grad_norm": 1.3584883213043213,
"learning_rate": 0.0005980831084727469,
"loss": 5.8655,
"step": 400
},
{
"epoch": 0.04851752021563342,
"grad_norm": 0.9795990586280823,
"learning_rate": 0.0005977593092282784,
"loss": 5.78,
"step": 450
},
{
"epoch": 0.05390835579514825,
"grad_norm": 1.4081448316574097,
"learning_rate": 0.00059743550998381,
"loss": 5.7304,
"step": 500
},
{
"epoch": 0.05929919137466307,
"grad_norm": 1.7430726289749146,
"learning_rate": 0.0005971117107393416,
"loss": 5.6254,
"step": 550
},
{
"epoch": 0.0646900269541779,
"grad_norm": 1.4288103580474854,
"learning_rate": 0.0005967879114948732,
"loss": 5.5997,
"step": 600
},
{
"epoch": 0.07008086253369272,
"grad_norm": 0.9352820515632629,
"learning_rate": 0.0005964641122504047,
"loss": 5.5009,
"step": 650
},
{
"epoch": 0.07547169811320754,
"grad_norm": 1.0235956907272339,
"learning_rate": 0.0005961403130059363,
"loss": 5.4834,
"step": 700
},
{
"epoch": 0.08086253369272237,
"grad_norm": 1.1283656358718872,
"learning_rate": 0.0005958165137614678,
"loss": 5.4096,
"step": 750
},
{
"epoch": 0.0862533692722372,
"grad_norm": 1.1757662296295166,
"learning_rate": 0.0005954927145169995,
"loss": 5.3508,
"step": 800
},
{
"epoch": 0.09164420485175202,
"grad_norm": 1.2652894258499146,
"learning_rate": 0.0005951689152725309,
"loss": 5.3137,
"step": 850
},
{
"epoch": 0.09703504043126684,
"grad_norm": 0.8220955729484558,
"learning_rate": 0.0005948451160280626,
"loss": 5.234,
"step": 900
},
{
"epoch": 0.10242587601078167,
"grad_norm": 1.0178474187850952,
"learning_rate": 0.0005945213167835941,
"loss": 5.2211,
"step": 950
},
{
"epoch": 0.1078167115902965,
"grad_norm": 1.0003489255905151,
"learning_rate": 0.0005941975175391257,
"loss": 5.1665,
"step": 1000
},
{
"epoch": 0.1078167115902965,
"eval_accuracy": 0.2206364212520268,
"eval_loss": 5.09123420715332,
"eval_runtime": 146.3271,
"eval_samples_per_second": 123.087,
"eval_steps_per_second": 7.695,
"step": 1000
},
{
"epoch": 0.11320754716981132,
"grad_norm": 1.0129122734069824,
"learning_rate": 0.0005938737182946572,
"loss": 5.1253,
"step": 1050
},
{
"epoch": 0.11859838274932614,
"grad_norm": 1.2447277307510376,
"learning_rate": 0.0005935499190501888,
"loss": 5.0843,
"step": 1100
},
{
"epoch": 0.12398921832884097,
"grad_norm": 1.1244721412658691,
"learning_rate": 0.0005932261198057204,
"loss": 5.0632,
"step": 1150
},
{
"epoch": 0.1293800539083558,
"grad_norm": 1.0002185106277466,
"learning_rate": 0.000592902320561252,
"loss": 5.0409,
"step": 1200
},
{
"epoch": 0.1347708894878706,
"grad_norm": 1.237203598022461,
"learning_rate": 0.0005925785213167835,
"loss": 5.0158,
"step": 1250
},
{
"epoch": 0.14016172506738545,
"grad_norm": 0.9483816623687744,
"learning_rate": 0.0005922547220723151,
"loss": 4.9578,
"step": 1300
},
{
"epoch": 0.14555256064690028,
"grad_norm": 1.1492619514465332,
"learning_rate": 0.0005919309228278468,
"loss": 4.9422,
"step": 1350
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.8103901743888855,
"learning_rate": 0.0005916071235833783,
"loss": 4.9161,
"step": 1400
},
{
"epoch": 0.15633423180592992,
"grad_norm": 0.9849348068237305,
"learning_rate": 0.0005912833243389097,
"loss": 4.8937,
"step": 1450
},
{
"epoch": 0.16172506738544473,
"grad_norm": 1.0992136001586914,
"learning_rate": 0.0005909595250944414,
"loss": 4.8771,
"step": 1500
},
{
"epoch": 0.16711590296495957,
"grad_norm": 0.7955754995346069,
"learning_rate": 0.000590635725849973,
"loss": 4.8337,
"step": 1550
},
{
"epoch": 0.1725067385444744,
"grad_norm": 0.8510985970497131,
"learning_rate": 0.0005903119266055045,
"loss": 4.8487,
"step": 1600
},
{
"epoch": 0.1778975741239892,
"grad_norm": 0.8423263430595398,
"learning_rate": 0.0005899881273610361,
"loss": 4.8251,
"step": 1650
},
{
"epoch": 0.18328840970350405,
"grad_norm": 0.8307051658630371,
"learning_rate": 0.0005896643281165677,
"loss": 4.7528,
"step": 1700
},
{
"epoch": 0.18867924528301888,
"grad_norm": 1.171060562133789,
"learning_rate": 0.0005893405288720993,
"loss": 4.7617,
"step": 1750
},
{
"epoch": 0.1940700808625337,
"grad_norm": 1.0469765663146973,
"learning_rate": 0.0005890167296276308,
"loss": 4.7597,
"step": 1800
},
{
"epoch": 0.19946091644204852,
"grad_norm": 0.8637204170227051,
"learning_rate": 0.0005886929303831624,
"loss": 4.7128,
"step": 1850
},
{
"epoch": 0.20485175202156333,
"grad_norm": 0.9173099398612976,
"learning_rate": 0.0005883691311386939,
"loss": 4.7067,
"step": 1900
},
{
"epoch": 0.21024258760107817,
"grad_norm": 0.9003922343254089,
"learning_rate": 0.0005880453318942256,
"loss": 4.6783,
"step": 1950
},
{
"epoch": 0.215633423180593,
"grad_norm": 0.9870163798332214,
"learning_rate": 0.0005877215326497571,
"loss": 4.6555,
"step": 2000
},
{
"epoch": 0.215633423180593,
"eval_accuracy": 0.260708366848222,
"eval_loss": 4.6007513999938965,
"eval_runtime": 144.747,
"eval_samples_per_second": 124.431,
"eval_steps_per_second": 7.779,
"step": 2000
},
{
"epoch": 0.2210242587601078,
"grad_norm": 0.8778396248817444,
"learning_rate": 0.0005873977334052887,
"loss": 4.6696,
"step": 2050
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.8375086188316345,
"learning_rate": 0.0005870739341608202,
"loss": 4.6395,
"step": 2100
},
{
"epoch": 0.23180592991913745,
"grad_norm": 0.8078502416610718,
"learning_rate": 0.0005867501349163519,
"loss": 4.5823,
"step": 2150
},
{
"epoch": 0.2371967654986523,
"grad_norm": 0.8603857755661011,
"learning_rate": 0.0005864263356718833,
"loss": 4.6175,
"step": 2200
},
{
"epoch": 0.24258760107816713,
"grad_norm": 0.8307099342346191,
"learning_rate": 0.000586102536427415,
"loss": 4.5541,
"step": 2250
},
{
"epoch": 0.24797843665768193,
"grad_norm": 1.0236228704452515,
"learning_rate": 0.0005857787371829465,
"loss": 4.5368,
"step": 2300
},
{
"epoch": 0.25336927223719674,
"grad_norm": 0.9307533502578735,
"learning_rate": 0.0005854549379384781,
"loss": 4.5326,
"step": 2350
},
{
"epoch": 0.2587601078167116,
"grad_norm": 1.4191182851791382,
"learning_rate": 0.0005851311386940096,
"loss": 4.5253,
"step": 2400
},
{
"epoch": 0.2641509433962264,
"grad_norm": 1.015572428703308,
"learning_rate": 0.0005848073394495412,
"loss": 4.5272,
"step": 2450
},
{
"epoch": 0.2695417789757412,
"grad_norm": 0.8903120756149292,
"learning_rate": 0.0005844835402050728,
"loss": 4.5219,
"step": 2500
},
{
"epoch": 0.2749326145552561,
"grad_norm": 1.0123368501663208,
"learning_rate": 0.0005841597409606044,
"loss": 4.4772,
"step": 2550
},
{
"epoch": 0.2803234501347709,
"grad_norm": 0.925151526927948,
"learning_rate": 0.000583835941716136,
"loss": 4.4624,
"step": 2600
},
{
"epoch": 0.2857142857142857,
"grad_norm": 1.1478705406188965,
"learning_rate": 0.0005835121424716675,
"loss": 4.4605,
"step": 2650
},
{
"epoch": 0.29110512129380056,
"grad_norm": 1.0130943059921265,
"learning_rate": 0.0005831883432271992,
"loss": 4.4529,
"step": 2700
},
{
"epoch": 0.29649595687331537,
"grad_norm": 0.991671621799469,
"learning_rate": 0.0005828645439827307,
"loss": 4.4101,
"step": 2750
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.8699747323989868,
"learning_rate": 0.0005825407447382622,
"loss": 4.4505,
"step": 2800
},
{
"epoch": 0.30727762803234504,
"grad_norm": 1.0381041765213013,
"learning_rate": 0.0005822169454937938,
"loss": 4.4171,
"step": 2850
},
{
"epoch": 0.31266846361185985,
"grad_norm": 1.0296149253845215,
"learning_rate": 0.0005818931462493254,
"loss": 4.4014,
"step": 2900
},
{
"epoch": 0.31805929919137466,
"grad_norm": 0.9270951747894287,
"learning_rate": 0.0005815693470048569,
"loss": 4.3968,
"step": 2950
},
{
"epoch": 0.32345013477088946,
"grad_norm": 0.7531670331954956,
"learning_rate": 0.0005812455477603885,
"loss": 4.3901,
"step": 3000
},
{
"epoch": 0.32345013477088946,
"eval_accuracy": 0.29159338482103947,
"eval_loss": 4.296316146850586,
"eval_runtime": 144.5889,
"eval_samples_per_second": 124.567,
"eval_steps_per_second": 7.788,
"step": 3000
},
{
"epoch": 0.3288409703504043,
"grad_norm": 0.8791877627372742,
"learning_rate": 0.0005809217485159201,
"loss": 4.3683,
"step": 3050
},
{
"epoch": 0.33423180592991913,
"grad_norm": 0.843708872795105,
"learning_rate": 0.0005805979492714517,
"loss": 4.3643,
"step": 3100
},
{
"epoch": 0.33962264150943394,
"grad_norm": 0.8123868703842163,
"learning_rate": 0.0005802741500269832,
"loss": 4.3262,
"step": 3150
},
{
"epoch": 0.3450134770889488,
"grad_norm": 0.9056026339530945,
"learning_rate": 0.0005799503507825148,
"loss": 4.3298,
"step": 3200
},
{
"epoch": 0.3504043126684636,
"grad_norm": 0.6567044854164124,
"learning_rate": 0.0005796265515380463,
"loss": 4.306,
"step": 3250
},
{
"epoch": 0.3557951482479784,
"grad_norm": 0.9095497131347656,
"learning_rate": 0.000579302752293578,
"loss": 4.328,
"step": 3300
},
{
"epoch": 0.3611859838274933,
"grad_norm": 0.8091392517089844,
"learning_rate": 0.0005789789530491095,
"loss": 4.3044,
"step": 3350
},
{
"epoch": 0.3665768194070081,
"grad_norm": 0.9594192504882812,
"learning_rate": 0.0005786551538046411,
"loss": 4.2963,
"step": 3400
},
{
"epoch": 0.3719676549865229,
"grad_norm": 0.72095787525177,
"learning_rate": 0.0005783313545601726,
"loss": 4.2802,
"step": 3450
},
{
"epoch": 0.37735849056603776,
"grad_norm": 0.7558978199958801,
"learning_rate": 0.0005780075553157043,
"loss": 4.2589,
"step": 3500
},
{
"epoch": 0.38274932614555257,
"grad_norm": 0.7811341881752014,
"learning_rate": 0.0005776837560712357,
"loss": 4.264,
"step": 3550
},
{
"epoch": 0.3881401617250674,
"grad_norm": 0.9022195935249329,
"learning_rate": 0.0005773599568267673,
"loss": 4.2651,
"step": 3600
},
{
"epoch": 0.3935309973045822,
"grad_norm": 0.9639933109283447,
"learning_rate": 0.0005770361575822989,
"loss": 4.2789,
"step": 3650
},
{
"epoch": 0.39892183288409705,
"grad_norm": 0.9333063960075378,
"learning_rate": 0.0005767123583378305,
"loss": 4.24,
"step": 3700
},
{
"epoch": 0.40431266846361186,
"grad_norm": 0.7508504986763,
"learning_rate": 0.000576388559093362,
"loss": 4.2359,
"step": 3750
},
{
"epoch": 0.40970350404312667,
"grad_norm": 0.7458257079124451,
"learning_rate": 0.0005760647598488936,
"loss": 4.2555,
"step": 3800
},
{
"epoch": 0.41509433962264153,
"grad_norm": 0.7919742465019226,
"learning_rate": 0.0005757409606044253,
"loss": 4.2366,
"step": 3850
},
{
"epoch": 0.42048517520215634,
"grad_norm": 0.9453123211860657,
"learning_rate": 0.0005754171613599568,
"loss": 4.2055,
"step": 3900
},
{
"epoch": 0.42587601078167114,
"grad_norm": 0.6152997612953186,
"learning_rate": 0.0005750933621154884,
"loss": 4.201,
"step": 3950
},
{
"epoch": 0.431266846361186,
"grad_norm": 0.8247600197792053,
"learning_rate": 0.0005747695628710199,
"loss": 4.2075,
"step": 4000
},
{
"epoch": 0.431266846361186,
"eval_accuracy": 0.30819272110400897,
"eval_loss": 4.134169101715088,
"eval_runtime": 144.6112,
"eval_samples_per_second": 124.548,
"eval_steps_per_second": 7.786,
"step": 4000
},
{
"epoch": 0.4366576819407008,
"grad_norm": 0.8243815898895264,
"learning_rate": 0.0005744457636265515,
"loss": 4.2068,
"step": 4050
},
{
"epoch": 0.4420485175202156,
"grad_norm": 0.6849672794342041,
"learning_rate": 0.0005741219643820831,
"loss": 4.2091,
"step": 4100
},
{
"epoch": 0.4474393530997305,
"grad_norm": 0.7505493760108948,
"learning_rate": 0.0005737981651376146,
"loss": 4.193,
"step": 4150
},
{
"epoch": 0.4528301886792453,
"grad_norm": 0.6466169953346252,
"learning_rate": 0.0005734743658931462,
"loss": 4.1829,
"step": 4200
},
{
"epoch": 0.4582210242587601,
"grad_norm": 0.5937972664833069,
"learning_rate": 0.0005731505666486778,
"loss": 4.1738,
"step": 4250
},
{
"epoch": 0.4636118598382749,
"grad_norm": 0.7906216979026794,
"learning_rate": 0.0005728267674042093,
"loss": 4.1668,
"step": 4300
},
{
"epoch": 0.46900269541778977,
"grad_norm": 0.744433581829071,
"learning_rate": 0.0005725029681597409,
"loss": 4.1723,
"step": 4350
},
{
"epoch": 0.4743935309973046,
"grad_norm": 0.8073228597640991,
"learning_rate": 0.0005721791689152725,
"loss": 4.158,
"step": 4400
},
{
"epoch": 0.4797843665768194,
"grad_norm": 0.8262885808944702,
"learning_rate": 0.0005718553696708041,
"loss": 4.1602,
"step": 4450
},
{
"epoch": 0.48517520215633425,
"grad_norm": 0.6594825387001038,
"learning_rate": 0.0005715315704263356,
"loss": 4.1434,
"step": 4500
},
{
"epoch": 0.49056603773584906,
"grad_norm": 0.6674824357032776,
"learning_rate": 0.0005712077711818672,
"loss": 4.1315,
"step": 4550
},
{
"epoch": 0.49595687331536387,
"grad_norm": 0.7260637879371643,
"learning_rate": 0.0005708839719373987,
"loss": 4.1407,
"step": 4600
},
{
"epoch": 0.5013477088948787,
"grad_norm": 0.6827527284622192,
"learning_rate": 0.0005705601726929304,
"loss": 4.1196,
"step": 4650
},
{
"epoch": 0.5067385444743935,
"grad_norm": 0.774723470211029,
"learning_rate": 0.0005702363734484619,
"loss": 4.1256,
"step": 4700
},
{
"epoch": 0.5121293800539084,
"grad_norm": 0.6491437554359436,
"learning_rate": 0.0005699125742039935,
"loss": 4.1063,
"step": 4750
},
{
"epoch": 0.5175202156334232,
"grad_norm": 0.7277990579605103,
"learning_rate": 0.000569588774959525,
"loss": 4.1197,
"step": 4800
},
{
"epoch": 0.522911051212938,
"grad_norm": 0.6551647782325745,
"learning_rate": 0.0005692649757150567,
"loss": 4.1135,
"step": 4850
},
{
"epoch": 0.5283018867924528,
"grad_norm": 0.7088435292243958,
"learning_rate": 0.0005689411764705881,
"loss": 4.1188,
"step": 4900
},
{
"epoch": 0.5336927223719676,
"grad_norm": 0.7585951089859009,
"learning_rate": 0.0005686173772261197,
"loss": 4.0928,
"step": 4950
},
{
"epoch": 0.5390835579514824,
"grad_norm": 0.673933744430542,
"learning_rate": 0.0005682935779816514,
"loss": 4.0819,
"step": 5000
},
{
"epoch": 0.5390835579514824,
"eval_accuracy": 0.31859362744293795,
"eval_loss": 4.022838115692139,
"eval_runtime": 144.8217,
"eval_samples_per_second": 124.367,
"eval_steps_per_second": 7.775,
"step": 5000
},
{
"epoch": 0.5444743935309974,
"grad_norm": 0.617730438709259,
"learning_rate": 0.0005679697787371829,
"loss": 4.0793,
"step": 5050
},
{
"epoch": 0.5498652291105122,
"grad_norm": 0.6957946419715881,
"learning_rate": 0.0005676459794927145,
"loss": 4.0821,
"step": 5100
},
{
"epoch": 0.555256064690027,
"grad_norm": 0.6225258708000183,
"learning_rate": 0.000567322180248246,
"loss": 4.0873,
"step": 5150
},
{
"epoch": 0.5606469002695418,
"grad_norm": 0.6864507794380188,
"learning_rate": 0.0005669983810037777,
"loss": 4.0634,
"step": 5200
},
{
"epoch": 0.5660377358490566,
"grad_norm": 0.642590343952179,
"learning_rate": 0.0005666745817593092,
"loss": 4.0649,
"step": 5250
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.614847719669342,
"learning_rate": 0.0005663507825148408,
"loss": 4.0476,
"step": 5300
},
{
"epoch": 0.5768194070080862,
"grad_norm": 0.7870457768440247,
"learning_rate": 0.0005660269832703723,
"loss": 4.063,
"step": 5350
},
{
"epoch": 0.5822102425876011,
"grad_norm": 0.6789854764938354,
"learning_rate": 0.0005657031840259039,
"loss": 4.0518,
"step": 5400
},
{
"epoch": 0.5876010781671159,
"grad_norm": 0.6867729425430298,
"learning_rate": 0.0005653793847814355,
"loss": 4.0614,
"step": 5450
},
{
"epoch": 0.5929919137466307,
"grad_norm": 0.5899894833564758,
"learning_rate": 0.000565055585536967,
"loss": 4.0639,
"step": 5500
},
{
"epoch": 0.5983827493261455,
"grad_norm": 0.6574368476867676,
"learning_rate": 0.0005647317862924986,
"loss": 4.067,
"step": 5550
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.7431745529174805,
"learning_rate": 0.0005644079870480302,
"loss": 4.0425,
"step": 5600
},
{
"epoch": 0.6091644204851752,
"grad_norm": 0.6241595149040222,
"learning_rate": 0.0005640841878035617,
"loss": 4.0319,
"step": 5650
},
{
"epoch": 0.6145552560646901,
"grad_norm": 0.6736788749694824,
"learning_rate": 0.0005637603885590933,
"loss": 4.0366,
"step": 5700
},
{
"epoch": 0.6199460916442049,
"grad_norm": 0.6149032711982727,
"learning_rate": 0.0005634365893146248,
"loss": 4.0495,
"step": 5750
},
{
"epoch": 0.6253369272237197,
"grad_norm": 0.6543477177619934,
"learning_rate": 0.0005631127900701565,
"loss": 4.042,
"step": 5800
},
{
"epoch": 0.6307277628032345,
"grad_norm": 0.6215724945068359,
"learning_rate": 0.000562788990825688,
"loss": 4.0478,
"step": 5850
},
{
"epoch": 0.6361185983827493,
"grad_norm": 0.6606348752975464,
"learning_rate": 0.0005624651915812196,
"loss": 4.0192,
"step": 5900
},
{
"epoch": 0.6415094339622641,
"grad_norm": 0.7944669723510742,
"learning_rate": 0.0005621413923367511,
"loss": 4.012,
"step": 5950
},
{
"epoch": 0.6469002695417789,
"grad_norm": 0.6075884699821472,
"learning_rate": 0.0005618175930922828,
"loss": 4.0201,
"step": 6000
},
{
"epoch": 0.6469002695417789,
"eval_accuracy": 0.3257018080166491,
"eval_loss": 3.940925359725952,
"eval_runtime": 144.8645,
"eval_samples_per_second": 124.33,
"eval_steps_per_second": 7.773,
"step": 6000
},
{
"epoch": 0.6522911051212938,
"grad_norm": 0.7395045757293701,
"learning_rate": 0.0005614937938478143,
"loss": 3.9927,
"step": 6050
},
{
"epoch": 0.6576819407008087,
"grad_norm": 0.69764643907547,
"learning_rate": 0.0005611699946033459,
"loss": 4.0252,
"step": 6100
},
{
"epoch": 0.6630727762803235,
"grad_norm": 0.5907176733016968,
"learning_rate": 0.0005608461953588774,
"loss": 4.0071,
"step": 6150
},
{
"epoch": 0.6684636118598383,
"grad_norm": 0.7374498248100281,
"learning_rate": 0.000560522396114409,
"loss": 3.9854,
"step": 6200
},
{
"epoch": 0.6738544474393531,
"grad_norm": 0.5937222838401794,
"learning_rate": 0.0005601985968699405,
"loss": 4.003,
"step": 6250
},
{
"epoch": 0.6792452830188679,
"grad_norm": 0.6928643584251404,
"learning_rate": 0.0005598747976254721,
"loss": 4.022,
"step": 6300
},
{
"epoch": 0.6846361185983828,
"grad_norm": 0.6832301020622253,
"learning_rate": 0.0005595509983810038,
"loss": 3.9816,
"step": 6350
},
{
"epoch": 0.6900269541778976,
"grad_norm": 0.6227492690086365,
"learning_rate": 0.0005592271991365353,
"loss": 3.9855,
"step": 6400
},
{
"epoch": 0.6954177897574124,
"grad_norm": 0.6595360636711121,
"learning_rate": 0.0005589033998920669,
"loss": 3.9733,
"step": 6450
},
{
"epoch": 0.7008086253369272,
"grad_norm": 0.6538481116294861,
"learning_rate": 0.0005585796006475984,
"loss": 3.9867,
"step": 6500
},
{
"epoch": 0.706199460916442,
"grad_norm": 0.6099511384963989,
"learning_rate": 0.0005582558014031301,
"loss": 4.009,
"step": 6550
},
{
"epoch": 0.7115902964959568,
"grad_norm": 0.5673043131828308,
"learning_rate": 0.0005579320021586616,
"loss": 3.9638,
"step": 6600
},
{
"epoch": 0.7169811320754716,
"grad_norm": 0.7762152552604675,
"learning_rate": 0.0005576082029141932,
"loss": 3.9942,
"step": 6650
},
{
"epoch": 0.7223719676549866,
"grad_norm": 0.6117172241210938,
"learning_rate": 0.0005572844036697247,
"loss": 3.9672,
"step": 6700
},
{
"epoch": 0.7277628032345014,
"grad_norm": 0.6088191866874695,
"learning_rate": 0.0005569606044252563,
"loss": 3.9738,
"step": 6750
},
{
"epoch": 0.7331536388140162,
"grad_norm": 0.6414440274238586,
"learning_rate": 0.0005566368051807879,
"loss": 3.9641,
"step": 6800
},
{
"epoch": 0.738544474393531,
"grad_norm": 0.8121737241744995,
"learning_rate": 0.0005563130059363194,
"loss": 3.9423,
"step": 6850
},
{
"epoch": 0.7439353099730458,
"grad_norm": 0.7500906586647034,
"learning_rate": 0.000555989206691851,
"loss": 3.9677,
"step": 6900
},
{
"epoch": 0.7493261455525606,
"grad_norm": 0.6089574694633484,
"learning_rate": 0.0005556654074473826,
"loss": 3.9647,
"step": 6950
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.7751880288124084,
"learning_rate": 0.0005553416082029141,
"loss": 3.9519,
"step": 7000
},
{
"epoch": 0.7547169811320755,
"eval_accuracy": 0.3312385418738994,
"eval_loss": 3.88531494140625,
"eval_runtime": 144.5963,
"eval_samples_per_second": 124.561,
"eval_steps_per_second": 7.787,
"step": 7000
},
{
"epoch": 0.7601078167115903,
"grad_norm": 0.6487019658088684,
"learning_rate": 0.0005550178089584457,
"loss": 3.9428,
"step": 7050
},
{
"epoch": 0.7654986522911051,
"grad_norm": 0.6093623638153076,
"learning_rate": 0.0005546940097139772,
"loss": 3.9544,
"step": 7100
},
{
"epoch": 0.77088948787062,
"grad_norm": 0.5531768798828125,
"learning_rate": 0.0005543702104695089,
"loss": 3.9569,
"step": 7150
},
{
"epoch": 0.7762803234501348,
"grad_norm": 0.6401906609535217,
"learning_rate": 0.0005540464112250404,
"loss": 3.9564,
"step": 7200
},
{
"epoch": 0.7816711590296496,
"grad_norm": 0.5921440720558167,
"learning_rate": 0.000553722611980572,
"loss": 3.9108,
"step": 7250
},
{
"epoch": 0.7870619946091644,
"grad_norm": 0.6791409254074097,
"learning_rate": 0.0005533988127361035,
"loss": 3.9488,
"step": 7300
},
{
"epoch": 0.7924528301886793,
"grad_norm": 0.6472693681716919,
"learning_rate": 0.0005530750134916352,
"loss": 3.9341,
"step": 7350
},
{
"epoch": 0.7978436657681941,
"grad_norm": 0.6375269889831543,
"learning_rate": 0.0005527512142471668,
"loss": 3.9339,
"step": 7400
},
{
"epoch": 0.8032345013477089,
"grad_norm": 0.626977264881134,
"learning_rate": 0.0005524274150026982,
"loss": 3.9247,
"step": 7450
},
{
"epoch": 0.8086253369272237,
"grad_norm": 0.696706235408783,
"learning_rate": 0.0005521036157582299,
"loss": 3.9116,
"step": 7500
},
{
"epoch": 0.8140161725067385,
"grad_norm": 0.594398558139801,
"learning_rate": 0.0005517798165137614,
"loss": 3.9356,
"step": 7550
},
{
"epoch": 0.8194070080862533,
"grad_norm": 0.6184767484664917,
"learning_rate": 0.000551456017269293,
"loss": 3.9384,
"step": 7600
},
{
"epoch": 0.8247978436657682,
"grad_norm": 0.5797574520111084,
"learning_rate": 0.0005511322180248245,
"loss": 3.9149,
"step": 7650
},
{
"epoch": 0.8301886792452831,
"grad_norm": 0.5616925954818726,
"learning_rate": 0.0005508084187803562,
"loss": 3.9173,
"step": 7700
},
{
"epoch": 0.8355795148247979,
"grad_norm": 0.6098619103431702,
"learning_rate": 0.0005504846195358877,
"loss": 3.8999,
"step": 7750
},
{
"epoch": 0.8409703504043127,
"grad_norm": 0.6354513764381409,
"learning_rate": 0.0005501608202914193,
"loss": 3.9284,
"step": 7800
},
{
"epoch": 0.8463611859838275,
"grad_norm": 0.6673269867897034,
"learning_rate": 0.0005498370210469508,
"loss": 3.909,
"step": 7850
},
{
"epoch": 0.8517520215633423,
"grad_norm": 0.539669930934906,
"learning_rate": 0.0005495132218024824,
"loss": 3.9074,
"step": 7900
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.618360161781311,
"learning_rate": 0.000549189422558014,
"loss": 3.9046,
"step": 7950
},
{
"epoch": 0.862533692722372,
"grad_norm": 0.6503387689590454,
"learning_rate": 0.0005488656233135456,
"loss": 3.8917,
"step": 8000
},
{
"epoch": 0.862533692722372,
"eval_accuracy": 0.33594255999893957,
"eval_loss": 3.8360989093780518,
"eval_runtime": 144.8786,
"eval_samples_per_second": 124.318,
"eval_steps_per_second": 7.772,
"step": 8000
},
{
"epoch": 0.8679245283018868,
"grad_norm": 0.6546068787574768,
"learning_rate": 0.0005485418240690771,
"loss": 3.9022,
"step": 8050
},
{
"epoch": 0.8733153638814016,
"grad_norm": 0.6453102231025696,
"learning_rate": 0.0005482180248246087,
"loss": 3.8843,
"step": 8100
},
{
"epoch": 0.8787061994609164,
"grad_norm": 0.5534443259239197,
"learning_rate": 0.0005478942255801403,
"loss": 3.8988,
"step": 8150
},
{
"epoch": 0.8840970350404312,
"grad_norm": 0.6230263113975525,
"learning_rate": 0.0005475704263356718,
"loss": 3.8879,
"step": 8200
},
{
"epoch": 0.889487870619946,
"grad_norm": 0.6733997464179993,
"learning_rate": 0.0005472466270912034,
"loss": 3.8729,
"step": 8250
},
{
"epoch": 0.894878706199461,
"grad_norm": 0.6633635759353638,
"learning_rate": 0.000546922827846735,
"loss": 3.8991,
"step": 8300
},
{
"epoch": 0.9002695417789758,
"grad_norm": 0.6503227949142456,
"learning_rate": 0.0005465990286022665,
"loss": 3.8792,
"step": 8350
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.7666671276092529,
"learning_rate": 0.0005462752293577981,
"loss": 3.8944,
"step": 8400
},
{
"epoch": 0.9110512129380054,
"grad_norm": 0.6036889553070068,
"learning_rate": 0.0005459514301133296,
"loss": 3.9057,
"step": 8450
},
{
"epoch": 0.9164420485175202,
"grad_norm": 0.6154916882514954,
"learning_rate": 0.0005456276308688613,
"loss": 3.894,
"step": 8500
},
{
"epoch": 0.921832884097035,
"grad_norm": 0.5653623938560486,
"learning_rate": 0.0005453038316243929,
"loss": 3.8763,
"step": 8550
},
{
"epoch": 0.9272237196765498,
"grad_norm": 0.5695276856422424,
"learning_rate": 0.0005449800323799244,
"loss": 3.8709,
"step": 8600
},
{
"epoch": 0.9326145552560647,
"grad_norm": 0.5406414270401001,
"learning_rate": 0.000544656233135456,
"loss": 3.8623,
"step": 8650
},
{
"epoch": 0.9380053908355795,
"grad_norm": 0.5876409411430359,
"learning_rate": 0.0005443324338909875,
"loss": 3.867,
"step": 8700
},
{
"epoch": 0.9433962264150944,
"grad_norm": 0.5680667757987976,
"learning_rate": 0.0005440086346465192,
"loss": 3.8691,
"step": 8750
},
{
"epoch": 0.9487870619946092,
"grad_norm": 0.5708035230636597,
"learning_rate": 0.0005436848354020506,
"loss": 3.8589,
"step": 8800
},
{
"epoch": 0.954177897574124,
"grad_norm": 0.5115900635719299,
"learning_rate": 0.0005433610361575823,
"loss": 3.8488,
"step": 8850
},
{
"epoch": 0.9595687331536388,
"grad_norm": 0.6006278395652771,
"learning_rate": 0.0005430372369131138,
"loss": 3.8652,
"step": 8900
},
{
"epoch": 0.9649595687331537,
"grad_norm": 0.6494414210319519,
"learning_rate": 0.0005427134376686454,
"loss": 3.8716,
"step": 8950
},
{
"epoch": 0.9703504043126685,
"grad_norm": 0.5888278484344482,
"learning_rate": 0.0005423896384241769,
"loss": 3.8592,
"step": 9000
},
{
"epoch": 0.9703504043126685,
"eval_accuracy": 0.34041004078720755,
"eval_loss": 3.7949342727661133,
"eval_runtime": 144.616,
"eval_samples_per_second": 124.544,
"eval_steps_per_second": 7.786,
"step": 9000
},
{
"epoch": 0.9757412398921833,
"grad_norm": 0.5697623491287231,
"learning_rate": 0.0005420658391797086,
"loss": 3.855,
"step": 9050
},
{
"epoch": 0.9811320754716981,
"grad_norm": 0.6447634696960449,
"learning_rate": 0.0005417420399352401,
"loss": 3.866,
"step": 9100
},
{
"epoch": 0.9865229110512129,
"grad_norm": 0.583088219165802,
"learning_rate": 0.0005414182406907717,
"loss": 3.8415,
"step": 9150
},
{
"epoch": 0.9919137466307277,
"grad_norm": 0.5796465277671814,
"learning_rate": 0.0005410944414463032,
"loss": 3.8376,
"step": 9200
},
{
"epoch": 0.9973045822102425,
"grad_norm": 0.6316399574279785,
"learning_rate": 0.0005407706422018348,
"loss": 3.8524,
"step": 9250
},
{
"epoch": 1.0026954177897573,
"grad_norm": 0.5727218389511108,
"learning_rate": 0.0005404468429573664,
"loss": 3.8268,
"step": 9300
},
{
"epoch": 1.0080862533692723,
"grad_norm": 0.5713456869125366,
"learning_rate": 0.000540123043712898,
"loss": 3.7921,
"step": 9350
},
{
"epoch": 1.013477088948787,
"grad_norm": 0.6198617815971375,
"learning_rate": 0.0005397992444684295,
"loss": 3.7876,
"step": 9400
},
{
"epoch": 1.0188679245283019,
"grad_norm": 0.5770072937011719,
"learning_rate": 0.0005394754452239611,
"loss": 3.7856,
"step": 9450
},
{
"epoch": 1.0242587601078168,
"grad_norm": 0.5920796990394592,
"learning_rate": 0.0005391516459794927,
"loss": 3.7855,
"step": 9500
},
{
"epoch": 1.0296495956873315,
"grad_norm": 0.5636431574821472,
"learning_rate": 0.0005388278467350242,
"loss": 3.7857,
"step": 9550
},
{
"epoch": 1.0350404312668464,
"grad_norm": 0.669791579246521,
"learning_rate": 0.0005385040474905557,
"loss": 3.7723,
"step": 9600
},
{
"epoch": 1.0404312668463611,
"grad_norm": 0.6635991930961609,
"learning_rate": 0.0005381802482460874,
"loss": 3.8142,
"step": 9650
},
{
"epoch": 1.045822102425876,
"grad_norm": 0.601437509059906,
"learning_rate": 0.000537856449001619,
"loss": 3.798,
"step": 9700
},
{
"epoch": 1.0512129380053907,
"grad_norm": 0.5584002137184143,
"learning_rate": 0.0005375326497571505,
"loss": 3.7985,
"step": 9750
},
{
"epoch": 1.0566037735849056,
"grad_norm": 0.5526735782623291,
"learning_rate": 0.000537208850512682,
"loss": 3.8074,
"step": 9800
},
{
"epoch": 1.0619946091644206,
"grad_norm": 0.5977774858474731,
"learning_rate": 0.0005368850512682137,
"loss": 3.7989,
"step": 9850
},
{
"epoch": 1.0673854447439353,
"grad_norm": 0.5950897932052612,
"learning_rate": 0.0005365612520237453,
"loss": 3.7924,
"step": 9900
},
{
"epoch": 1.0727762803234502,
"grad_norm": 0.5768362283706665,
"learning_rate": 0.0005362374527792768,
"loss": 3.7855,
"step": 9950
},
{
"epoch": 1.0781671159029649,
"grad_norm": 0.6516294479370117,
"learning_rate": 0.0005359136535348084,
"loss": 3.7811,
"step": 10000
},
{
"epoch": 1.0781671159029649,
"eval_accuracy": 0.34365658907510427,
"eval_loss": 3.762051582336426,
"eval_runtime": 144.5563,
"eval_samples_per_second": 124.595,
"eval_steps_per_second": 7.789,
"step": 10000
},
{
"epoch": 1.0835579514824798,
"grad_norm": 0.54844731092453,
"learning_rate": 0.0005355898542903399,
"loss": 3.7802,
"step": 10050
},
{
"epoch": 1.0889487870619945,
"grad_norm": 0.6120631694793701,
"learning_rate": 0.0005352660550458716,
"loss": 3.789,
"step": 10100
},
{
"epoch": 1.0943396226415094,
"grad_norm": 0.5617305636405945,
"learning_rate": 0.000534942255801403,
"loss": 3.784,
"step": 10150
},
{
"epoch": 1.0997304582210243,
"grad_norm": 0.6198379993438721,
"learning_rate": 0.0005346184565569347,
"loss": 3.7835,
"step": 10200
},
{
"epoch": 1.105121293800539,
"grad_norm": 0.5185474753379822,
"learning_rate": 0.0005342946573124662,
"loss": 3.7928,
"step": 10250
},
{
"epoch": 1.110512129380054,
"grad_norm": 0.5625671744346619,
"learning_rate": 0.0005339708580679978,
"loss": 3.7792,
"step": 10300
},
{
"epoch": 1.1159029649595686,
"grad_norm": 0.6791179180145264,
"learning_rate": 0.0005336470588235293,
"loss": 3.7647,
"step": 10350
},
{
"epoch": 1.1212938005390836,
"grad_norm": 0.5952463746070862,
"learning_rate": 0.000533323259579061,
"loss": 3.7826,
"step": 10400
},
{
"epoch": 1.1266846361185983,
"grad_norm": 0.5367695689201355,
"learning_rate": 0.0005329994603345925,
"loss": 3.7714,
"step": 10450
},
{
"epoch": 1.1320754716981132,
"grad_norm": 0.6577426195144653,
"learning_rate": 0.0005326756610901241,
"loss": 3.7588,
"step": 10500
},
{
"epoch": 1.137466307277628,
"grad_norm": 0.5481351017951965,
"learning_rate": 0.0005323518618456556,
"loss": 3.7808,
"step": 10550
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.5973223447799683,
"learning_rate": 0.0005320280626011872,
"loss": 3.7903,
"step": 10600
},
{
"epoch": 1.1482479784366577,
"grad_norm": 0.6764339804649353,
"learning_rate": 0.0005317042633567188,
"loss": 3.7673,
"step": 10650
},
{
"epoch": 1.1536388140161726,
"grad_norm": 0.6202555894851685,
"learning_rate": 0.0005313804641122504,
"loss": 3.7633,
"step": 10700
},
{
"epoch": 1.1590296495956873,
"grad_norm": 0.6169992089271545,
"learning_rate": 0.0005310566648677819,
"loss": 3.7519,
"step": 10750
},
{
"epoch": 1.1644204851752022,
"grad_norm": 0.5893779993057251,
"learning_rate": 0.0005307328656233135,
"loss": 3.7433,
"step": 10800
},
{
"epoch": 1.169811320754717,
"grad_norm": 0.5708191394805908,
"learning_rate": 0.000530409066378845,
"loss": 3.7609,
"step": 10850
},
{
"epoch": 1.1752021563342319,
"grad_norm": 0.5436216592788696,
"learning_rate": 0.0005300852671343766,
"loss": 3.7635,
"step": 10900
},
{
"epoch": 1.1805929919137466,
"grad_norm": 0.5945119261741638,
"learning_rate": 0.0005297614678899081,
"loss": 3.7755,
"step": 10950
},
{
"epoch": 1.1859838274932615,
"grad_norm": 0.5752778053283691,
"learning_rate": 0.0005294376686454398,
"loss": 3.7908,
"step": 11000
},
{
"epoch": 1.1859838274932615,
"eval_accuracy": 0.3459605735612103,
"eval_loss": 3.7342617511749268,
"eval_runtime": 145.0651,
"eval_samples_per_second": 124.158,
"eval_steps_per_second": 7.762,
"step": 11000
},
{
"epoch": 1.1913746630727764,
"grad_norm": 0.5780414342880249,
"learning_rate": 0.0005291138694009714,
"loss": 3.7773,
"step": 11050
},
{
"epoch": 1.196765498652291,
"grad_norm": 0.6393083930015564,
"learning_rate": 0.0005287900701565029,
"loss": 3.7719,
"step": 11100
},
{
"epoch": 1.202156334231806,
"grad_norm": 0.5772005319595337,
"learning_rate": 0.0005284662709120345,
"loss": 3.7688,
"step": 11150
},
{
"epoch": 1.2075471698113207,
"grad_norm": 0.5036531686782837,
"learning_rate": 0.0005281424716675661,
"loss": 3.7578,
"step": 11200
},
{
"epoch": 1.2129380053908356,
"grad_norm": 0.56954026222229,
"learning_rate": 0.0005278186724230977,
"loss": 3.7505,
"step": 11250
},
{
"epoch": 1.2183288409703503,
"grad_norm": 0.6294519901275635,
"learning_rate": 0.0005274948731786292,
"loss": 3.7672,
"step": 11300
},
{
"epoch": 1.2237196765498652,
"grad_norm": 0.6276326775550842,
"learning_rate": 0.0005271710739341608,
"loss": 3.7523,
"step": 11350
},
{
"epoch": 1.2291105121293802,
"grad_norm": 0.6219097971916199,
"learning_rate": 0.0005268472746896923,
"loss": 3.7687,
"step": 11400
},
{
"epoch": 1.2345013477088949,
"grad_norm": 0.5939977765083313,
"learning_rate": 0.000526523475445224,
"loss": 3.7595,
"step": 11450
},
{
"epoch": 1.2398921832884098,
"grad_norm": 0.6375150680541992,
"learning_rate": 0.0005261996762007554,
"loss": 3.7498,
"step": 11500
},
{
"epoch": 1.2452830188679245,
"grad_norm": 0.534351110458374,
"learning_rate": 0.0005258758769562871,
"loss": 3.7602,
"step": 11550
},
{
"epoch": 1.2506738544474394,
"grad_norm": 0.5753054618835449,
"learning_rate": 0.0005255520777118186,
"loss": 3.7345,
"step": 11600
},
{
"epoch": 1.256064690026954,
"grad_norm": 0.5968658328056335,
"learning_rate": 0.0005252282784673502,
"loss": 3.7679,
"step": 11650
},
{
"epoch": 1.261455525606469,
"grad_norm": 0.6453511118888855,
"learning_rate": 0.0005249044792228817,
"loss": 3.7641,
"step": 11700
},
{
"epoch": 1.266846361185984,
"grad_norm": 0.5844177007675171,
"learning_rate": 0.0005245806799784133,
"loss": 3.7551,
"step": 11750
},
{
"epoch": 1.2722371967654986,
"grad_norm": 0.5597920417785645,
"learning_rate": 0.0005242568807339449,
"loss": 3.7458,
"step": 11800
},
{
"epoch": 1.2776280323450135,
"grad_norm": 0.6149951815605164,
"learning_rate": 0.0005239330814894765,
"loss": 3.7582,
"step": 11850
},
{
"epoch": 1.2830188679245282,
"grad_norm": 0.5561873316764832,
"learning_rate": 0.000523609282245008,
"loss": 3.7456,
"step": 11900
},
{
"epoch": 1.2884097035040432,
"grad_norm": 0.6077041029930115,
"learning_rate": 0.0005232854830005396,
"loss": 3.7601,
"step": 11950
},
{
"epoch": 1.2938005390835579,
"grad_norm": 0.6389844417572021,
"learning_rate": 0.0005229616837560712,
"loss": 3.7514,
"step": 12000
},
{
"epoch": 1.2938005390835579,
"eval_accuracy": 0.3482391332715678,
"eval_loss": 3.7092418670654297,
"eval_runtime": 144.795,
"eval_samples_per_second": 124.39,
"eval_steps_per_second": 7.777,
"step": 12000
},
{
"epoch": 1.2991913746630728,
"grad_norm": 0.6131287217140198,
"learning_rate": 0.0005226378845116028,
"loss": 3.7472,
"step": 12050
},
{
"epoch": 1.3045822102425877,
"grad_norm": 0.5687003135681152,
"learning_rate": 0.0005223140852671344,
"loss": 3.7541,
"step": 12100
},
{
"epoch": 1.3099730458221024,
"grad_norm": 0.603330135345459,
"learning_rate": 0.0005219902860226659,
"loss": 3.7719,
"step": 12150
},
{
"epoch": 1.3153638814016173,
"grad_norm": 0.5906956791877747,
"learning_rate": 0.0005216664867781975,
"loss": 3.7419,
"step": 12200
},
{
"epoch": 1.320754716981132,
"grad_norm": 0.5257678627967834,
"learning_rate": 0.000521342687533729,
"loss": 3.7494,
"step": 12250
},
{
"epoch": 1.326145552560647,
"grad_norm": 0.55323725938797,
"learning_rate": 0.0005210188882892606,
"loss": 3.7598,
"step": 12300
},
{
"epoch": 1.3315363881401616,
"grad_norm": 0.6195726990699768,
"learning_rate": 0.0005206950890447922,
"loss": 3.7455,
"step": 12350
},
{
"epoch": 1.3369272237196765,
"grad_norm": 0.6453518271446228,
"learning_rate": 0.0005203712898003238,
"loss": 3.7346,
"step": 12400
},
{
"epoch": 1.3423180592991915,
"grad_norm": 0.591871976852417,
"learning_rate": 0.0005200474905558553,
"loss": 3.7498,
"step": 12450
},
{
"epoch": 1.3477088948787062,
"grad_norm": 0.8153529167175293,
"learning_rate": 0.0005197236913113869,
"loss": 3.73,
"step": 12500
},
{
"epoch": 1.353099730458221,
"grad_norm": 0.6054574847221375,
"learning_rate": 0.0005193998920669184,
"loss": 3.7438,
"step": 12550
},
{
"epoch": 1.3584905660377358,
"grad_norm": 0.5890757441520691,
"learning_rate": 0.0005190760928224501,
"loss": 3.7383,
"step": 12600
},
{
"epoch": 1.3638814016172507,
"grad_norm": 0.5858767032623291,
"learning_rate": 0.0005187522935779816,
"loss": 3.7272,
"step": 12650
},
{
"epoch": 1.3692722371967654,
"grad_norm": 0.5663976073265076,
"learning_rate": 0.0005184284943335132,
"loss": 3.7232,
"step": 12700
},
{
"epoch": 1.3746630727762803,
"grad_norm": 0.6173025965690613,
"learning_rate": 0.0005181046950890447,
"loss": 3.7428,
"step": 12750
},
{
"epoch": 1.3800539083557952,
"grad_norm": 0.5534438490867615,
"learning_rate": 0.0005177808958445764,
"loss": 3.7214,
"step": 12800
},
{
"epoch": 1.38544474393531,
"grad_norm": 0.6565149426460266,
"learning_rate": 0.0005174570966001078,
"loss": 3.7343,
"step": 12850
},
{
"epoch": 1.3908355795148248,
"grad_norm": 0.518814742565155,
"learning_rate": 0.0005171332973556395,
"loss": 3.7343,
"step": 12900
},
{
"epoch": 1.3962264150943398,
"grad_norm": 0.6175557374954224,
"learning_rate": 0.000516809498111171,
"loss": 3.726,
"step": 12950
},
{
"epoch": 1.4016172506738545,
"grad_norm": 0.525554895401001,
"learning_rate": 0.0005164856988667026,
"loss": 3.7207,
"step": 13000
},
{
"epoch": 1.4016172506738545,
"eval_accuracy": 0.3513273743019188,
"eval_loss": 3.684098482131958,
"eval_runtime": 145.0832,
"eval_samples_per_second": 124.143,
"eval_steps_per_second": 7.761,
"step": 13000
},
{
"epoch": 1.4070080862533692,
"grad_norm": 0.5694339275360107,
"learning_rate": 0.0005161618996222341,
"loss": 3.7017,
"step": 13050
},
{
"epoch": 1.412398921832884,
"grad_norm": 0.5720424056053162,
"learning_rate": 0.0005158381003777657,
"loss": 3.7224,
"step": 13100
},
{
"epoch": 1.417789757412399,
"grad_norm": 0.5959078073501587,
"learning_rate": 0.0005155143011332973,
"loss": 3.7268,
"step": 13150
},
{
"epoch": 1.4231805929919137,
"grad_norm": 0.5883017182350159,
"learning_rate": 0.0005151905018888289,
"loss": 3.7077,
"step": 13200
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.5214637517929077,
"learning_rate": 0.0005148667026443604,
"loss": 3.7251,
"step": 13250
},
{
"epoch": 1.4339622641509435,
"grad_norm": 0.5803607106208801,
"learning_rate": 0.000514542903399892,
"loss": 3.7207,
"step": 13300
},
{
"epoch": 1.4393530997304582,
"grad_norm": 0.6287586688995361,
"learning_rate": 0.0005142191041554237,
"loss": 3.7256,
"step": 13350
},
{
"epoch": 1.444743935309973,
"grad_norm": 0.614562451839447,
"learning_rate": 0.0005138953049109552,
"loss": 3.7166,
"step": 13400
},
{
"epoch": 1.4501347708894878,
"grad_norm": 0.5314472317695618,
"learning_rate": 0.0005135715056664868,
"loss": 3.7338,
"step": 13450
},
{
"epoch": 1.4555256064690028,
"grad_norm": 0.6482129693031311,
"learning_rate": 0.0005132477064220183,
"loss": 3.7264,
"step": 13500
},
{
"epoch": 1.4609164420485174,
"grad_norm": 0.5918267369270325,
"learning_rate": 0.0005129239071775499,
"loss": 3.7023,
"step": 13550
},
{
"epoch": 1.4663072776280324,
"grad_norm": 0.5850944519042969,
"learning_rate": 0.0005126001079330814,
"loss": 3.7298,
"step": 13600
},
{
"epoch": 1.4716981132075473,
"grad_norm": 0.5356786847114563,
"learning_rate": 0.000512276308688613,
"loss": 3.7098,
"step": 13650
},
{
"epoch": 1.477088948787062,
"grad_norm": 0.5910783410072327,
"learning_rate": 0.0005119525094441446,
"loss": 3.7309,
"step": 13700
},
{
"epoch": 1.482479784366577,
"grad_norm": 0.5758869051933289,
"learning_rate": 0.0005116287101996762,
"loss": 3.703,
"step": 13750
},
{
"epoch": 1.4878706199460916,
"grad_norm": 0.5499842762947083,
"learning_rate": 0.0005113049109552077,
"loss": 3.7275,
"step": 13800
},
{
"epoch": 1.4932614555256065,
"grad_norm": 0.6198793053627014,
"learning_rate": 0.0005109811117107393,
"loss": 3.7118,
"step": 13850
},
{
"epoch": 1.4986522911051212,
"grad_norm": 0.5693920850753784,
"learning_rate": 0.0005106573124662708,
"loss": 3.7209,
"step": 13900
},
{
"epoch": 1.5040431266846361,
"grad_norm": 0.6063724160194397,
"learning_rate": 0.0005103335132218025,
"loss": 3.7108,
"step": 13950
},
{
"epoch": 1.509433962264151,
"grad_norm": 0.5341413021087646,
"learning_rate": 0.000510009713977334,
"loss": 3.7092,
"step": 14000
},
{
"epoch": 1.509433962264151,
"eval_accuracy": 0.35327356482845174,
"eval_loss": 3.66573429107666,
"eval_runtime": 144.4021,
"eval_samples_per_second": 124.728,
"eval_steps_per_second": 7.798,
"step": 14000
},
{
"epoch": 1.5148247978436657,
"grad_norm": 0.5940130352973938,
"learning_rate": 0.0005096859147328656,
"loss": 3.7083,
"step": 14050
},
{
"epoch": 1.5202156334231804,
"grad_norm": 0.5854769945144653,
"learning_rate": 0.0005093621154883971,
"loss": 3.7072,
"step": 14100
},
{
"epoch": 1.5256064690026954,
"grad_norm": 0.5369813442230225,
"learning_rate": 0.0005090383162439288,
"loss": 3.6994,
"step": 14150
},
{
"epoch": 1.5309973045822103,
"grad_norm": 0.6851678490638733,
"learning_rate": 0.0005087145169994602,
"loss": 3.7003,
"step": 14200
},
{
"epoch": 1.536388140161725,
"grad_norm": 0.5451446771621704,
"learning_rate": 0.0005083907177549918,
"loss": 3.7145,
"step": 14250
},
{
"epoch": 1.54177897574124,
"grad_norm": 0.550528347492218,
"learning_rate": 0.0005080669185105234,
"loss": 3.7145,
"step": 14300
},
{
"epoch": 1.5471698113207548,
"grad_norm": 0.5607097744941711,
"learning_rate": 0.000507743119266055,
"loss": 3.7074,
"step": 14350
},
{
"epoch": 1.5525606469002695,
"grad_norm": 0.5699529051780701,
"learning_rate": 0.0005074193200215865,
"loss": 3.7239,
"step": 14400
},
{
"epoch": 1.5579514824797842,
"grad_norm": 0.6193976402282715,
"learning_rate": 0.0005070955207771181,
"loss": 3.7146,
"step": 14450
},
{
"epoch": 1.5633423180592994,
"grad_norm": 0.5902137756347656,
"learning_rate": 0.0005067717215326498,
"loss": 3.6944,
"step": 14500
},
{
"epoch": 1.568733153638814,
"grad_norm": 0.5481423735618591,
"learning_rate": 0.0005064479222881813,
"loss": 3.7103,
"step": 14550
},
{
"epoch": 1.5741239892183287,
"grad_norm": 0.6323958039283752,
"learning_rate": 0.0005061241230437129,
"loss": 3.6955,
"step": 14600
},
{
"epoch": 1.5795148247978437,
"grad_norm": 0.6062991619110107,
"learning_rate": 0.0005058003237992444,
"loss": 3.6937,
"step": 14650
},
{
"epoch": 1.5849056603773586,
"grad_norm": 0.5340829491615295,
"learning_rate": 0.0005054830005396654,
"loss": 3.7098,
"step": 14700
},
{
"epoch": 1.5902964959568733,
"grad_norm": 0.590190052986145,
"learning_rate": 0.0005051592012951969,
"loss": 3.7098,
"step": 14750
},
{
"epoch": 1.595687331536388,
"grad_norm": 0.5628999471664429,
"learning_rate": 0.0005048354020507286,
"loss": 3.6799,
"step": 14800
},
{
"epoch": 1.6010781671159031,
"grad_norm": 0.5697661638259888,
"learning_rate": 0.00050451160280626,
"loss": 3.7014,
"step": 14850
},
{
"epoch": 1.6064690026954178,
"grad_norm": 0.5965352058410645,
"learning_rate": 0.0005041878035617917,
"loss": 3.7081,
"step": 14900
},
{
"epoch": 1.6118598382749325,
"grad_norm": 0.5691752433776855,
"learning_rate": 0.0005038640043173232,
"loss": 3.697,
"step": 14950
},
{
"epoch": 1.6172506738544474,
"grad_norm": 0.5844449400901794,
"learning_rate": 0.0005035402050728548,
"loss": 3.6968,
"step": 15000
},
{
"epoch": 1.6172506738544474,
"eval_accuracy": 0.35501950808274263,
"eval_loss": 3.6473617553710938,
"eval_runtime": 144.9331,
"eval_samples_per_second": 124.271,
"eval_steps_per_second": 7.769,
"step": 15000
},
{
"epoch": 1.6226415094339623,
"grad_norm": 0.5243232846260071,
"learning_rate": 0.0005032164058283863,
"loss": 3.6876,
"step": 15050
},
{
"epoch": 1.628032345013477,
"grad_norm": 0.521431565284729,
"learning_rate": 0.0005028926065839179,
"loss": 3.6816,
"step": 15100
},
{
"epoch": 1.633423180592992,
"grad_norm": 0.54181307554245,
"learning_rate": 0.0005025688073394495,
"loss": 3.681,
"step": 15150
},
{
"epoch": 1.6388140161725069,
"grad_norm": 0.5746421217918396,
"learning_rate": 0.0005022450080949811,
"loss": 3.6847,
"step": 15200
},
{
"epoch": 1.6442048517520216,
"grad_norm": 0.5637415051460266,
"learning_rate": 0.0005019212088505126,
"loss": 3.6782,
"step": 15250
},
{
"epoch": 1.6495956873315363,
"grad_norm": 0.5300856828689575,
"learning_rate": 0.0005015974096060442,
"loss": 3.6848,
"step": 15300
},
{
"epoch": 1.6549865229110512,
"grad_norm": 0.5390794277191162,
"learning_rate": 0.0005012736103615758,
"loss": 3.7087,
"step": 15350
},
{
"epoch": 1.6603773584905661,
"grad_norm": 0.6010688543319702,
"learning_rate": 0.0005009498111171074,
"loss": 3.6966,
"step": 15400
},
{
"epoch": 1.6657681940700808,
"grad_norm": 0.548942506313324,
"learning_rate": 0.0005006260118726389,
"loss": 3.6648,
"step": 15450
},
{
"epoch": 1.6711590296495957,
"grad_norm": 0.5794510245323181,
"learning_rate": 0.0005003022126281705,
"loss": 3.6827,
"step": 15500
},
{
"epoch": 1.6765498652291106,
"grad_norm": 0.5393055081367493,
"learning_rate": 0.000499978413383702,
"loss": 3.68,
"step": 15550
},
{
"epoch": 1.6819407008086253,
"grad_norm": 0.5647615194320679,
"learning_rate": 0.0004996546141392336,
"loss": 3.6677,
"step": 15600
},
{
"epoch": 1.68733153638814,
"grad_norm": 0.5918028354644775,
"learning_rate": 0.0004993308148947651,
"loss": 3.6859,
"step": 15650
},
{
"epoch": 1.692722371967655,
"grad_norm": 0.5556984543800354,
"learning_rate": 0.0004990070156502968,
"loss": 3.6733,
"step": 15700
},
{
"epoch": 1.6981132075471699,
"grad_norm": 0.5747886896133423,
"learning_rate": 0.0004986832164058284,
"loss": 3.6979,
"step": 15750
},
{
"epoch": 1.7035040431266846,
"grad_norm": 0.5632966756820679,
"learning_rate": 0.0004983594171613599,
"loss": 3.6957,
"step": 15800
},
{
"epoch": 1.7088948787061995,
"grad_norm": 0.5908682942390442,
"learning_rate": 0.0004980356179168915,
"loss": 3.6886,
"step": 15850
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.6491772532463074,
"learning_rate": 0.000497711818672423,
"loss": 3.6902,
"step": 15900
},
{
"epoch": 1.719676549865229,
"grad_norm": 0.594153106212616,
"learning_rate": 0.0004973880194279547,
"loss": 3.6752,
"step": 15950
},
{
"epoch": 1.7250673854447438,
"grad_norm": 0.6420136094093323,
"learning_rate": 0.0004970642201834862,
"loss": 3.6724,
"step": 16000
},
{
"epoch": 1.7250673854447438,
"eval_accuracy": 0.3570638121669721,
"eval_loss": 3.6263442039489746,
"eval_runtime": 144.5518,
"eval_samples_per_second": 124.599,
"eval_steps_per_second": 7.79,
"step": 16000
},
{
"epoch": 1.7304582210242587,
"grad_norm": 0.5980614423751831,
"learning_rate": 0.0004967404209390178,
"loss": 3.6727,
"step": 16050
},
{
"epoch": 1.7358490566037736,
"grad_norm": 0.5954625010490417,
"learning_rate": 0.0004964166216945493,
"loss": 3.6587,
"step": 16100
},
{
"epoch": 1.7412398921832883,
"grad_norm": 0.563701868057251,
"learning_rate": 0.000496092822450081,
"loss": 3.683,
"step": 16150
},
{
"epoch": 1.7466307277628033,
"grad_norm": 0.5353266596794128,
"learning_rate": 0.0004957690232056125,
"loss": 3.6928,
"step": 16200
},
{
"epoch": 1.7520215633423182,
"grad_norm": 0.6115831136703491,
"learning_rate": 0.0004954452239611441,
"loss": 3.6697,
"step": 16250
},
{
"epoch": 1.7574123989218329,
"grad_norm": 0.5373592376708984,
"learning_rate": 0.0004951214247166756,
"loss": 3.6907,
"step": 16300
},
{
"epoch": 1.7628032345013476,
"grad_norm": 0.7120974063873291,
"learning_rate": 0.0004947976254722072,
"loss": 3.6741,
"step": 16350
},
{
"epoch": 1.7681940700808625,
"grad_norm": 0.5365995764732361,
"learning_rate": 0.0004944738262277387,
"loss": 3.6656,
"step": 16400
},
{
"epoch": 1.7735849056603774,
"grad_norm": 0.6026771664619446,
"learning_rate": 0.0004941500269832703,
"loss": 3.6708,
"step": 16450
},
{
"epoch": 1.778975741239892,
"grad_norm": 0.5560598969459534,
"learning_rate": 0.0004938262277388019,
"loss": 3.6594,
"step": 16500
},
{
"epoch": 1.784366576819407,
"grad_norm": 0.6191685199737549,
"learning_rate": 0.0004935024284943335,
"loss": 3.685,
"step": 16550
},
{
"epoch": 1.789757412398922,
"grad_norm": 0.6867356896400452,
"learning_rate": 0.000493178629249865,
"loss": 3.6698,
"step": 16600
},
{
"epoch": 1.7951482479784366,
"grad_norm": 0.5517817139625549,
"learning_rate": 0.0004928548300053966,
"loss": 3.6725,
"step": 16650
},
{
"epoch": 1.8005390835579513,
"grad_norm": 0.5496578216552734,
"learning_rate": 0.0004925310307609282,
"loss": 3.6745,
"step": 16700
},
{
"epoch": 1.8059299191374663,
"grad_norm": 0.540912926197052,
"learning_rate": 0.0004922072315164598,
"loss": 3.6624,
"step": 16750
},
{
"epoch": 1.8113207547169812,
"grad_norm": 0.5637075304985046,
"learning_rate": 0.0004918834322719913,
"loss": 3.6658,
"step": 16800
},
{
"epoch": 1.8167115902964959,
"grad_norm": 0.5674017071723938,
"learning_rate": 0.0004915596330275229,
"loss": 3.6701,
"step": 16850
},
{
"epoch": 1.8221024258760108,
"grad_norm": 0.5619826912879944,
"learning_rate": 0.0004912487857528331,
"loss": 3.6714,
"step": 16900
},
{
"epoch": 1.8274932614555257,
"grad_norm": 0.5217414498329163,
"learning_rate": 0.0004909249865083648,
"loss": 3.6863,
"step": 16950
},
{
"epoch": 1.8328840970350404,
"grad_norm": 0.565362811088562,
"learning_rate": 0.0004906011872638964,
"loss": 3.6737,
"step": 17000
},
{
"epoch": 1.8328840970350404,
"eval_accuracy": 0.3582435652228617,
"eval_loss": 3.6162045001983643,
"eval_runtime": 144.7573,
"eval_samples_per_second": 124.422,
"eval_steps_per_second": 7.779,
"step": 17000
},
{
"epoch": 1.838274932614555,
"grad_norm": 0.5198625326156616,
"learning_rate": 0.0004902773880194279,
"loss": 3.6515,
"step": 17050
},
{
"epoch": 1.8436657681940702,
"grad_norm": 0.5708647966384888,
"learning_rate": 0.0004899535887749595,
"loss": 3.6661,
"step": 17100
},
{
"epoch": 1.849056603773585,
"grad_norm": 0.583521842956543,
"learning_rate": 0.000489629789530491,
"loss": 3.6622,
"step": 17150
},
{
"epoch": 1.8544474393530996,
"grad_norm": 0.5446178913116455,
"learning_rate": 0.0004893059902860227,
"loss": 3.647,
"step": 17200
},
{
"epoch": 1.8598382749326146,
"grad_norm": 0.5899081230163574,
"learning_rate": 0.0004889821910415542,
"loss": 3.6697,
"step": 17250
},
{
"epoch": 1.8652291105121295,
"grad_norm": 0.6142265796661377,
"learning_rate": 0.0004886583917970858,
"loss": 3.6597,
"step": 17300
},
{
"epoch": 1.8706199460916442,
"grad_norm": 0.9669179320335388,
"learning_rate": 0.0004883345925526173,
"loss": 3.6618,
"step": 17350
},
{
"epoch": 1.8760107816711589,
"grad_norm": 0.564871072769165,
"learning_rate": 0.00048801079330814887,
"loss": 3.6571,
"step": 17400
},
{
"epoch": 1.881401617250674,
"grad_norm": 0.5841614007949829,
"learning_rate": 0.00048768699406368047,
"loss": 3.6743,
"step": 17450
},
{
"epoch": 1.8867924528301887,
"grad_norm": 0.5918266773223877,
"learning_rate": 0.000487363194819212,
"loss": 3.6675,
"step": 17500
},
{
"epoch": 1.8921832884097034,
"grad_norm": 0.5641137361526489,
"learning_rate": 0.0004870393955747436,
"loss": 3.6488,
"step": 17550
},
{
"epoch": 1.8975741239892183,
"grad_norm": 0.5747765898704529,
"learning_rate": 0.0004867155963302752,
"loss": 3.6558,
"step": 17600
},
{
"epoch": 1.9029649595687332,
"grad_norm": 0.5801583528518677,
"learning_rate": 0.0004863917970858068,
"loss": 3.6348,
"step": 17650
},
{
"epoch": 1.908355795148248,
"grad_norm": 0.5788767337799072,
"learning_rate": 0.00048606799784133833,
"loss": 3.6446,
"step": 17700
},
{
"epoch": 1.9137466307277629,
"grad_norm": 0.5854235291481018,
"learning_rate": 0.00048574419859686994,
"loss": 3.648,
"step": 17750
},
{
"epoch": 1.9191374663072778,
"grad_norm": 0.5577847957611084,
"learning_rate": 0.0004854203993524015,
"loss": 3.6665,
"step": 17800
},
{
"epoch": 1.9245283018867925,
"grad_norm": 0.536088228225708,
"learning_rate": 0.000485096600107933,
"loss": 3.6671,
"step": 17850
},
{
"epoch": 1.9299191374663072,
"grad_norm": 0.5481696724891663,
"learning_rate": 0.00048477280086346464,
"loss": 3.6721,
"step": 17900
},
{
"epoch": 1.935309973045822,
"grad_norm": 0.547725260257721,
"learning_rate": 0.00048444900161899614,
"loss": 3.6622,
"step": 17950
},
{
"epoch": 1.940700808625337,
"grad_norm": 0.5937644243240356,
"learning_rate": 0.00048412520237452774,
"loss": 3.6477,
"step": 18000
},
{
"epoch": 1.940700808625337,
"eval_accuracy": 0.3597537317105952,
"eval_loss": 3.5971779823303223,
"eval_runtime": 144.7293,
"eval_samples_per_second": 124.446,
"eval_steps_per_second": 7.78,
"step": 18000
},
{
"epoch": 1.9460916442048517,
"grad_norm": 0.5931717753410339,
"learning_rate": 0.0004838014031300593,
"loss": 3.6358,
"step": 18050
},
{
"epoch": 1.9514824797843666,
"grad_norm": 0.6099575757980347,
"learning_rate": 0.0004834776038855909,
"loss": 3.644,
"step": 18100
},
{
"epoch": 1.9568733153638815,
"grad_norm": 0.5704085230827332,
"learning_rate": 0.00048315380464112245,
"loss": 3.6517,
"step": 18150
},
{
"epoch": 1.9622641509433962,
"grad_norm": 0.5262129902839661,
"learning_rate": 0.00048283000539665405,
"loss": 3.6523,
"step": 18200
},
{
"epoch": 1.967654986522911,
"grad_norm": 0.512693464756012,
"learning_rate": 0.0004825062061521856,
"loss": 3.6432,
"step": 18250
},
{
"epoch": 1.9730458221024259,
"grad_norm": 0.5602070093154907,
"learning_rate": 0.00048218240690771716,
"loss": 3.6688,
"step": 18300
},
{
"epoch": 1.9784366576819408,
"grad_norm": 0.5258756875991821,
"learning_rate": 0.00048185860766324876,
"loss": 3.6471,
"step": 18350
},
{
"epoch": 1.9838274932614555,
"grad_norm": 0.5815879106521606,
"learning_rate": 0.0004815348084187803,
"loss": 3.6496,
"step": 18400
},
{
"epoch": 1.9892183288409704,
"grad_norm": 0.5756340622901917,
"learning_rate": 0.0004812110091743119,
"loss": 3.6589,
"step": 18450
},
{
"epoch": 1.9946091644204853,
"grad_norm": 0.6157427430152893,
"learning_rate": 0.00048088720992984347,
"loss": 3.6378,
"step": 18500
},
{
"epoch": 2.0,
"grad_norm": 1.1887223720550537,
"learning_rate": 0.00048056341068537507,
"loss": 3.6418,
"step": 18550
},
{
"epoch": 2.0053908355795147,
"grad_norm": 0.5796721577644348,
"learning_rate": 0.00048023961144090657,
"loss": 3.5398,
"step": 18600
},
{
"epoch": 2.01078167115903,
"grad_norm": 0.5702629089355469,
"learning_rate": 0.00047991581219643817,
"loss": 3.5522,
"step": 18650
},
{
"epoch": 2.0161725067385445,
"grad_norm": 0.5687718391418457,
"learning_rate": 0.0004795920129519697,
"loss": 3.5469,
"step": 18700
},
{
"epoch": 2.0215633423180592,
"grad_norm": 0.6058363914489746,
"learning_rate": 0.0004792682137075013,
"loss": 3.5565,
"step": 18750
},
{
"epoch": 2.026954177897574,
"grad_norm": 0.5502753257751465,
"learning_rate": 0.0004789444144630329,
"loss": 3.5781,
"step": 18800
},
{
"epoch": 2.032345013477089,
"grad_norm": 0.5815129280090332,
"learning_rate": 0.00047862061521856443,
"loss": 3.5754,
"step": 18850
},
{
"epoch": 2.0377358490566038,
"grad_norm": 0.5740176439285278,
"learning_rate": 0.00047829681597409603,
"loss": 3.5584,
"step": 18900
},
{
"epoch": 2.0431266846361185,
"grad_norm": 0.5666323304176331,
"learning_rate": 0.0004779730167296276,
"loss": 3.5638,
"step": 18950
},
{
"epoch": 2.0485175202156336,
"grad_norm": 0.5930228233337402,
"learning_rate": 0.0004776492174851592,
"loss": 3.5922,
"step": 19000
},
{
"epoch": 2.0485175202156336,
"eval_accuracy": 0.36108874974317173,
"eval_loss": 3.588949203491211,
"eval_runtime": 144.8607,
"eval_samples_per_second": 124.333,
"eval_steps_per_second": 7.773,
"step": 19000
},
{
"epoch": 2.0539083557951483,
"grad_norm": 0.5626940727233887,
"learning_rate": 0.00047732541824069074,
"loss": 3.5695,
"step": 19050
},
{
"epoch": 2.059299191374663,
"grad_norm": 0.6210435628890991,
"learning_rate": 0.0004770016189962223,
"loss": 3.5607,
"step": 19100
},
{
"epoch": 2.0646900269541777,
"grad_norm": 0.5758371949195862,
"learning_rate": 0.0004766778197517539,
"loss": 3.5751,
"step": 19150
},
{
"epoch": 2.070080862533693,
"grad_norm": 0.6037834286689758,
"learning_rate": 0.0004763540205072854,
"loss": 3.5875,
"step": 19200
},
{
"epoch": 2.0754716981132075,
"grad_norm": 0.5686853528022766,
"learning_rate": 0.00047603022126281705,
"loss": 3.5713,
"step": 19250
},
{
"epoch": 2.0808625336927222,
"grad_norm": 0.6485291123390198,
"learning_rate": 0.00047570642201834855,
"loss": 3.5609,
"step": 19300
},
{
"epoch": 2.0862533692722374,
"grad_norm": 0.6449443697929382,
"learning_rate": 0.00047538262277388015,
"loss": 3.5789,
"step": 19350
},
{
"epoch": 2.091644204851752,
"grad_norm": 0.550469696521759,
"learning_rate": 0.0004750588235294117,
"loss": 3.554,
"step": 19400
},
{
"epoch": 2.0970350404312668,
"grad_norm": 0.5864623785018921,
"learning_rate": 0.0004747350242849433,
"loss": 3.5732,
"step": 19450
},
{
"epoch": 2.1024258760107815,
"grad_norm": 0.6241238117218018,
"learning_rate": 0.00047441122504047486,
"loss": 3.5717,
"step": 19500
},
{
"epoch": 2.1078167115902966,
"grad_norm": 0.602936327457428,
"learning_rate": 0.0004740874257960064,
"loss": 3.5604,
"step": 19550
},
{
"epoch": 2.1132075471698113,
"grad_norm": 0.5674468874931335,
"learning_rate": 0.000473763626551538,
"loss": 3.5643,
"step": 19600
},
{
"epoch": 2.118598382749326,
"grad_norm": 0.600834310054779,
"learning_rate": 0.00047343982730706956,
"loss": 3.5695,
"step": 19650
},
{
"epoch": 2.123989218328841,
"grad_norm": 0.5601726770401001,
"learning_rate": 0.00047311602806260117,
"loss": 3.5724,
"step": 19700
},
{
"epoch": 2.129380053908356,
"grad_norm": 0.5551213622093201,
"learning_rate": 0.0004727922288181327,
"loss": 3.5783,
"step": 19750
},
{
"epoch": 2.1347708894878705,
"grad_norm": 0.5737272500991821,
"learning_rate": 0.0004724684295736643,
"loss": 3.5642,
"step": 19800
},
{
"epoch": 2.1401617250673857,
"grad_norm": 0.5712146162986755,
"learning_rate": 0.0004721446303291959,
"loss": 3.5894,
"step": 19850
},
{
"epoch": 2.1455525606469004,
"grad_norm": 0.5902134776115417,
"learning_rate": 0.0004718208310847275,
"loss": 3.5903,
"step": 19900
},
{
"epoch": 2.150943396226415,
"grad_norm": 0.6139191389083862,
"learning_rate": 0.000471497031840259,
"loss": 3.5862,
"step": 19950
},
{
"epoch": 2.1563342318059298,
"grad_norm": 0.5515206456184387,
"learning_rate": 0.0004711732325957905,
"loss": 3.5706,
"step": 20000
},
{
"epoch": 2.1563342318059298,
"eval_accuracy": 0.3621358376230399,
"eval_loss": 3.5808968544006348,
"eval_runtime": 144.61,
"eval_samples_per_second": 124.549,
"eval_steps_per_second": 7.786,
"step": 20000
},
{
"epoch": 2.161725067385445,
"grad_norm": 0.6040294170379639,
"learning_rate": 0.00047084943335132213,
"loss": 3.5692,
"step": 20050
},
{
"epoch": 2.1671159029649596,
"grad_norm": 0.6468523740768433,
"learning_rate": 0.0004705256341068537,
"loss": 3.5764,
"step": 20100
},
{
"epoch": 2.1725067385444743,
"grad_norm": 0.5732126235961914,
"learning_rate": 0.0004702018348623853,
"loss": 3.572,
"step": 20150
},
{
"epoch": 2.177897574123989,
"grad_norm": 0.545032262802124,
"learning_rate": 0.00046987803561791684,
"loss": 3.5862,
"step": 20200
},
{
"epoch": 2.183288409703504,
"grad_norm": 0.5633872747421265,
"learning_rate": 0.00046955423637344844,
"loss": 3.5738,
"step": 20250
},
{
"epoch": 2.188679245283019,
"grad_norm": 0.6415254473686218,
"learning_rate": 0.00046923043712898,
"loss": 3.5789,
"step": 20300
},
{
"epoch": 2.1940700808625335,
"grad_norm": 0.6461573839187622,
"learning_rate": 0.0004689066378845116,
"loss": 3.5603,
"step": 20350
},
{
"epoch": 2.1994609164420487,
"grad_norm": 0.5884630680084229,
"learning_rate": 0.00046858283864004315,
"loss": 3.5676,
"step": 20400
},
{
"epoch": 2.2048517520215634,
"grad_norm": 0.5497499704360962,
"learning_rate": 0.0004682590393955747,
"loss": 3.5713,
"step": 20450
},
{
"epoch": 2.210242587601078,
"grad_norm": 0.5444649457931519,
"learning_rate": 0.0004679352401511063,
"loss": 3.5807,
"step": 20500
},
{
"epoch": 2.215633423180593,
"grad_norm": 0.5752549171447754,
"learning_rate": 0.0004676114409066378,
"loss": 3.576,
"step": 20550
},
{
"epoch": 2.221024258760108,
"grad_norm": 0.5602301359176636,
"learning_rate": 0.00046728764166216946,
"loss": 3.5703,
"step": 20600
},
{
"epoch": 2.2264150943396226,
"grad_norm": 0.5674179792404175,
"learning_rate": 0.00046696384241770095,
"loss": 3.5705,
"step": 20650
},
{
"epoch": 2.2318059299191373,
"grad_norm": 0.6246010661125183,
"learning_rate": 0.00046664004317323256,
"loss": 3.5675,
"step": 20700
},
{
"epoch": 2.2371967654986524,
"grad_norm": 0.5755497217178345,
"learning_rate": 0.0004663162439287641,
"loss": 3.5682,
"step": 20750
},
{
"epoch": 2.242587601078167,
"grad_norm": 0.601298987865448,
"learning_rate": 0.00046599244468429566,
"loss": 3.5731,
"step": 20800
},
{
"epoch": 2.247978436657682,
"grad_norm": 0.6204710602760315,
"learning_rate": 0.00046566864543982726,
"loss": 3.5701,
"step": 20850
},
{
"epoch": 2.2533692722371965,
"grad_norm": 0.5474254488945007,
"learning_rate": 0.0004653448461953588,
"loss": 3.5649,
"step": 20900
},
{
"epoch": 2.2587601078167117,
"grad_norm": 0.5631576776504517,
"learning_rate": 0.00046502752293577977,
"loss": 3.5743,
"step": 20950
},
{
"epoch": 2.2641509433962264,
"grad_norm": 0.5887001752853394,
"learning_rate": 0.00046470372369131137,
"loss": 3.5568,
"step": 21000
},
{
"epoch": 2.2641509433962264,
"eval_accuracy": 0.3631279471416739,
"eval_loss": 3.5703935623168945,
"eval_runtime": 144.8136,
"eval_samples_per_second": 124.374,
"eval_steps_per_second": 7.776,
"step": 21000
},
{
"epoch": 2.269541778975741,
"grad_norm": 0.5233503580093384,
"learning_rate": 0.0004643799244468429,
"loss": 3.5712,
"step": 21050
},
{
"epoch": 2.274932614555256,
"grad_norm": 0.5863848924636841,
"learning_rate": 0.0004640561252023745,
"loss": 3.5732,
"step": 21100
},
{
"epoch": 2.280323450134771,
"grad_norm": 0.610373854637146,
"learning_rate": 0.0004637323259579061,
"loss": 3.5865,
"step": 21150
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.5934001207351685,
"learning_rate": 0.0004634085267134376,
"loss": 3.5383,
"step": 21200
},
{
"epoch": 2.2911051212938007,
"grad_norm": 0.550365686416626,
"learning_rate": 0.00046308472746896923,
"loss": 3.5677,
"step": 21250
},
{
"epoch": 2.2964959568733154,
"grad_norm": 0.5905864238739014,
"learning_rate": 0.00046276092822450073,
"loss": 3.5684,
"step": 21300
},
{
"epoch": 2.30188679245283,
"grad_norm": 0.5769224166870117,
"learning_rate": 0.00046243712898003233,
"loss": 3.5621,
"step": 21350
},
{
"epoch": 2.3072776280323453,
"grad_norm": 0.5868223905563354,
"learning_rate": 0.0004621133297355639,
"loss": 3.5773,
"step": 21400
},
{
"epoch": 2.31266846361186,
"grad_norm": 0.5571302175521851,
"learning_rate": 0.0004617895304910955,
"loss": 3.5739,
"step": 21450
},
{
"epoch": 2.3180592991913747,
"grad_norm": 0.5874112844467163,
"learning_rate": 0.00046146573124662704,
"loss": 3.5778,
"step": 21500
},
{
"epoch": 2.3234501347708894,
"grad_norm": 0.6028329133987427,
"learning_rate": 0.00046114193200215864,
"loss": 3.558,
"step": 21550
},
{
"epoch": 2.3288409703504045,
"grad_norm": 0.5316165089607239,
"learning_rate": 0.0004608181327576902,
"loss": 3.5785,
"step": 21600
},
{
"epoch": 2.334231805929919,
"grad_norm": 0.5828713178634644,
"learning_rate": 0.00046049433351322175,
"loss": 3.5738,
"step": 21650
},
{
"epoch": 2.339622641509434,
"grad_norm": 0.5575670003890991,
"learning_rate": 0.00046017053426875335,
"loss": 3.5545,
"step": 21700
},
{
"epoch": 2.3450134770889486,
"grad_norm": 0.578111469745636,
"learning_rate": 0.0004598467350242849,
"loss": 3.5695,
"step": 21750
},
{
"epoch": 2.3504043126684637,
"grad_norm": 0.5924418568611145,
"learning_rate": 0.0004595229357798165,
"loss": 3.5927,
"step": 21800
},
{
"epoch": 2.3557951482479784,
"grad_norm": 0.5741672515869141,
"learning_rate": 0.00045919913653534806,
"loss": 3.576,
"step": 21850
},
{
"epoch": 2.361185983827493,
"grad_norm": 0.6206271648406982,
"learning_rate": 0.00045887533729087966,
"loss": 3.5548,
"step": 21900
},
{
"epoch": 2.3665768194070083,
"grad_norm": 0.6218240857124329,
"learning_rate": 0.00045855153804641116,
"loss": 3.5706,
"step": 21950
},
{
"epoch": 2.371967654986523,
"grad_norm": 0.5502433180809021,
"learning_rate": 0.0004582277388019427,
"loss": 3.5596,
"step": 22000
},
{
"epoch": 2.371967654986523,
"eval_accuracy": 0.36454162986502814,
"eval_loss": 3.558351993560791,
"eval_runtime": 144.2942,
"eval_samples_per_second": 124.821,
"eval_steps_per_second": 7.804,
"step": 22000
},
{
"epoch": 2.3773584905660377,
"grad_norm": 0.5923473238945007,
"learning_rate": 0.0004579039395574743,
"loss": 3.5625,
"step": 22050
},
{
"epoch": 2.382749326145553,
"grad_norm": 0.5796228051185608,
"learning_rate": 0.00045758014031300586,
"loss": 3.5745,
"step": 22100
},
{
"epoch": 2.3881401617250675,
"grad_norm": 0.5643661618232727,
"learning_rate": 0.00045725634106853747,
"loss": 3.5762,
"step": 22150
},
{
"epoch": 2.393530997304582,
"grad_norm": 0.563675582408905,
"learning_rate": 0.000456932541824069,
"loss": 3.5626,
"step": 22200
},
{
"epoch": 2.398921832884097,
"grad_norm": 0.5896977782249451,
"learning_rate": 0.0004566087425796006,
"loss": 3.5699,
"step": 22250
},
{
"epoch": 2.404312668463612,
"grad_norm": 0.5657273530960083,
"learning_rate": 0.0004562849433351322,
"loss": 3.5572,
"step": 22300
},
{
"epoch": 2.4097035040431267,
"grad_norm": 0.5821571946144104,
"learning_rate": 0.0004559611440906638,
"loss": 3.5794,
"step": 22350
},
{
"epoch": 2.4150943396226414,
"grad_norm": 0.576914370059967,
"learning_rate": 0.00045563734484619533,
"loss": 3.5643,
"step": 22400
},
{
"epoch": 2.420485175202156,
"grad_norm": 0.5930933952331543,
"learning_rate": 0.0004553135456017269,
"loss": 3.5656,
"step": 22450
},
{
"epoch": 2.4258760107816713,
"grad_norm": 0.6359766721725464,
"learning_rate": 0.0004549897463572585,
"loss": 3.5722,
"step": 22500
},
{
"epoch": 2.431266846361186,
"grad_norm": 0.5717756152153015,
"learning_rate": 0.00045466594711279,
"loss": 3.5636,
"step": 22550
},
{
"epoch": 2.4366576819407006,
"grad_norm": 0.6088978052139282,
"learning_rate": 0.00045434214786832164,
"loss": 3.5952,
"step": 22600
},
{
"epoch": 2.442048517520216,
"grad_norm": 0.596017062664032,
"learning_rate": 0.00045401834862385314,
"loss": 3.5571,
"step": 22650
},
{
"epoch": 2.4474393530997305,
"grad_norm": 0.5859695076942444,
"learning_rate": 0.00045369454937938474,
"loss": 3.5462,
"step": 22700
},
{
"epoch": 2.452830188679245,
"grad_norm": 0.5753331780433655,
"learning_rate": 0.0004533707501349163,
"loss": 3.5518,
"step": 22750
},
{
"epoch": 2.4582210242587603,
"grad_norm": 0.569882333278656,
"learning_rate": 0.0004530469508904479,
"loss": 3.5628,
"step": 22800
},
{
"epoch": 2.463611859838275,
"grad_norm": 0.604755699634552,
"learning_rate": 0.00045272315164597945,
"loss": 3.5464,
"step": 22850
},
{
"epoch": 2.4690026954177897,
"grad_norm": 0.6051239967346191,
"learning_rate": 0.000452399352401511,
"loss": 3.563,
"step": 22900
},
{
"epoch": 2.4743935309973044,
"grad_norm": 0.5919761061668396,
"learning_rate": 0.0004520755531570426,
"loss": 3.5611,
"step": 22950
},
{
"epoch": 2.4797843665768196,
"grad_norm": 0.5852389335632324,
"learning_rate": 0.00045175822989746355,
"loss": 3.5529,
"step": 23000
},
{
"epoch": 2.4797843665768196,
"eval_accuracy": 0.3654994050711128,
"eval_loss": 3.5451345443725586,
"eval_runtime": 144.5225,
"eval_samples_per_second": 124.624,
"eval_steps_per_second": 7.791,
"step": 23000
},
{
"epoch": 2.4851752021563343,
"grad_norm": 0.5408614873886108,
"learning_rate": 0.0004514344306529951,
"loss": 3.5645,
"step": 23050
},
{
"epoch": 2.490566037735849,
"grad_norm": 0.5751845836639404,
"learning_rate": 0.0004511106314085267,
"loss": 3.5671,
"step": 23100
},
{
"epoch": 2.4959568733153636,
"grad_norm": 0.5665581226348877,
"learning_rate": 0.00045078683216405826,
"loss": 3.5651,
"step": 23150
},
{
"epoch": 2.501347708894879,
"grad_norm": 0.5446679592132568,
"learning_rate": 0.00045046303291958976,
"loss": 3.5602,
"step": 23200
},
{
"epoch": 2.5067385444743935,
"grad_norm": 0.6096646189689636,
"learning_rate": 0.0004501392336751214,
"loss": 3.5606,
"step": 23250
},
{
"epoch": 2.512129380053908,
"grad_norm": 0.5711411237716675,
"learning_rate": 0.0004498154344306529,
"loss": 3.5462,
"step": 23300
},
{
"epoch": 2.5175202156334233,
"grad_norm": 0.5946795344352722,
"learning_rate": 0.0004494916351861845,
"loss": 3.5517,
"step": 23350
},
{
"epoch": 2.522911051212938,
"grad_norm": 0.6075916886329651,
"learning_rate": 0.00044916783594171607,
"loss": 3.5477,
"step": 23400
},
{
"epoch": 2.5283018867924527,
"grad_norm": 0.5496644377708435,
"learning_rate": 0.00044884403669724767,
"loss": 3.5583,
"step": 23450
},
{
"epoch": 2.533692722371968,
"grad_norm": 0.5712068676948547,
"learning_rate": 0.0004485202374527792,
"loss": 3.5607,
"step": 23500
},
{
"epoch": 2.5390835579514826,
"grad_norm": 0.5820903182029724,
"learning_rate": 0.00044819643820831083,
"loss": 3.5643,
"step": 23550
},
{
"epoch": 2.5444743935309972,
"grad_norm": 0.5886263847351074,
"learning_rate": 0.0004478726389638424,
"loss": 3.5662,
"step": 23600
},
{
"epoch": 2.5498652291105124,
"grad_norm": 0.5735592842102051,
"learning_rate": 0.00044754883971937393,
"loss": 3.5579,
"step": 23650
},
{
"epoch": 2.555256064690027,
"grad_norm": 0.6012240052223206,
"learning_rate": 0.00044722504047490553,
"loss": 3.5642,
"step": 23700
},
{
"epoch": 2.560646900269542,
"grad_norm": 0.5736075639724731,
"learning_rate": 0.0004469012412304371,
"loss": 3.5574,
"step": 23750
},
{
"epoch": 2.5660377358490565,
"grad_norm": 0.6146102547645569,
"learning_rate": 0.0004465774419859687,
"loss": 3.5704,
"step": 23800
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.5984640717506409,
"learning_rate": 0.00044625364274150024,
"loss": 3.5763,
"step": 23850
},
{
"epoch": 2.5768194070080863,
"grad_norm": 0.5764756798744202,
"learning_rate": 0.00044592984349703184,
"loss": 3.5746,
"step": 23900
},
{
"epoch": 2.582210242587601,
"grad_norm": 0.6541941165924072,
"learning_rate": 0.00044560604425256334,
"loss": 3.55,
"step": 23950
},
{
"epoch": 2.5876010781671157,
"grad_norm": 0.5598556399345398,
"learning_rate": 0.000445282245008095,
"loss": 3.5466,
"step": 24000
},
{
"epoch": 2.5876010781671157,
"eval_accuracy": 0.36651433169618786,
"eval_loss": 3.5378851890563965,
"eval_runtime": 144.1764,
"eval_samples_per_second": 124.923,
"eval_steps_per_second": 7.81,
"step": 24000
},
{
"epoch": 2.592991913746631,
"grad_norm": 0.6188113689422607,
"learning_rate": 0.0004449584457636265,
"loss": 3.5458,
"step": 24050
},
{
"epoch": 2.5983827493261455,
"grad_norm": 0.5713769197463989,
"learning_rate": 0.00044463464651915805,
"loss": 3.5473,
"step": 24100
},
{
"epoch": 2.6037735849056602,
"grad_norm": 0.5993424654006958,
"learning_rate": 0.00044431084727468965,
"loss": 3.5493,
"step": 24150
},
{
"epoch": 2.6091644204851754,
"grad_norm": 0.6186326146125793,
"learning_rate": 0.0004439870480302212,
"loss": 3.5668,
"step": 24200
},
{
"epoch": 2.61455525606469,
"grad_norm": 0.5661455392837524,
"learning_rate": 0.0004436632487857528,
"loss": 3.5703,
"step": 24250
},
{
"epoch": 2.6199460916442048,
"grad_norm": 0.6343756318092346,
"learning_rate": 0.00044333944954128436,
"loss": 3.5559,
"step": 24300
},
{
"epoch": 2.62533692722372,
"grad_norm": 0.5699751377105713,
"learning_rate": 0.00044301565029681596,
"loss": 3.5509,
"step": 24350
},
{
"epoch": 2.6307277628032346,
"grad_norm": 0.5953941941261292,
"learning_rate": 0.0004426918510523475,
"loss": 3.5609,
"step": 24400
},
{
"epoch": 2.6361185983827493,
"grad_norm": 0.5832213163375854,
"learning_rate": 0.0004423680518078791,
"loss": 3.531,
"step": 24450
},
{
"epoch": 2.641509433962264,
"grad_norm": 0.6015488505363464,
"learning_rate": 0.00044204425256341067,
"loss": 3.5396,
"step": 24500
},
{
"epoch": 2.6469002695417787,
"grad_norm": 0.6897053718566895,
"learning_rate": 0.0004417204533189422,
"loss": 3.5562,
"step": 24550
},
{
"epoch": 2.652291105121294,
"grad_norm": 0.5880343317985535,
"learning_rate": 0.0004413966540744738,
"loss": 3.5371,
"step": 24600
},
{
"epoch": 2.6576819407008085,
"grad_norm": 0.574292778968811,
"learning_rate": 0.0004410728548300053,
"loss": 3.5532,
"step": 24650
},
{
"epoch": 2.6630727762803232,
"grad_norm": 0.6216583251953125,
"learning_rate": 0.0004407490555855369,
"loss": 3.5528,
"step": 24700
},
{
"epoch": 2.6684636118598384,
"grad_norm": 0.6199180483818054,
"learning_rate": 0.0004404252563410685,
"loss": 3.5568,
"step": 24750
},
{
"epoch": 2.673854447439353,
"grad_norm": 0.6043687462806702,
"learning_rate": 0.0004401014570966001,
"loss": 3.5543,
"step": 24800
},
{
"epoch": 2.6792452830188678,
"grad_norm": 0.5746572613716125,
"learning_rate": 0.00043977765785213163,
"loss": 3.5559,
"step": 24850
},
{
"epoch": 2.684636118598383,
"grad_norm": 0.5911267995834351,
"learning_rate": 0.0004394538586076632,
"loss": 3.5716,
"step": 24900
},
{
"epoch": 2.6900269541778976,
"grad_norm": 0.5925143361091614,
"learning_rate": 0.0004391300593631948,
"loss": 3.5607,
"step": 24950
},
{
"epoch": 2.6954177897574123,
"grad_norm": 0.5580176711082458,
"learning_rate": 0.00043880626011872634,
"loss": 3.555,
"step": 25000
},
{
"epoch": 2.6954177897574123,
"eval_accuracy": 0.36743233994533236,
"eval_loss": 3.5253796577453613,
"eval_runtime": 144.4202,
"eval_samples_per_second": 124.712,
"eval_steps_per_second": 7.797,
"step": 25000
},
{
"epoch": 2.7008086253369274,
"grad_norm": 0.576725959777832,
"learning_rate": 0.0004384889368591473,
"loss": 3.5449,
"step": 25050
},
{
"epoch": 2.706199460916442,
"grad_norm": 0.545190691947937,
"learning_rate": 0.0004381651376146789,
"loss": 3.5538,
"step": 25100
},
{
"epoch": 2.711590296495957,
"grad_norm": 0.5915215611457825,
"learning_rate": 0.00043784133837021044,
"loss": 3.5311,
"step": 25150
},
{
"epoch": 2.7169811320754715,
"grad_norm": 0.6212695837020874,
"learning_rate": 0.00043751753912574205,
"loss": 3.5534,
"step": 25200
},
{
"epoch": 2.7223719676549867,
"grad_norm": 0.5867090225219727,
"learning_rate": 0.0004371937398812736,
"loss": 3.5584,
"step": 25250
},
{
"epoch": 2.7277628032345014,
"grad_norm": 0.5455405116081238,
"learning_rate": 0.0004368699406368051,
"loss": 3.5605,
"step": 25300
},
{
"epoch": 2.733153638814016,
"grad_norm": 0.6148755550384521,
"learning_rate": 0.0004365461413923367,
"loss": 3.5503,
"step": 25350
},
{
"epoch": 2.7385444743935308,
"grad_norm": 0.6133694052696228,
"learning_rate": 0.00043622234214786825,
"loss": 3.5414,
"step": 25400
},
{
"epoch": 2.743935309973046,
"grad_norm": 0.5954641699790955,
"learning_rate": 0.00043589854290339985,
"loss": 3.5439,
"step": 25450
},
{
"epoch": 2.7493261455525606,
"grad_norm": 0.5887457728385925,
"learning_rate": 0.0004355747436589314,
"loss": 3.5571,
"step": 25500
},
{
"epoch": 2.7547169811320753,
"grad_norm": 0.573692798614502,
"learning_rate": 0.000435250944414463,
"loss": 3.5495,
"step": 25550
},
{
"epoch": 2.7601078167115904,
"grad_norm": 0.5801973938941956,
"learning_rate": 0.00043492714516999456,
"loss": 3.5522,
"step": 25600
},
{
"epoch": 2.765498652291105,
"grad_norm": 0.583278238773346,
"learning_rate": 0.0004346033459255261,
"loss": 3.5448,
"step": 25650
},
{
"epoch": 2.77088948787062,
"grad_norm": 0.6692058444023132,
"learning_rate": 0.0004342795466810577,
"loss": 3.5509,
"step": 25700
},
{
"epoch": 2.776280323450135,
"grad_norm": 0.609228253364563,
"learning_rate": 0.00043395574743658927,
"loss": 3.5385,
"step": 25750
},
{
"epoch": 2.7816711590296497,
"grad_norm": 0.5607845187187195,
"learning_rate": 0.00043363194819212087,
"loss": 3.5623,
"step": 25800
},
{
"epoch": 2.7870619946091644,
"grad_norm": 0.5615296363830566,
"learning_rate": 0.0004333081489476524,
"loss": 3.5353,
"step": 25850
},
{
"epoch": 2.7924528301886795,
"grad_norm": 0.5903550982475281,
"learning_rate": 0.000432984349703184,
"loss": 3.5447,
"step": 25900
},
{
"epoch": 2.797843665768194,
"grad_norm": 0.6404184103012085,
"learning_rate": 0.0004326605504587155,
"loss": 3.5401,
"step": 25950
},
{
"epoch": 2.803234501347709,
"grad_norm": 0.5671994090080261,
"learning_rate": 0.0004323367512142472,
"loss": 3.5597,
"step": 26000
},
{
"epoch": 2.803234501347709,
"eval_accuracy": 0.36840749961346736,
"eval_loss": 3.5180230140686035,
"eval_runtime": 144.5115,
"eval_samples_per_second": 124.634,
"eval_steps_per_second": 7.792,
"step": 26000
},
{
"epoch": 2.8086253369272236,
"grad_norm": 0.5972022414207458,
"learning_rate": 0.0004320129519697787,
"loss": 3.5636,
"step": 26050
},
{
"epoch": 2.8140161725067383,
"grad_norm": 0.5757322907447815,
"learning_rate": 0.00043168915272531023,
"loss": 3.5479,
"step": 26100
},
{
"epoch": 2.8194070080862534,
"grad_norm": 0.5859302282333374,
"learning_rate": 0.00043136535348084183,
"loss": 3.5425,
"step": 26150
},
{
"epoch": 2.824797843665768,
"grad_norm": 0.5637912154197693,
"learning_rate": 0.0004310415542363734,
"loss": 3.555,
"step": 26200
},
{
"epoch": 2.830188679245283,
"grad_norm": 0.5691296458244324,
"learning_rate": 0.000430717754991905,
"loss": 3.5357,
"step": 26250
},
{
"epoch": 2.835579514824798,
"grad_norm": 0.6390897631645203,
"learning_rate": 0.00043039395574743654,
"loss": 3.5369,
"step": 26300
},
{
"epoch": 2.8409703504043127,
"grad_norm": 0.6023745536804199,
"learning_rate": 0.00043007015650296814,
"loss": 3.5569,
"step": 26350
},
{
"epoch": 2.8463611859838274,
"grad_norm": 0.5968989133834839,
"learning_rate": 0.0004297463572584997,
"loss": 3.5324,
"step": 26400
},
{
"epoch": 2.8517520215633425,
"grad_norm": 0.5747265219688416,
"learning_rate": 0.0004294225580140313,
"loss": 3.5554,
"step": 26450
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5473029017448425,
"learning_rate": 0.00042909875876956285,
"loss": 3.5601,
"step": 26500
},
{
"epoch": 2.862533692722372,
"grad_norm": 0.6153762936592102,
"learning_rate": 0.0004287749595250944,
"loss": 3.5416,
"step": 26550
},
{
"epoch": 2.867924528301887,
"grad_norm": 0.5814242959022522,
"learning_rate": 0.000428451160280626,
"loss": 3.5436,
"step": 26600
},
{
"epoch": 2.8733153638814017,
"grad_norm": 0.544052243232727,
"learning_rate": 0.0004281273610361575,
"loss": 3.5283,
"step": 26650
},
{
"epoch": 2.8787061994609164,
"grad_norm": 0.580087423324585,
"learning_rate": 0.0004278035617916891,
"loss": 3.5665,
"step": 26700
},
{
"epoch": 2.884097035040431,
"grad_norm": 0.606545627117157,
"learning_rate": 0.00042747976254722066,
"loss": 3.5333,
"step": 26750
},
{
"epoch": 2.889487870619946,
"grad_norm": 0.5613415241241455,
"learning_rate": 0.00042715596330275226,
"loss": 3.5188,
"step": 26800
},
{
"epoch": 2.894878706199461,
"grad_norm": 0.5772558450698853,
"learning_rate": 0.0004268321640582838,
"loss": 3.521,
"step": 26850
},
{
"epoch": 2.9002695417789757,
"grad_norm": 0.5379123091697693,
"learning_rate": 0.0004265083648138154,
"loss": 3.5405,
"step": 26900
},
{
"epoch": 2.9056603773584904,
"grad_norm": 0.5935842394828796,
"learning_rate": 0.00042618456556934697,
"loss": 3.5612,
"step": 26950
},
{
"epoch": 2.9110512129380055,
"grad_norm": 0.622439444065094,
"learning_rate": 0.0004258607663248785,
"loss": 3.5422,
"step": 27000
},
{
"epoch": 2.9110512129380055,
"eval_accuracy": 0.3692755275342172,
"eval_loss": 3.508702039718628,
"eval_runtime": 144.9148,
"eval_samples_per_second": 124.287,
"eval_steps_per_second": 7.77,
"step": 27000
},
{
"epoch": 2.91644204851752,
"grad_norm": 0.6212618947029114,
"learning_rate": 0.00042554344306529947,
"loss": 3.531,
"step": 27050
},
{
"epoch": 2.921832884097035,
"grad_norm": 0.6010525226593018,
"learning_rate": 0.0004252196438208311,
"loss": 3.5393,
"step": 27100
},
{
"epoch": 2.92722371967655,
"grad_norm": 0.5927774310112,
"learning_rate": 0.0004248958445763626,
"loss": 3.5333,
"step": 27150
},
{
"epoch": 2.9326145552560647,
"grad_norm": 0.6531330347061157,
"learning_rate": 0.00042457204533189423,
"loss": 3.5434,
"step": 27200
},
{
"epoch": 2.9380053908355794,
"grad_norm": 0.584989607334137,
"learning_rate": 0.0004242482460874258,
"loss": 3.5498,
"step": 27250
},
{
"epoch": 2.9433962264150946,
"grad_norm": 0.6219198107719421,
"learning_rate": 0.0004239244468429573,
"loss": 3.5142,
"step": 27300
},
{
"epoch": 2.9487870619946093,
"grad_norm": 0.6168526411056519,
"learning_rate": 0.0004236006475984889,
"loss": 3.5346,
"step": 27350
},
{
"epoch": 2.954177897574124,
"grad_norm": 0.583755373954773,
"learning_rate": 0.00042327684835402043,
"loss": 3.5291,
"step": 27400
},
{
"epoch": 2.9595687331536387,
"grad_norm": 0.6100607514381409,
"learning_rate": 0.00042295304910955204,
"loss": 3.5208,
"step": 27450
},
{
"epoch": 2.964959568733154,
"grad_norm": 0.5626996159553528,
"learning_rate": 0.0004226292498650836,
"loss": 3.5461,
"step": 27500
},
{
"epoch": 2.9703504043126685,
"grad_norm": 0.5924006700515747,
"learning_rate": 0.0004223054506206152,
"loss": 3.5485,
"step": 27550
},
{
"epoch": 2.975741239892183,
"grad_norm": 0.5733758807182312,
"learning_rate": 0.00042198165137614674,
"loss": 3.5297,
"step": 27600
},
{
"epoch": 2.981132075471698,
"grad_norm": 0.6193982362747192,
"learning_rate": 0.00042165785213167835,
"loss": 3.5365,
"step": 27650
},
{
"epoch": 2.986522911051213,
"grad_norm": 0.5635408759117126,
"learning_rate": 0.0004213340528872099,
"loss": 3.5253,
"step": 27700
},
{
"epoch": 2.9919137466307277,
"grad_norm": 0.6305370926856995,
"learning_rate": 0.00042101025364274145,
"loss": 3.5239,
"step": 27750
},
{
"epoch": 2.9973045822102424,
"grad_norm": 0.581883430480957,
"learning_rate": 0.00042068645439827305,
"loss": 3.5336,
"step": 27800
},
{
"epoch": 3.0026954177897576,
"grad_norm": 0.5631284713745117,
"learning_rate": 0.0004203626551538046,
"loss": 3.4953,
"step": 27850
},
{
"epoch": 3.0080862533692723,
"grad_norm": 0.6554384827613831,
"learning_rate": 0.0004200388559093362,
"loss": 3.4436,
"step": 27900
},
{
"epoch": 3.013477088948787,
"grad_norm": 0.5941973924636841,
"learning_rate": 0.0004197150566648677,
"loss": 3.4446,
"step": 27950
},
{
"epoch": 3.018867924528302,
"grad_norm": 0.5603510141372681,
"learning_rate": 0.00041939125742039936,
"loss": 3.4422,
"step": 28000
},
{
"epoch": 3.018867924528302,
"eval_accuracy": 0.3700082826096381,
"eval_loss": 3.5040652751922607,
"eval_runtime": 144.5182,
"eval_samples_per_second": 124.628,
"eval_steps_per_second": 7.791,
"step": 28000
},
{
"epoch": 3.024258760107817,
"grad_norm": 0.58504718542099,
"learning_rate": 0.00041906745817593086,
"loss": 3.442,
"step": 28050
},
{
"epoch": 3.0296495956873315,
"grad_norm": 0.590460479259491,
"learning_rate": 0.00041874365893146247,
"loss": 3.4482,
"step": 28100
},
{
"epoch": 3.035040431266846,
"grad_norm": 0.5873590707778931,
"learning_rate": 0.000418419859686994,
"loss": 3.4321,
"step": 28150
},
{
"epoch": 3.0404312668463613,
"grad_norm": 0.5897404551506042,
"learning_rate": 0.00041809606044252557,
"loss": 3.45,
"step": 28200
},
{
"epoch": 3.045822102425876,
"grad_norm": 0.5874994397163391,
"learning_rate": 0.00041777226119805717,
"loss": 3.4424,
"step": 28250
},
{
"epoch": 3.0512129380053907,
"grad_norm": 0.5833684206008911,
"learning_rate": 0.0004174484619535887,
"loss": 3.4642,
"step": 28300
},
{
"epoch": 3.056603773584906,
"grad_norm": 0.609053909778595,
"learning_rate": 0.00041712466270912033,
"loss": 3.4483,
"step": 28350
},
{
"epoch": 3.0619946091644206,
"grad_norm": 0.5735374093055725,
"learning_rate": 0.0004168008634646519,
"loss": 3.4695,
"step": 28400
},
{
"epoch": 3.0673854447439353,
"grad_norm": 0.60845947265625,
"learning_rate": 0.0004164770642201835,
"loss": 3.4465,
"step": 28450
},
{
"epoch": 3.07277628032345,
"grad_norm": 0.6065637469291687,
"learning_rate": 0.00041615326497571503,
"loss": 3.4424,
"step": 28500
},
{
"epoch": 3.078167115902965,
"grad_norm": 0.5800220370292664,
"learning_rate": 0.0004158294657312466,
"loss": 3.443,
"step": 28550
},
{
"epoch": 3.08355795148248,
"grad_norm": 0.6139459609985352,
"learning_rate": 0.0004155056664867782,
"loss": 3.4636,
"step": 28600
},
{
"epoch": 3.0889487870619945,
"grad_norm": 0.6870418190956116,
"learning_rate": 0.0004151818672423097,
"loss": 3.4479,
"step": 28650
},
{
"epoch": 3.0943396226415096,
"grad_norm": 0.5827794075012207,
"learning_rate": 0.0004148580679978413,
"loss": 3.4704,
"step": 28700
},
{
"epoch": 3.0997304582210243,
"grad_norm": 0.5892341732978821,
"learning_rate": 0.00041453426875337284,
"loss": 3.4355,
"step": 28750
},
{
"epoch": 3.105121293800539,
"grad_norm": 0.5540435314178467,
"learning_rate": 0.00041421046950890445,
"loss": 3.4644,
"step": 28800
},
{
"epoch": 3.1105121293800537,
"grad_norm": 0.6163308024406433,
"learning_rate": 0.000413886670264436,
"loss": 3.4522,
"step": 28850
},
{
"epoch": 3.115902964959569,
"grad_norm": 0.5884636640548706,
"learning_rate": 0.0004135628710199676,
"loss": 3.4664,
"step": 28900
},
{
"epoch": 3.1212938005390836,
"grad_norm": 0.6189049482345581,
"learning_rate": 0.00041323907177549915,
"loss": 3.465,
"step": 28950
},
{
"epoch": 3.1266846361185983,
"grad_norm": 0.6232777833938599,
"learning_rate": 0.0004129152725310307,
"loss": 3.4594,
"step": 29000
},
{
"epoch": 3.1266846361185983,
"eval_accuracy": 0.37088217778633,
"eval_loss": 3.4994044303894043,
"eval_runtime": 145.0712,
"eval_samples_per_second": 124.153,
"eval_steps_per_second": 7.762,
"step": 29000
},
{
"epoch": 3.1320754716981134,
"grad_norm": 0.6272684335708618,
"learning_rate": 0.00041259794927145165,
"loss": 3.4647,
"step": 29050
},
{
"epoch": 3.137466307277628,
"grad_norm": 0.5963659882545471,
"learning_rate": 0.00041227415002698326,
"loss": 3.4641,
"step": 29100
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.5980457663536072,
"learning_rate": 0.0004119503507825148,
"loss": 3.4658,
"step": 29150
},
{
"epoch": 3.1482479784366575,
"grad_norm": 0.6148801445960999,
"learning_rate": 0.00041163302752293576,
"loss": 3.4656,
"step": 29200
},
{
"epoch": 3.1536388140161726,
"grad_norm": 0.6103987693786621,
"learning_rate": 0.00041130922827846726,
"loss": 3.4807,
"step": 29250
},
{
"epoch": 3.1590296495956873,
"grad_norm": 0.6764293313026428,
"learning_rate": 0.0004109854290339989,
"loss": 3.4681,
"step": 29300
},
{
"epoch": 3.164420485175202,
"grad_norm": 0.5955349802970886,
"learning_rate": 0.0004106616297895304,
"loss": 3.4627,
"step": 29350
},
{
"epoch": 3.169811320754717,
"grad_norm": 0.5901073813438416,
"learning_rate": 0.000410337830545062,
"loss": 3.4575,
"step": 29400
},
{
"epoch": 3.175202156334232,
"grad_norm": 0.6012365818023682,
"learning_rate": 0.00041001403130059357,
"loss": 3.4659,
"step": 29450
},
{
"epoch": 3.1805929919137466,
"grad_norm": 0.6144543886184692,
"learning_rate": 0.00040969023205612517,
"loss": 3.4615,
"step": 29500
},
{
"epoch": 3.1859838274932613,
"grad_norm": 0.7040764689445496,
"learning_rate": 0.0004093664328116567,
"loss": 3.4752,
"step": 29550
},
{
"epoch": 3.1913746630727764,
"grad_norm": 0.6097145676612854,
"learning_rate": 0.00040904263356718833,
"loss": 3.4716,
"step": 29600
},
{
"epoch": 3.196765498652291,
"grad_norm": 0.6183958649635315,
"learning_rate": 0.0004087188343227199,
"loss": 3.4591,
"step": 29650
},
{
"epoch": 3.202156334231806,
"grad_norm": 0.6033417582511902,
"learning_rate": 0.00040839503507825143,
"loss": 3.4897,
"step": 29700
},
{
"epoch": 3.207547169811321,
"grad_norm": 0.6251811981201172,
"learning_rate": 0.00040807123583378303,
"loss": 3.4603,
"step": 29750
},
{
"epoch": 3.2129380053908356,
"grad_norm": 0.6118080019950867,
"learning_rate": 0.0004077474365893146,
"loss": 3.4704,
"step": 29800
},
{
"epoch": 3.2183288409703503,
"grad_norm": 0.6248761415481567,
"learning_rate": 0.0004074236373448462,
"loss": 3.4467,
"step": 29850
},
{
"epoch": 3.223719676549865,
"grad_norm": 0.6107171177864075,
"learning_rate": 0.00040709983810037774,
"loss": 3.4591,
"step": 29900
},
{
"epoch": 3.22911051212938,
"grad_norm": 0.6119734644889832,
"learning_rate": 0.00040677603885590934,
"loss": 3.4716,
"step": 29950
},
{
"epoch": 3.234501347708895,
"grad_norm": 0.6032190918922424,
"learning_rate": 0.00040645223961144084,
"loss": 3.4767,
"step": 30000
},
{
"epoch": 3.234501347708895,
"eval_accuracy": 0.37148009462797305,
"eval_loss": 3.493159770965576,
"eval_runtime": 144.5095,
"eval_samples_per_second": 124.635,
"eval_steps_per_second": 7.792,
"step": 30000
}
],
"logging_steps": 50,
"max_steps": 92750,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.508207538176e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}