{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9998676022772408,
"eval_steps": 500,
"global_step": 1888,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005295908910366742,
"grad_norm": 0.41111565141978,
"learning_rate": 1.0582010582010582e-06,
"loss": 1.3689,
"step": 1
},
{
"epoch": 0.0026479544551833707,
"grad_norm": 0.4260948383357496,
"learning_rate": 5.291005291005291e-06,
"loss": 1.3797,
"step": 5
},
{
"epoch": 0.005295908910366741,
"grad_norm": 0.42426082631444323,
"learning_rate": 1.0582010582010582e-05,
"loss": 1.4097,
"step": 10
},
{
"epoch": 0.007943863365550112,
"grad_norm": 0.4917431791111332,
"learning_rate": 1.5873015873015872e-05,
"loss": 1.4192,
"step": 15
},
{
"epoch": 0.010591817820733483,
"grad_norm": 0.21890435137541137,
"learning_rate": 2.1164021164021164e-05,
"loss": 1.3564,
"step": 20
},
{
"epoch": 0.013239772275916854,
"grad_norm": 0.1563118955046167,
"learning_rate": 2.6455026455026456e-05,
"loss": 1.3689,
"step": 25
},
{
"epoch": 0.015887726731100223,
"grad_norm": 0.14441954891998668,
"learning_rate": 3.1746031746031745e-05,
"loss": 1.3235,
"step": 30
},
{
"epoch": 0.018535681186283594,
"grad_norm": 0.13273151212388007,
"learning_rate": 3.7037037037037037e-05,
"loss": 1.2872,
"step": 35
},
{
"epoch": 0.021183635641466966,
"grad_norm": 0.13964432905217536,
"learning_rate": 4.232804232804233e-05,
"loss": 1.2703,
"step": 40
},
{
"epoch": 0.023831590096650337,
"grad_norm": 0.14877385709600144,
"learning_rate": 4.761904761904762e-05,
"loss": 1.286,
"step": 45
},
{
"epoch": 0.026479544551833708,
"grad_norm": 0.11433768860303765,
"learning_rate": 5.291005291005291e-05,
"loss": 1.2649,
"step": 50
},
{
"epoch": 0.02912749900701708,
"grad_norm": 0.09609426563043175,
"learning_rate": 5.82010582010582e-05,
"loss": 1.2586,
"step": 55
},
{
"epoch": 0.03177545346220045,
"grad_norm": 0.08894905109962493,
"learning_rate": 6.349206349206349e-05,
"loss": 1.2265,
"step": 60
},
{
"epoch": 0.03442340791738382,
"grad_norm": 0.07490077864915894,
"learning_rate": 6.878306878306878e-05,
"loss": 1.2306,
"step": 65
},
{
"epoch": 0.03707136237256719,
"grad_norm": 0.07227158023314931,
"learning_rate": 7.407407407407407e-05,
"loss": 1.2407,
"step": 70
},
{
"epoch": 0.03971931682775056,
"grad_norm": 0.06869740415301212,
"learning_rate": 7.936507936507937e-05,
"loss": 1.2122,
"step": 75
},
{
"epoch": 0.04236727128293393,
"grad_norm": 0.06853429613019985,
"learning_rate": 8.465608465608466e-05,
"loss": 1.2229,
"step": 80
},
{
"epoch": 0.0450152257381173,
"grad_norm": 0.0712797771766177,
"learning_rate": 8.994708994708995e-05,
"loss": 1.1861,
"step": 85
},
{
"epoch": 0.047663180193300674,
"grad_norm": 0.06827620857683864,
"learning_rate": 9.523809523809524e-05,
"loss": 1.1737,
"step": 90
},
{
"epoch": 0.050311134648484045,
"grad_norm": 0.07511648347979284,
"learning_rate": 0.00010052910052910055,
"loss": 1.1917,
"step": 95
},
{
"epoch": 0.052959089103667416,
"grad_norm": 0.07161483769838682,
"learning_rate": 0.00010582010582010582,
"loss": 1.1657,
"step": 100
},
{
"epoch": 0.05560704355885079,
"grad_norm": 0.0828998822809963,
"learning_rate": 0.00011111111111111112,
"loss": 1.2058,
"step": 105
},
{
"epoch": 0.05825499801403416,
"grad_norm": 0.08432989244956403,
"learning_rate": 0.0001164021164021164,
"loss": 1.1959,
"step": 110
},
{
"epoch": 0.06090295246921753,
"grad_norm": 0.08546417778154296,
"learning_rate": 0.0001216931216931217,
"loss": 1.1939,
"step": 115
},
{
"epoch": 0.0635509069244009,
"grad_norm": 0.08953022960897895,
"learning_rate": 0.00012698412698412698,
"loss": 1.152,
"step": 120
},
{
"epoch": 0.06619886137958426,
"grad_norm": 0.09090497875499474,
"learning_rate": 0.00013227513227513228,
"loss": 1.162,
"step": 125
},
{
"epoch": 0.06884681583476764,
"grad_norm": 0.09067803395184647,
"learning_rate": 0.00013756613756613756,
"loss": 1.1564,
"step": 130
},
{
"epoch": 0.071494770289951,
"grad_norm": 0.09746896349076282,
"learning_rate": 0.00014285714285714287,
"loss": 1.153,
"step": 135
},
{
"epoch": 0.07414272474513438,
"grad_norm": 0.09151621194870359,
"learning_rate": 0.00014814814814814815,
"loss": 1.1733,
"step": 140
},
{
"epoch": 0.07679067920031775,
"grad_norm": 0.09449259114304437,
"learning_rate": 0.00015343915343915345,
"loss": 1.1674,
"step": 145
},
{
"epoch": 0.07943863365550112,
"grad_norm": 0.09545666936190501,
"learning_rate": 0.00015873015873015873,
"loss": 1.1701,
"step": 150
},
{
"epoch": 0.08208658811068449,
"grad_norm": 0.0933818650565856,
"learning_rate": 0.00016402116402116404,
"loss": 1.1243,
"step": 155
},
{
"epoch": 0.08473454256586786,
"grad_norm": 0.09593001870262655,
"learning_rate": 0.00016931216931216931,
"loss": 1.1347,
"step": 160
},
{
"epoch": 0.08738249702105123,
"grad_norm": 0.09994210185656571,
"learning_rate": 0.00017460317460317462,
"loss": 1.1522,
"step": 165
},
{
"epoch": 0.0900304514762346,
"grad_norm": 0.09791765751683923,
"learning_rate": 0.0001798941798941799,
"loss": 1.1597,
"step": 170
},
{
"epoch": 0.09267840593141798,
"grad_norm": 0.09168024517745389,
"learning_rate": 0.0001851851851851852,
"loss": 1.1296,
"step": 175
},
{
"epoch": 0.09532636038660135,
"grad_norm": 0.09617452615612945,
"learning_rate": 0.00019047619047619048,
"loss": 1.1261,
"step": 180
},
{
"epoch": 0.09797431484178472,
"grad_norm": 0.10949818534072465,
"learning_rate": 0.0001957671957671958,
"loss": 1.1437,
"step": 185
},
{
"epoch": 0.10062226929696809,
"grad_norm": 0.09788854738358516,
"learning_rate": 0.00019999982904458238,
"loss": 1.1351,
"step": 190
},
{
"epoch": 0.10327022375215146,
"grad_norm": 0.09651211835063897,
"learning_rate": 0.00019999384566633964,
"loss": 1.1285,
"step": 195
},
{
"epoch": 0.10591817820733483,
"grad_norm": 0.09895389323355416,
"learning_rate": 0.00019997931510172217,
"loss": 1.1592,
"step": 200
},
{
"epoch": 0.1085661326625182,
"grad_norm": 0.09504405875958917,
"learning_rate": 0.0001999562385927608,
"loss": 1.127,
"step": 205
},
{
"epoch": 0.11121408711770157,
"grad_norm": 0.08454569193883055,
"learning_rate": 0.00019992461811196922,
"loss": 1.1163,
"step": 210
},
{
"epoch": 0.11386204157288494,
"grad_norm": 0.10371923765022571,
"learning_rate": 0.00019988445636217513,
"loss": 1.1286,
"step": 215
},
{
"epoch": 0.11650999602806832,
"grad_norm": 0.09773134958204137,
"learning_rate": 0.00019983575677628946,
"loss": 1.1547,
"step": 220
},
{
"epoch": 0.11915795048325169,
"grad_norm": 0.08853073845684854,
"learning_rate": 0.0001997785235170127,
"loss": 1.1391,
"step": 225
},
{
"epoch": 0.12180590493843506,
"grad_norm": 0.11043850318148267,
"learning_rate": 0.00019971276147647937,
"loss": 1.1265,
"step": 230
},
{
"epoch": 0.12445385939361843,
"grad_norm": 0.08955488246598574,
"learning_rate": 0.00019963847627583952,
"loss": 1.1293,
"step": 235
},
{
"epoch": 0.1271018138488018,
"grad_norm": 0.08677646900814183,
"learning_rate": 0.00019955567426477847,
"loss": 1.1665,
"step": 240
},
{
"epoch": 0.12974976830398516,
"grad_norm": 0.0827435794481087,
"learning_rate": 0.00019946436252097401,
"loss": 1.1517,
"step": 245
},
{
"epoch": 0.13239772275916853,
"grad_norm": 0.08325869934671043,
"learning_rate": 0.00019936454884949146,
"loss": 1.1491,
"step": 250
},
{
"epoch": 0.1350456772143519,
"grad_norm": 0.08582838361774685,
"learning_rate": 0.0001992562417821164,
"loss": 1.1447,
"step": 255
},
{
"epoch": 0.13769363166953527,
"grad_norm": 0.0784880184114551,
"learning_rate": 0.00019913945057662547,
"loss": 1.1321,
"step": 260
},
{
"epoch": 0.14034158612471864,
"grad_norm": 0.09123702006719589,
"learning_rate": 0.00019901418521599506,
"loss": 1.0955,
"step": 265
},
{
"epoch": 0.142989540579902,
"grad_norm": 0.08265522736674327,
"learning_rate": 0.000198880456407548,
"loss": 1.1493,
"step": 270
},
{
"epoch": 0.14563749503508538,
"grad_norm": 0.09073002430436583,
"learning_rate": 0.00019873827558203826,
"loss": 1.1101,
"step": 275
},
{
"epoch": 0.14828544949026876,
"grad_norm": 0.08138379527510481,
"learning_rate": 0.0001985876548926739,
"loss": 1.0982,
"step": 280
},
{
"epoch": 0.15093340394545213,
"grad_norm": 0.09137910087915257,
"learning_rate": 0.00019842860721407837,
"loss": 1.1312,
"step": 285
},
{
"epoch": 0.1535813584006355,
"grad_norm": 0.08477969588194346,
"learning_rate": 0.0001982611461411898,
"loss": 1.1413,
"step": 290
},
{
"epoch": 0.15622931285581887,
"grad_norm": 0.08817787188441822,
"learning_rate": 0.0001980852859880992,
"loss": 1.1394,
"step": 295
},
{
"epoch": 0.15887726731100224,
"grad_norm": 0.08829334171222576,
"learning_rate": 0.00019790104178682666,
"loss": 1.1236,
"step": 300
},
{
"epoch": 0.1615252217661856,
"grad_norm": 0.09023785166170276,
"learning_rate": 0.00019770842928603673,
"loss": 1.1205,
"step": 305
},
{
"epoch": 0.16417317622136898,
"grad_norm": 0.09500691769663268,
"learning_rate": 0.000197507464949692,
"loss": 1.1167,
"step": 310
},
{
"epoch": 0.16682113067655235,
"grad_norm": 0.08426021874529936,
"learning_rate": 0.00019729816595564614,
"loss": 1.1106,
"step": 315
},
{
"epoch": 0.16946908513173572,
"grad_norm": 0.08147138058421277,
"learning_rate": 0.00019708055019417522,
"loss": 1.11,
"step": 320
},
{
"epoch": 0.1721170395869191,
"grad_norm": 0.08138544612829161,
"learning_rate": 0.00019685463626644873,
"loss": 1.1186,
"step": 325
},
{
"epoch": 0.17476499404210247,
"grad_norm": 0.08666272634258464,
"learning_rate": 0.00019662044348293955,
"loss": 1.1372,
"step": 330
},
{
"epoch": 0.17741294849728584,
"grad_norm": 0.0843383524929789,
"learning_rate": 0.00019637799186177332,
"loss": 1.1079,
"step": 335
},
{
"epoch": 0.1800609029524692,
"grad_norm": 0.0772317749907282,
"learning_rate": 0.00019612730212701746,
"loss": 1.1475,
"step": 340
},
{
"epoch": 0.18270885740765258,
"grad_norm": 0.0774927041377668,
"learning_rate": 0.00019586839570690957,
"loss": 1.1152,
"step": 345
},
{
"epoch": 0.18535681186283595,
"grad_norm": 0.07478193615591532,
"learning_rate": 0.00019560129473202587,
"loss": 1.1267,
"step": 350
},
{
"epoch": 0.18800476631801932,
"grad_norm": 0.08480217654240685,
"learning_rate": 0.00019532602203338958,
"loss": 1.1239,
"step": 355
},
{
"epoch": 0.1906527207732027,
"grad_norm": 0.07940327835709149,
"learning_rate": 0.00019504260114051945,
"loss": 1.1309,
"step": 360
},
{
"epoch": 0.19330067522838607,
"grad_norm": 0.07586072518369895,
"learning_rate": 0.00019475105627941828,
"loss": 1.134,
"step": 365
},
{
"epoch": 0.19594862968356944,
"grad_norm": 0.07748779749674514,
"learning_rate": 0.00019445141237050246,
"loss": 1.1257,
"step": 370
},
{
"epoch": 0.1985965841387528,
"grad_norm": 0.0749023019530035,
"learning_rate": 0.00019414369502647163,
"loss": 1.1194,
"step": 375
},
{
"epoch": 0.20124453859393618,
"grad_norm": 0.07770003396390662,
"learning_rate": 0.00019382793055011942,
"loss": 1.1199,
"step": 380
},
{
"epoch": 0.20389249304911955,
"grad_norm": 0.07762689051643917,
"learning_rate": 0.00019350414593208526,
"loss": 1.1427,
"step": 385
},
{
"epoch": 0.20654044750430292,
"grad_norm": 0.07896343783224463,
"learning_rate": 0.00019317236884854714,
"loss": 1.1399,
"step": 390
},
{
"epoch": 0.2091884019594863,
"grad_norm": 0.08341099352486485,
"learning_rate": 0.00019283262765885614,
"loss": 1.0938,
"step": 395
},
{
"epoch": 0.21183635641466966,
"grad_norm": 0.08207858561332201,
"learning_rate": 0.0001924849514031121,
"loss": 1.1024,
"step": 400
},
{
"epoch": 0.21448431086985303,
"grad_norm": 0.07902580455460373,
"learning_rate": 0.00019212936979968158,
"loss": 1.1302,
"step": 405
},
{
"epoch": 0.2171322653250364,
"grad_norm": 0.08126318805436292,
"learning_rate": 0.00019176591324265746,
"loss": 1.1256,
"step": 410
},
{
"epoch": 0.21978021978021978,
"grad_norm": 0.08262896761244223,
"learning_rate": 0.0001913946127992611,
"loss": 1.1168,
"step": 415
},
{
"epoch": 0.22242817423540315,
"grad_norm": 0.08486099971998615,
"learning_rate": 0.0001910155002071866,
"loss": 1.1093,
"step": 420
},
{
"epoch": 0.22507612869058652,
"grad_norm": 0.08249982374177663,
"learning_rate": 0.0001906286078718881,
"loss": 1.1281,
"step": 425
},
{
"epoch": 0.2277240831457699,
"grad_norm": 0.08241790745995113,
"learning_rate": 0.00019023396886380987,
"loss": 1.1306,
"step": 430
},
{
"epoch": 0.23037203760095326,
"grad_norm": 0.08583672564162287,
"learning_rate": 0.00018983161691555947,
"loss": 1.1391,
"step": 435
},
{
"epoch": 0.23301999205613663,
"grad_norm": 0.07678631371522364,
"learning_rate": 0.00018942158641902434,
"loss": 1.1249,
"step": 440
},
{
"epoch": 0.23566794651132,
"grad_norm": 0.07687501040269233,
"learning_rate": 0.00018900391242243224,
"loss": 1.1275,
"step": 445
},
{
"epoch": 0.23831590096650337,
"grad_norm": 0.07734964302998311,
"learning_rate": 0.00018857863062735525,
"loss": 1.1419,
"step": 450
},
{
"epoch": 0.24096385542168675,
"grad_norm": 0.07789633231442543,
"learning_rate": 0.00018814577738565826,
"loss": 1.1313,
"step": 455
},
{
"epoch": 0.24361180987687012,
"grad_norm": 0.0786175058826523,
"learning_rate": 0.00018770538969639162,
"loss": 1.0941,
"step": 460
},
{
"epoch": 0.2462597643320535,
"grad_norm": 0.07662475568587293,
"learning_rate": 0.0001872575052026286,
"loss": 1.0895,
"step": 465
},
{
"epoch": 0.24890771878723686,
"grad_norm": 0.07891566103155145,
"learning_rate": 0.00018680216218824763,
"loss": 1.1195,
"step": 470
},
{
"epoch": 0.25155567324242023,
"grad_norm": 0.08801022195618644,
"learning_rate": 0.0001863393995746603,
"loss": 1.1123,
"step": 475
},
{
"epoch": 0.2542036276976036,
"grad_norm": 0.07439626465552164,
"learning_rate": 0.00018586925691748394,
"loss": 1.105,
"step": 480
},
{
"epoch": 0.256851582152787,
"grad_norm": 0.08921415332985139,
"learning_rate": 0.00018539177440316096,
"loss": 1.1183,
"step": 485
},
{
"epoch": 0.2594995366079703,
"grad_norm": 0.0722928891718278,
"learning_rate": 0.00018490699284552352,
"loss": 1.1225,
"step": 490
},
{
"epoch": 0.2621474910631537,
"grad_norm": 0.08123610651623102,
"learning_rate": 0.00018441495368230518,
"loss": 1.12,
"step": 495
},
{
"epoch": 0.26479544551833706,
"grad_norm": 0.08591833713679868,
"learning_rate": 0.00018391569897159853,
"loss": 1.1164,
"step": 500
},
{
"epoch": 0.26744339997352046,
"grad_norm": 0.0766094125907269,
"learning_rate": 0.0001834092713882606,
"loss": 1.1197,
"step": 505
},
{
"epoch": 0.2700913544287038,
"grad_norm": 0.07548163044316222,
"learning_rate": 0.00018289571422026488,
"loss": 1.111,
"step": 510
},
{
"epoch": 0.2727393088838872,
"grad_norm": 0.07972346954097494,
"learning_rate": 0.00018237507136500126,
"loss": 1.101,
"step": 515
},
{
"epoch": 0.27538726333907054,
"grad_norm": 0.08328304497316567,
"learning_rate": 0.00018184738732552374,
"loss": 1.1116,
"step": 520
},
{
"epoch": 0.27803521779425394,
"grad_norm": 0.0972393144531956,
"learning_rate": 0.00018131270720674658,
"loss": 1.1116,
"step": 525
},
{
"epoch": 0.2806831722494373,
"grad_norm": 0.07637369852545137,
"learning_rate": 0.00018077107671158878,
"loss": 1.1091,
"step": 530
},
{
"epoch": 0.2833311267046207,
"grad_norm": 0.07677607153425371,
"learning_rate": 0.00018022254213706745,
"loss": 1.0991,
"step": 535
},
{
"epoch": 0.285979081159804,
"grad_norm": 0.07646177666293459,
"learning_rate": 0.0001796671503703406,
"loss": 1.1015,
"step": 540
},
{
"epoch": 0.2886270356149874,
"grad_norm": 0.07666293082207067,
"learning_rate": 0.00017910494888469938,
"loss": 1.1007,
"step": 545
},
{
"epoch": 0.29127499007017077,
"grad_norm": 0.075595023851119,
"learning_rate": 0.00017853598573550997,
"loss": 1.1323,
"step": 550
},
{
"epoch": 0.29392294452535417,
"grad_norm": 0.07622954120001359,
"learning_rate": 0.0001779603095561062,
"loss": 1.1397,
"step": 555
},
{
"epoch": 0.2965708989805375,
"grad_norm": 0.07515451588120746,
"learning_rate": 0.00017737796955363244,
"loss": 1.0916,
"step": 560
},
{
"epoch": 0.2992188534357209,
"grad_norm": 0.07859697697366838,
"learning_rate": 0.0001767890155048375,
"loss": 1.0997,
"step": 565
},
{
"epoch": 0.30186680789090425,
"grad_norm": 0.07702712175636178,
"learning_rate": 0.0001761934977518197,
"loss": 1.0988,
"step": 570
},
{
"epoch": 0.30451476234608765,
"grad_norm": 0.0786046652278411,
"learning_rate": 0.00017559146719772417,
"loss": 1.1111,
"step": 575
},
{
"epoch": 0.307162716801271,
"grad_norm": 0.07520793055259228,
"learning_rate": 0.00017498297530239132,
"loss": 1.0989,
"step": 580
},
{
"epoch": 0.3098106712564544,
"grad_norm": 0.07578073058602562,
"learning_rate": 0.00017436807407795852,
"loss": 1.0948,
"step": 585
},
{
"epoch": 0.31245862571163774,
"grad_norm": 0.07604308595840065,
"learning_rate": 0.00017374681608441422,
"loss": 1.1036,
"step": 590
},
{
"epoch": 0.31510658016682114,
"grad_norm": 0.08158146756884099,
"learning_rate": 0.0001731192544251051,
"loss": 1.1225,
"step": 595
},
{
"epoch": 0.3177545346220045,
"grad_norm": 0.07498544543280497,
"learning_rate": 0.00017248544274219715,
"loss": 1.1201,
"step": 600
},
{
"epoch": 0.3204024890771879,
"grad_norm": 0.08493466237400855,
"learning_rate": 0.00017184543521209036,
"loss": 1.103,
"step": 605
},
{
"epoch": 0.3230504435323712,
"grad_norm": 0.08527784775049631,
"learning_rate": 0.00017119928654078792,
"loss": 1.1194,
"step": 610
},
{
"epoch": 0.3256983979875546,
"grad_norm": 0.08169595986359099,
"learning_rate": 0.00017054705195922008,
"loss": 1.0933,
"step": 615
},
{
"epoch": 0.32834635244273797,
"grad_norm": 0.08197785273219102,
"learning_rate": 0.00016988878721852334,
"loss": 1.1117,
"step": 620
},
{
"epoch": 0.33099430689792136,
"grad_norm": 0.07335268515860134,
"learning_rate": 0.0001692245485852747,
"loss": 1.0865,
"step": 625
},
{
"epoch": 0.3336422613531047,
"grad_norm": 0.08352503426226525,
"learning_rate": 0.00016855439283668233,
"loss": 1.1084,
"step": 630
},
{
"epoch": 0.3362902158082881,
"grad_norm": 0.07593742235391308,
"learning_rate": 0.00016787837725573253,
"loss": 1.107,
"step": 635
},
{
"epoch": 0.33893817026347145,
"grad_norm": 0.07457902202614533,
"learning_rate": 0.0001671965596262931,
"loss": 1.0935,
"step": 640
},
{
"epoch": 0.34158612471865485,
"grad_norm": 0.08114151429238778,
"learning_rate": 0.00016650899822817433,
"loss": 1.0969,
"step": 645
},
{
"epoch": 0.3442340791738382,
"grad_norm": 0.0717382651710646,
"learning_rate": 0.00016581575183214722,
"loss": 1.1352,
"step": 650
},
{
"epoch": 0.3468820336290216,
"grad_norm": 0.09884940877067575,
"learning_rate": 0.00016511687969492024,
"loss": 1.0922,
"step": 655
},
{
"epoch": 0.34952998808420493,
"grad_norm": 0.08048808120404499,
"learning_rate": 0.00016441244155407386,
"loss": 1.1225,
"step": 660
},
{
"epoch": 0.35217794253938833,
"grad_norm": 0.07768291438253641,
"learning_rate": 0.00016370249762295464,
"loss": 1.1163,
"step": 665
},
{
"epoch": 0.3548258969945717,
"grad_norm": 0.07669697580314543,
"learning_rate": 0.00016298710858552817,
"loss": 1.1454,
"step": 670
},
{
"epoch": 0.3574738514497551,
"grad_norm": 0.07654243142581256,
"learning_rate": 0.0001622663355911922,
"loss": 1.0954,
"step": 675
},
{
"epoch": 0.3601218059049384,
"grad_norm": 0.07308865070979989,
"learning_rate": 0.0001615402402495495,
"loss": 1.0849,
"step": 680
},
{
"epoch": 0.3627697603601218,
"grad_norm": 0.07098787881731487,
"learning_rate": 0.00016080888462514185,
"loss": 1.115,
"step": 685
},
{
"epoch": 0.36541771481530516,
"grad_norm": 0.07985291028148536,
"learning_rate": 0.00016007233123214486,
"loss": 1.0802,
"step": 690
},
{
"epoch": 0.36806566927048856,
"grad_norm": 0.07631811342549887,
"learning_rate": 0.00015933064302902446,
"loss": 1.12,
"step": 695
},
{
"epoch": 0.3707136237256719,
"grad_norm": 0.0716109218501906,
"learning_rate": 0.00015858388341315535,
"loss": 1.1223,
"step": 700
},
{
"epoch": 0.3733615781808553,
"grad_norm": 0.07770179033811983,
"learning_rate": 0.00015783211621540208,
"loss": 1.1206,
"step": 705
},
{
"epoch": 0.37600953263603865,
"grad_norm": 0.08008613710569794,
"learning_rate": 0.00015707540569466282,
"loss": 1.0927,
"step": 710
},
{
"epoch": 0.37865748709122204,
"grad_norm": 0.07909376018837275,
"learning_rate": 0.00015631381653237683,
"loss": 1.1208,
"step": 715
},
{
"epoch": 0.3813054415464054,
"grad_norm": 0.08795687346992156,
"learning_rate": 0.00015554741382699556,
"loss": 1.1158,
"step": 720
},
{
"epoch": 0.3839533960015888,
"grad_norm": 0.07837037230945529,
"learning_rate": 0.00015477626308841832,
"loss": 1.0996,
"step": 725
},
{
"epoch": 0.38660135045677213,
"grad_norm": 0.07641657124459816,
"learning_rate": 0.0001540004302323927,
"loss": 1.1014,
"step": 730
},
{
"epoch": 0.38924930491195553,
"grad_norm": 0.08137671044593779,
"learning_rate": 0.00015321998157488014,
"loss": 1.1078,
"step": 735
},
{
"epoch": 0.3918972593671389,
"grad_norm": 0.08041109732972,
"learning_rate": 0.0001524349838263875,
"loss": 1.1118,
"step": 740
},
{
"epoch": 0.39454521382232227,
"grad_norm": 0.07625661557827795,
"learning_rate": 0.00015164550408626495,
"loss": 1.102,
"step": 745
},
{
"epoch": 0.3971931682775056,
"grad_norm": 0.07622512218698127,
"learning_rate": 0.00015085160983697033,
"loss": 1.0988,
"step": 750
},
{
"epoch": 0.399841122732689,
"grad_norm": 0.07118368595229167,
"learning_rate": 0.00015005336893830106,
"loss": 1.1268,
"step": 755
},
{
"epoch": 0.40248907718787236,
"grad_norm": 0.07416345532311784,
"learning_rate": 0.0001492508496215937,
"loss": 1.0972,
"step": 760
},
{
"epoch": 0.40513703164305576,
"grad_norm": 0.07934756335393346,
"learning_rate": 0.0001484441204838916,
"loss": 1.1218,
"step": 765
},
{
"epoch": 0.4077849860982391,
"grad_norm": 0.07517515602393443,
"learning_rate": 0.00014763325048208154,
"loss": 1.1191,
"step": 770
},
{
"epoch": 0.4104329405534225,
"grad_norm": 0.07599570184456439,
"learning_rate": 0.00014681830892699946,
"loss": 1.1095,
"step": 775
},
{
"epoch": 0.41308089500860584,
"grad_norm": 0.07910162374766451,
"learning_rate": 0.00014599936547750593,
"loss": 1.0652,
"step": 780
},
{
"epoch": 0.41572884946378924,
"grad_norm": 0.07982743824380482,
"learning_rate": 0.000145176490134532,
"loss": 1.1065,
"step": 785
},
{
"epoch": 0.4183768039189726,
"grad_norm": 0.07714607042652752,
"learning_rate": 0.00014434975323509566,
"loss": 1.1059,
"step": 790
},
{
"epoch": 0.421024758374156,
"grad_norm": 0.07606594821793712,
"learning_rate": 0.00014351922544628959,
"loss": 1.0735,
"step": 795
},
{
"epoch": 0.4236727128293393,
"grad_norm": 0.07739034795460513,
"learning_rate": 0.00014268497775924078,
"loss": 1.1292,
"step": 800
},
{
"epoch": 0.4263206672845227,
"grad_norm": 0.08112666266940216,
"learning_rate": 0.00014184708148304255,
"loss": 1.0935,
"step": 805
},
{
"epoch": 0.42896862173970607,
"grad_norm": 0.07417441389696992,
"learning_rate": 0.000141005608238659,
"loss": 1.0917,
"step": 810
},
{
"epoch": 0.43161657619488947,
"grad_norm": 0.07327160038369271,
"learning_rate": 0.0001401606299528033,
"loss": 1.1082,
"step": 815
},
{
"epoch": 0.4342645306500728,
"grad_norm": 0.07329251639640971,
"learning_rate": 0.0001393122188517893,
"loss": 1.0983,
"step": 820
},
{
"epoch": 0.4369124851052562,
"grad_norm": 0.07652688725710881,
"learning_rate": 0.00013846044745535822,
"loss": 1.1055,
"step": 825
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.07594879575700789,
"learning_rate": 0.00013760538857047957,
"loss": 1.0881,
"step": 830
},
{
"epoch": 0.44220839401562295,
"grad_norm": 0.07795025881048111,
"learning_rate": 0.00013674711528512793,
"loss": 1.135,
"step": 835
},
{
"epoch": 0.4448563484708063,
"grad_norm": 0.07525588786296598,
"learning_rate": 0.00013588570096203554,
"loss": 1.0939,
"step": 840
},
{
"epoch": 0.4475043029259897,
"grad_norm": 0.07793636775238774,
"learning_rate": 0.00013502121923242164,
"loss": 1.0912,
"step": 845
},
{
"epoch": 0.45015225738117304,
"grad_norm": 0.07426019016116724,
"learning_rate": 0.00013415374398969844,
"loss": 1.1182,
"step": 850
},
{
"epoch": 0.45280021183635644,
"grad_norm": 0.07684359220337984,
"learning_rate": 0.00013328334938315513,
"loss": 1.0972,
"step": 855
},
{
"epoch": 0.4554481662915398,
"grad_norm": 0.08240740529852435,
"learning_rate": 0.0001324101098116196,
"loss": 1.1124,
"step": 860
},
{
"epoch": 0.4580961207467232,
"grad_norm": 0.0763891854972936,
"learning_rate": 0.00013153409991709933,
"loss": 1.0883,
"step": 865
},
{
"epoch": 0.4607440752019065,
"grad_norm": 0.08060164757564352,
"learning_rate": 0.0001306553945784009,
"loss": 1.1097,
"step": 870
},
{
"epoch": 0.4633920296570899,
"grad_norm": 0.0769697871980839,
"learning_rate": 0.00012977406890472975,
"loss": 1.0881,
"step": 875
},
{
"epoch": 0.46603998411227326,
"grad_norm": 0.07383145111875959,
"learning_rate": 0.0001288901982292701,
"loss": 1.0872,
"step": 880
},
{
"epoch": 0.46868793856745666,
"grad_norm": 0.07293602517307703,
"learning_rate": 0.0001280038581027455,
"loss": 1.0712,
"step": 885
},
{
"epoch": 0.47133589302264,
"grad_norm": 0.07479709674097254,
"learning_rate": 0.00012711512428696114,
"loss": 1.0922,
"step": 890
},
{
"epoch": 0.4739838474778234,
"grad_norm": 0.0755945597589927,
"learning_rate": 0.0001262240727483278,
"loss": 1.0925,
"step": 895
},
{
"epoch": 0.47663180193300675,
"grad_norm": 0.07530841667815956,
"learning_rate": 0.0001253307796513686,
"loss": 1.1226,
"step": 900
},
{
"epoch": 0.47927975638819015,
"grad_norm": 0.08059154491334077,
"learning_rate": 0.0001244353213522087,
"loss": 1.1334,
"step": 905
},
{
"epoch": 0.4819277108433735,
"grad_norm": 0.07234568635036283,
"learning_rate": 0.00012353777439204838,
"loss": 1.1304,
"step": 910
},
{
"epoch": 0.4845756652985569,
"grad_norm": 0.07303744281420084,
"learning_rate": 0.00012263821549062074,
"loss": 1.0934,
"step": 915
},
{
"epoch": 0.48722361975374023,
"grad_norm": 0.07710065386256926,
"learning_rate": 0.00012173672153963377,
"loss": 1.1153,
"step": 920
},
{
"epoch": 0.48987157420892363,
"grad_norm": 0.07830816633015714,
"learning_rate": 0.00012083336959619802,
"loss": 1.112,
"step": 925
},
{
"epoch": 0.492519528664107,
"grad_norm": 0.0756022313506664,
"learning_rate": 0.00011992823687623977,
"loss": 1.122,
"step": 930
},
{
"epoch": 0.4951674831192904,
"grad_norm": 0.07163741166179009,
"learning_rate": 0.00011902140074790112,
"loss": 1.0669,
"step": 935
},
{
"epoch": 0.4978154375744737,
"grad_norm": 0.08576324173799038,
"learning_rate": 0.00011811293872492644,
"loss": 1.121,
"step": 940
},
{
"epoch": 0.5004633920296571,
"grad_norm": 0.0762857316535538,
"learning_rate": 0.00011720292846003706,
"loss": 1.1249,
"step": 945
},
{
"epoch": 0.5031113464848405,
"grad_norm": 0.07839664604260674,
"learning_rate": 0.00011629144773829344,
"loss": 1.1062,
"step": 950
},
{
"epoch": 0.5057593009400239,
"grad_norm": 0.08031109603032427,
"learning_rate": 0.00011537857447044656,
"loss": 1.1013,
"step": 955
},
{
"epoch": 0.5084072553952071,
"grad_norm": 0.07756013246128472,
"learning_rate": 0.00011446438668627819,
"loss": 1.0908,
"step": 960
},
{
"epoch": 0.5110552098503905,
"grad_norm": 0.07362707242031638,
"learning_rate": 0.00011354896252793117,
"loss": 1.0951,
"step": 965
},
{
"epoch": 0.513703164305574,
"grad_norm": 0.07249015180621271,
"learning_rate": 0.0001126323802432301,
"loss": 1.0966,
"step": 970
},
{
"epoch": 0.5163511187607573,
"grad_norm": 0.07541567661268533,
"learning_rate": 0.00011171471817899282,
"loss": 1.1181,
"step": 975
},
{
"epoch": 0.5189990732159406,
"grad_norm": 0.08071128877259519,
"learning_rate": 0.0001107960547743337,
"loss": 1.0811,
"step": 980
},
{
"epoch": 0.521647027671124,
"grad_norm": 0.08258197653732914,
"learning_rate": 0.00010987646855395872,
"loss": 1.1417,
"step": 985
},
{
"epoch": 0.5242949821263074,
"grad_norm": 0.07840010957906275,
"learning_rate": 0.00010895603812145359,
"loss": 1.1253,
"step": 990
},
{
"epoch": 0.5269429365814908,
"grad_norm": 0.07562413230243624,
"learning_rate": 0.00010803484215256471,
"loss": 1.1022,
"step": 995
},
{
"epoch": 0.5295908910366741,
"grad_norm": 0.0748436684540439,
"learning_rate": 0.00010711295938847452,
"loss": 1.0963,
"step": 1000
},
{
"epoch": 0.5322388454918575,
"grad_norm": 0.07432957206360974,
"learning_rate": 0.00010619046862907057,
"loss": 1.0804,
"step": 1005
},
{
"epoch": 0.5348867999470409,
"grad_norm": 0.07745940840059426,
"learning_rate": 0.00010526744872621013,
"loss": 1.1189,
"step": 1010
},
{
"epoch": 0.5375347544022243,
"grad_norm": 0.07755117796467678,
"learning_rate": 0.00010434397857698025,
"loss": 1.0881,
"step": 1015
},
{
"epoch": 0.5401827088574076,
"grad_norm": 0.07851509890246185,
"learning_rate": 0.00010342013711695356,
"loss": 1.1108,
"step": 1020
},
{
"epoch": 0.542830663312591,
"grad_norm": 0.07480756214232848,
"learning_rate": 0.00010249600331344144,
"loss": 1.1214,
"step": 1025
},
{
"epoch": 0.5454786177677744,
"grad_norm": 0.07388322231422537,
"learning_rate": 0.00010157165615874384,
"loss": 1.1129,
"step": 1030
},
{
"epoch": 0.5481265722229578,
"grad_norm": 0.07729477706022746,
"learning_rate": 0.00010064717466339746,
"loss": 1.1024,
"step": 1035
},
{
"epoch": 0.5507745266781411,
"grad_norm": 0.07418682486286104,
"learning_rate": 9.972263784942191e-05,
"loss": 1.0897,
"step": 1040
},
{
"epoch": 0.5534224811333245,
"grad_norm": 0.07818304753661111,
"learning_rate": 9.879812474356538e-05,
"loss": 1.1209,
"step": 1045
},
{
"epoch": 0.5560704355885079,
"grad_norm": 0.07539756214766863,
"learning_rate": 9.78737143705495e-05,
"loss": 1.0842,
"step": 1050
},
{
"epoch": 0.5587183900436913,
"grad_norm": 0.08000396424013696,
"learning_rate": 9.69494857463147e-05,
"loss": 1.1022,
"step": 1055
},
{
"epoch": 0.5613663444988746,
"grad_norm": 0.0780502131683453,
"learning_rate": 9.602551787126593e-05,
"loss": 1.0828,
"step": 1060
},
{
"epoch": 0.564014298954058,
"grad_norm": 0.0756338343630312,
"learning_rate": 9.510188972352003e-05,
"loss": 1.1017,
"step": 1065
},
{
"epoch": 0.5666622534092414,
"grad_norm": 0.0744886822174254,
"learning_rate": 9.417868025215508e-05,
"loss": 1.0982,
"step": 1070
},
{
"epoch": 0.5693102078644248,
"grad_norm": 0.0743961983677683,
"learning_rate": 9.325596837046177e-05,
"loss": 1.1129,
"step": 1075
},
{
"epoch": 0.571958162319608,
"grad_norm": 0.07437524331159841,
"learning_rate": 9.233383294919832e-05,
"loss": 1.0775,
"step": 1080
},
{
"epoch": 0.5746061167747915,
"grad_norm": 0.0717481378509695,
"learning_rate": 9.141235280984873e-05,
"loss": 1.1017,
"step": 1085
},
{
"epoch": 0.5772540712299749,
"grad_norm": 0.07661308153340447,
"learning_rate": 9.04916067178854e-05,
"loss": 1.0641,
"step": 1090
},
{
"epoch": 0.5799020256851583,
"grad_norm": 0.0770623797115787,
"learning_rate": 8.957167337603652e-05,
"loss": 1.1182,
"step": 1095
},
{
"epoch": 0.5825499801403415,
"grad_norm": 0.07405657746416461,
"learning_rate": 8.865263141755865e-05,
"loss": 1.1057,
"step": 1100
},
{
"epoch": 0.5851979345955249,
"grad_norm": 0.07499634726366083,
"learning_rate": 8.773455939951548e-05,
"loss": 1.091,
"step": 1105
},
{
"epoch": 0.5878458890507083,
"grad_norm": 0.08117162442341291,
"learning_rate": 8.681753579606312e-05,
"loss": 1.125,
"step": 1110
},
{
"epoch": 0.5904938435058917,
"grad_norm": 0.07499243002348535,
"learning_rate": 8.590163899174202e-05,
"loss": 1.0797,
"step": 1115
},
{
"epoch": 0.593141797961075,
"grad_norm": 0.0757499666860512,
"learning_rate": 8.498694727477712e-05,
"loss": 1.101,
"step": 1120
},
{
"epoch": 0.5957897524162584,
"grad_norm": 0.0790409067216223,
"learning_rate": 8.40735388303861e-05,
"loss": 1.1147,
"step": 1125
},
{
"epoch": 0.5984377068714418,
"grad_norm": 0.07791231584194006,
"learning_rate": 8.316149173409604e-05,
"loss": 1.0985,
"step": 1130
},
{
"epoch": 0.6010856613266252,
"grad_norm": 0.07490124978826228,
"learning_rate": 8.225088394506998e-05,
"loss": 1.1174,
"step": 1135
},
{
"epoch": 0.6037336157818085,
"grad_norm": 0.07693659200902671,
"learning_rate": 8.134179329944308e-05,
"loss": 1.1346,
"step": 1140
},
{
"epoch": 0.6063815702369919,
"grad_norm": 0.07605699796790699,
"learning_rate": 8.043429750366941e-05,
"loss": 1.1001,
"step": 1145
},
{
"epoch": 0.6090295246921753,
"grad_norm": 0.07462322261103259,
"learning_rate": 7.952847412787997e-05,
"loss": 1.0907,
"step": 1150
},
{
"epoch": 0.6116774791473587,
"grad_norm": 0.07830257859960496,
"learning_rate": 7.8624400599252e-05,
"loss": 1.079,
"step": 1155
},
{
"epoch": 0.614325433602542,
"grad_norm": 0.07768781511626291,
"learning_rate": 7.772215419539087e-05,
"loss": 1.092,
"step": 1160
},
{
"epoch": 0.6169733880577254,
"grad_norm": 0.07237763916654574,
"learning_rate": 7.682181203772462e-05,
"loss": 1.1278,
"step": 1165
},
{
"epoch": 0.6196213425129088,
"grad_norm": 0.07421572650644807,
"learning_rate": 7.592345108491177e-05,
"loss": 1.0914,
"step": 1170
},
{
"epoch": 0.6222692969680922,
"grad_norm": 0.07647986250910133,
"learning_rate": 7.502714812626306e-05,
"loss": 1.0973,
"step": 1175
},
{
"epoch": 0.6249172514232755,
"grad_norm": 0.07673429125787028,
"learning_rate": 7.413297977517804e-05,
"loss": 1.113,
"step": 1180
},
{
"epoch": 0.6275652058784589,
"grad_norm": 0.07847988781414174,
"learning_rate": 7.324102246259588e-05,
"loss": 1.1106,
"step": 1185
},
{
"epoch": 0.6302131603336423,
"grad_norm": 0.08602822092227703,
"learning_rate": 7.235135243046266e-05,
"loss": 1.0951,
"step": 1190
},
{
"epoch": 0.6328611147888257,
"grad_norm": 0.07679615832799559,
"learning_rate": 7.146404572521436e-05,
"loss": 1.0822,
"step": 1195
},
{
"epoch": 0.635509069244009,
"grad_norm": 0.07328313009689218,
"learning_rate": 7.057917819127646e-05,
"loss": 1.1144,
"step": 1200
},
{
"epoch": 0.6381570236991924,
"grad_norm": 0.07561356041759124,
"learning_rate": 6.969682546458119e-05,
"loss": 1.0949,
"step": 1205
},
{
"epoch": 0.6408049781543758,
"grad_norm": 0.07601697316878776,
"learning_rate": 6.881706296610222e-05,
"loss": 1.0881,
"step": 1210
},
{
"epoch": 0.6434529326095592,
"grad_norm": 0.07424508033316624,
"learning_rate": 6.793996589540794e-05,
"loss": 1.0894,
"step": 1215
},
{
"epoch": 0.6461008870647424,
"grad_norm": 0.0774103399193041,
"learning_rate": 6.706560922423375e-05,
"loss": 1.0889,
"step": 1220
},
{
"epoch": 0.6487488415199258,
"grad_norm": 0.07704030882428428,
"learning_rate": 6.619406769007347e-05,
"loss": 1.1173,
"step": 1225
},
{
"epoch": 0.6513967959751092,
"grad_norm": 0.07986295404405998,
"learning_rate": 6.532541578979114e-05,
"loss": 1.0828,
"step": 1230
},
{
"epoch": 0.6540447504302926,
"grad_norm": 0.08205457004998916,
"learning_rate": 6.445972777325322e-05,
"loss": 1.1107,
"step": 1235
},
{
"epoch": 0.6566927048854759,
"grad_norm": 0.07502810371034836,
"learning_rate": 6.359707763698192e-05,
"loss": 1.0947,
"step": 1240
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.07480668675284598,
"learning_rate": 6.27375391178301e-05,
"loss": 1.129,
"step": 1245
},
{
"epoch": 0.6619886137958427,
"grad_norm": 0.075324303274979,
"learning_rate": 6.188118568667869e-05,
"loss": 1.1049,
"step": 1250
},
{
"epoch": 0.6646365682510261,
"grad_norm": 0.07716903325547174,
"learning_rate": 6.102809054215626e-05,
"loss": 1.0853,
"step": 1255
},
{
"epoch": 0.6672845227062094,
"grad_norm": 0.08902130376105476,
"learning_rate": 6.017832660438261e-05,
"loss": 1.1014,
"step": 1260
},
{
"epoch": 0.6699324771613928,
"grad_norm": 0.07427630543595487,
"learning_rate": 5.9331966508735416e-05,
"loss": 1.0715,
"step": 1265
},
{
"epoch": 0.6725804316165762,
"grad_norm": 0.07483794256035971,
"learning_rate": 5.848908259964176e-05,
"loss": 1.1228,
"step": 1270
},
{
"epoch": 0.6752283860717596,
"grad_norm": 0.07249249950069708,
"learning_rate": 5.7649746924394374e-05,
"loss": 1.1036,
"step": 1275
},
{
"epoch": 0.6778763405269429,
"grad_norm": 0.07406719285536154,
"learning_rate": 5.681403122699306e-05,
"loss": 1.0975,
"step": 1280
},
{
"epoch": 0.6805242949821263,
"grad_norm": 0.07322431567652883,
"learning_rate": 5.598200694201228e-05,
"loss": 1.1062,
"step": 1285
},
{
"epoch": 0.6831722494373097,
"grad_norm": 0.07815522826559648,
"learning_rate": 5.5153745188495395e-05,
"loss": 1.0813,
"step": 1290
},
{
"epoch": 0.6858202038924931,
"grad_norm": 0.07426469053250764,
"learning_rate": 5.432931676387516e-05,
"loss": 1.104,
"step": 1295
},
{
"epoch": 0.6884681583476764,
"grad_norm": 0.08432084820000514,
"learning_rate": 5.350879213792255e-05,
"loss": 1.1244,
"step": 1300
},
{
"epoch": 0.6911161128028598,
"grad_norm": 0.07573346123023687,
"learning_rate": 5.2692241446723003e-05,
"loss": 1.1084,
"step": 1305
},
{
"epoch": 0.6937640672580432,
"grad_norm": 0.07843026074586353,
"learning_rate": 5.18797344866814e-05,
"loss": 1.102,
"step": 1310
},
{
"epoch": 0.6964120217132266,
"grad_norm": 0.07870397238308142,
"learning_rate": 5.107134070855617e-05,
"loss": 1.0843,
"step": 1315
},
{
"epoch": 0.6990599761684099,
"grad_norm": 0.07508751345932353,
"learning_rate": 5.0267129211522834e-05,
"loss": 1.0998,
"step": 1320
},
{
"epoch": 0.7017079306235933,
"grad_norm": 0.07699932244077094,
"learning_rate": 4.9467168737267376e-05,
"loss": 1.1199,
"step": 1325
},
{
"epoch": 0.7043558850787767,
"grad_norm": 0.07341861390300569,
"learning_rate": 4.867152766411075e-05,
"loss": 1.1099,
"step": 1330
},
{
"epoch": 0.7070038395339601,
"grad_norm": 0.07886199590243474,
"learning_rate": 4.788027400116387e-05,
"loss": 1.1213,
"step": 1335
},
{
"epoch": 0.7096517939891434,
"grad_norm": 0.07400224087388893,
"learning_rate": 4.7093475382514343e-05,
"loss": 1.1089,
"step": 1340
},
{
"epoch": 0.7122997484443268,
"grad_norm": 0.07628400003826659,
"learning_rate": 4.631119906144562e-05,
"loss": 1.1101,
"step": 1345
},
{
"epoch": 0.7149477028995102,
"grad_norm": 0.07655029779945569,
"learning_rate": 4.5533511904687977e-05,
"loss": 1.1464,
"step": 1350
},
{
"epoch": 0.7175956573546936,
"grad_norm": 0.07408511396805334,
"learning_rate": 4.4760480386703085e-05,
"loss": 1.1036,
"step": 1355
},
{
"epoch": 0.7202436118098768,
"grad_norm": 0.07722020217908175,
"learning_rate": 4.399217058400225e-05,
"loss": 1.1229,
"step": 1360
},
{
"epoch": 0.7228915662650602,
"grad_norm": 0.08311731202657702,
"learning_rate": 4.322864816949785e-05,
"loss": 1.111,
"step": 1365
},
{
"epoch": 0.7255395207202436,
"grad_norm": 0.07589827212004248,
"learning_rate": 4.2469978406890265e-05,
"loss": 1.1205,
"step": 1370
},
{
"epoch": 0.728187475175427,
"grad_norm": 0.07793858759229856,
"learning_rate": 4.171622614508911e-05,
"loss": 1.1104,
"step": 1375
},
{
"epoch": 0.7308354296306103,
"grad_norm": 0.07696199883343936,
"learning_rate": 4.0967455812670095e-05,
"loss": 1.1055,
"step": 1380
},
{
"epoch": 0.7334833840857937,
"grad_norm": 0.07532988662602433,
"learning_rate": 4.022373141236801e-05,
"loss": 1.1052,
"step": 1385
},
{
"epoch": 0.7361313385409771,
"grad_norm": 0.07570188583228322,
"learning_rate": 3.948511651560593e-05,
"loss": 1.0996,
"step": 1390
},
{
"epoch": 0.7387792929961605,
"grad_norm": 0.07840832338236073,
"learning_rate": 3.87516742570611e-05,
"loss": 1.0989,
"step": 1395
},
{
"epoch": 0.7414272474513438,
"grad_norm": 0.07688842225193993,
"learning_rate": 3.802346732926866e-05,
"loss": 1.094,
"step": 1400
},
{
"epoch": 0.7440752019065272,
"grad_norm": 0.07667453813475356,
"learning_rate": 3.730055797726271e-05,
"loss": 1.1138,
"step": 1405
},
{
"epoch": 0.7467231563617106,
"grad_norm": 0.07256620195156938,
"learning_rate": 3.658300799325568e-05,
"loss": 1.096,
"step": 1410
},
{
"epoch": 0.749371110816894,
"grad_norm": 0.07556178320717383,
"learning_rate": 3.5870878711356906e-05,
"loss": 1.0752,
"step": 1415
},
{
"epoch": 0.7520190652720773,
"grad_norm": 0.0755712488652408,
"learning_rate": 3.516423100232943e-05,
"loss": 1.0738,
"step": 1420
},
{
"epoch": 0.7546670197272607,
"grad_norm": 0.07976556756122673,
"learning_rate": 3.446312526838726e-05,
"loss": 1.0695,
"step": 1425
},
{
"epoch": 0.7573149741824441,
"grad_norm": 0.07904402049081768,
"learning_rate": 3.376762143803249e-05,
"loss": 1.0979,
"step": 1430
},
{
"epoch": 0.7599629286376274,
"grad_norm": 0.07776446642649297,
"learning_rate": 3.307777896093238e-05,
"loss": 1.1018,
"step": 1435
},
{
"epoch": 0.7626108830928108,
"grad_norm": 0.07527759382185836,
"learning_rate": 3.239365680283812e-05,
"loss": 1.0968,
"step": 1440
},
{
"epoch": 0.7652588375479942,
"grad_norm": 0.0776648894196293,
"learning_rate": 3.171531344054448e-05,
"loss": 1.0907,
"step": 1445
},
{
"epoch": 0.7679067920031776,
"grad_norm": 0.07845291586662016,
"learning_rate": 3.104280685689128e-05,
"loss": 1.0988,
"step": 1450
},
{
"epoch": 0.7705547464583609,
"grad_norm": 0.07774267939899475,
"learning_rate": 3.037619453580741e-05,
"loss": 1.1025,
"step": 1455
},
{
"epoch": 0.7732027009135443,
"grad_norm": 0.07875923469354247,
"learning_rate": 2.9715533457397115e-05,
"loss": 1.1106,
"step": 1460
},
{
"epoch": 0.7758506553687277,
"grad_norm": 0.07692024929981921,
"learning_rate": 2.906088009306942e-05,
"loss": 1.0852,
"step": 1465
},
{
"epoch": 0.7784986098239111,
"grad_norm": 0.07785685577501977,
"learning_rate": 2.841229040071145e-05,
"loss": 1.0682,
"step": 1470
},
{
"epoch": 0.7811465642790943,
"grad_norm": 0.0760319263173443,
"learning_rate": 2.7769819819904876e-05,
"loss": 1.1018,
"step": 1475
},
{
"epoch": 0.7837945187342777,
"grad_norm": 0.076129060223867,
"learning_rate": 2.7133523267187344e-05,
"loss": 1.1027,
"step": 1480
},
{
"epoch": 0.7864424731894611,
"grad_norm": 0.07951511310110099,
"learning_rate": 2.650345513135849e-05,
"loss": 1.1047,
"step": 1485
},
{
"epoch": 0.7890904276446445,
"grad_norm": 0.0766458764221786,
"learning_rate": 2.587966926883063e-05,
"loss": 1.1019,
"step": 1490
},
{
"epoch": 0.7917383820998278,
"grad_norm": 0.07749498443053499,
"learning_rate": 2.526221899902539e-05,
"loss": 1.1006,
"step": 1495
},
{
"epoch": 0.7943863365550112,
"grad_norm": 0.08039092314611351,
"learning_rate": 2.465115709981636e-05,
"loss": 1.0787,
"step": 1500
},
{
"epoch": 0.7970342910101946,
"grad_norm": 0.07771691438169755,
"learning_rate": 2.4046535803017344e-05,
"loss": 1.1049,
"step": 1505
},
{
"epoch": 0.799682245465378,
"grad_norm": 0.07950511536169985,
"learning_rate": 2.3448406789918133e-05,
"loss": 1.0991,
"step": 1510
},
{
"epoch": 0.8023301999205613,
"grad_norm": 0.08027602014187285,
"learning_rate": 2.285682118686674e-05,
"loss": 1.1072,
"step": 1515
},
{
"epoch": 0.8049781543757447,
"grad_norm": 0.07696693283704584,
"learning_rate": 2.227182956089924e-05,
"loss": 1.0927,
"step": 1520
},
{
"epoch": 0.8076261088309281,
"grad_norm": 0.07963101925667078,
"learning_rate": 2.169348191541757e-05,
"loss": 1.0934,
"step": 1525
},
{
"epoch": 0.8102740632861115,
"grad_norm": 0.08049222177031838,
"learning_rate": 2.112182768591534e-05,
"loss": 1.1383,
"step": 1530
},
{
"epoch": 0.8129220177412948,
"grad_norm": 0.07530307843428516,
"learning_rate": 2.0556915735752114e-05,
"loss": 1.0875,
"step": 1535
},
{
"epoch": 0.8155699721964782,
"grad_norm": 0.0773611919685891,
"learning_rate": 1.9998794351976958e-05,
"loss": 1.107,
"step": 1540
},
{
"epoch": 0.8182179266516616,
"grad_norm": 0.07662227830919144,
"learning_rate": 1.9447511241200722e-05,
"loss": 1.1042,
"step": 1545
},
{
"epoch": 0.820865881106845,
"grad_norm": 0.07670312386682876,
"learning_rate": 1.8903113525518334e-05,
"loss": 1.0996,
"step": 1550
},
{
"epoch": 0.8235138355620283,
"grad_norm": 0.07637261789090262,
"learning_rate": 1.836564773848112e-05,
"loss": 1.1056,
"step": 1555
},
{
"epoch": 0.8261617900172117,
"grad_norm": 0.0778692527235825,
"learning_rate": 1.783515982111885e-05,
"loss": 1.1013,
"step": 1560
},
{
"epoch": 0.8288097444723951,
"grad_norm": 0.07646896656564574,
"learning_rate": 1.7311695118013237e-05,
"loss": 1.123,
"step": 1565
},
{
"epoch": 0.8314576989275785,
"grad_norm": 0.07814676271315506,
"learning_rate": 1.6795298373421776e-05,
"loss": 1.1167,
"step": 1570
},
{
"epoch": 0.8341056533827618,
"grad_norm": 0.07623300427172139,
"learning_rate": 1.6286013727453154e-05,
"loss": 1.1107,
"step": 1575
},
{
"epoch": 0.8367536078379452,
"grad_norm": 0.0770697500075822,
"learning_rate": 1.5783884712294362e-05,
"loss": 1.083,
"step": 1580
},
{
"epoch": 0.8394015622931286,
"grad_norm": 0.07692133101024615,
"learning_rate": 1.5288954248489685e-05,
"loss": 1.1099,
"step": 1585
},
{
"epoch": 0.842049516748312,
"grad_norm": 0.0755198202012927,
"learning_rate": 1.4801264641271827e-05,
"loss": 1.0753,
"step": 1590
},
{
"epoch": 0.8446974712034953,
"grad_norm": 0.07515799406054297,
"learning_rate": 1.4320857576946e-05,
"loss": 1.1014,
"step": 1595
},
{
"epoch": 0.8473454256586787,
"grad_norm": 0.07479012037004316,
"learning_rate": 1.3847774119326573e-05,
"loss": 1.0942,
"step": 1600
},
{
"epoch": 0.849993380113862,
"grad_norm": 0.07721255980030046,
"learning_rate": 1.3382054706226987e-05,
"loss": 1.1133,
"step": 1605
},
{
"epoch": 0.8526413345690455,
"grad_norm": 0.07642790610807988,
"learning_rate": 1.2923739146003489e-05,
"loss": 1.0951,
"step": 1610
},
{
"epoch": 0.8552892890242287,
"grad_norm": 0.07855286960878372,
"learning_rate": 1.2472866614152124e-05,
"loss": 1.0965,
"step": 1615
},
{
"epoch": 0.8579372434794121,
"grad_norm": 0.07681456927547702,
"learning_rate": 1.2029475649960353e-05,
"loss": 1.0866,
"step": 1620
},
{
"epoch": 0.8605851979345955,
"grad_norm": 0.07738703516789706,
"learning_rate": 1.1593604153212767e-05,
"loss": 1.1066,
"step": 1625
},
{
"epoch": 0.8632331523897789,
"grad_norm": 0.07595140973388262,
"learning_rate": 1.1165289380951416e-05,
"loss": 1.1071,
"step": 1630
},
{
"epoch": 0.8658811068449622,
"grad_norm": 0.07583738961687224,
"learning_rate": 1.074456794429135e-05,
"loss": 1.1228,
"step": 1635
},
{
"epoch": 0.8685290613001456,
"grad_norm": 0.0759554158788922,
"learning_rate": 1.0331475805291136e-05,
"loss": 1.1145,
"step": 1640
},
{
"epoch": 0.871177015755329,
"grad_norm": 0.07558370365509604,
"learning_rate": 9.926048273878819e-06,
"loss": 1.0867,
"step": 1645
},
{
"epoch": 0.8738249702105124,
"grad_norm": 0.07447741708164467,
"learning_rate": 9.528320004833935e-06,
"loss": 1.0982,
"step": 1650
},
{
"epoch": 0.8764729246656957,
"grad_norm": 0.07741539189144307,
"learning_rate": 9.138324994825186e-06,
"loss": 1.0949,
"step": 1655
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.07536616901375505,
"learning_rate": 8.75609657950448e-06,
"loss": 1.0941,
"step": 1660
},
{
"epoch": 0.8817688335760625,
"grad_norm": 0.07569809049947313,
"learning_rate": 8.381667430657603e-06,
"loss": 1.0907,
"step": 1665
},
{
"epoch": 0.8844167880312459,
"grad_norm": 0.07776927737637536,
"learning_rate": 8.015069553411469e-06,
"loss": 1.1134,
"step": 1670
},
{
"epoch": 0.8870647424864292,
"grad_norm": 0.07389019327060092,
"learning_rate": 7.656334283498334e-06,
"loss": 1.0757,
"step": 1675
},
{
"epoch": 0.8897126969416126,
"grad_norm": 0.07792778854648327,
"learning_rate": 7.305492284577531e-06,
"loss": 1.0795,
"step": 1680
},
{
"epoch": 0.892360651396796,
"grad_norm": 0.07609958493651112,
"learning_rate": 6.962573545614159e-06,
"loss": 1.0975,
"step": 1685
},
{
"epoch": 0.8950086058519794,
"grad_norm": 0.07556244044567743,
"learning_rate": 6.627607378315903e-06,
"loss": 1.1235,
"step": 1690
},
{
"epoch": 0.8976565603071627,
"grad_norm": 0.07801379209812083,
"learning_rate": 6.3006224146274994e-06,
"loss": 1.1113,
"step": 1695
},
{
"epoch": 0.9003045147623461,
"grad_norm": 0.07594137231580138,
"learning_rate": 5.9816466042833085e-06,
"loss": 1.1,
"step": 1700
},
{
"epoch": 0.9029524692175295,
"grad_norm": 0.0765503105906239,
"learning_rate": 5.670707212418313e-06,
"loss": 1.0999,
"step": 1705
},
{
"epoch": 0.9056004236727129,
"grad_norm": 0.0784364193602198,
"learning_rate": 5.367830817237596e-06,
"loss": 1.0789,
"step": 1710
},
{
"epoch": 0.9082483781278962,
"grad_norm": 0.07687073522520732,
"learning_rate": 5.073043307744396e-06,
"loss": 1.0941,
"step": 1715
},
{
"epoch": 0.9108963325830796,
"grad_norm": 0.07581921026660658,
"learning_rate": 4.7863698815273195e-06,
"loss": 1.1097,
"step": 1720
},
{
"epoch": 0.913544287038263,
"grad_norm": 0.07789236711738227,
"learning_rate": 4.507835042606456e-06,
"loss": 1.0713,
"step": 1725
},
{
"epoch": 0.9161922414934464,
"grad_norm": 0.07967848637291239,
"learning_rate": 4.237462599338837e-06,
"loss": 1.114,
"step": 1730
},
{
"epoch": 0.9188401959486296,
"grad_norm": 0.07753641186277775,
"learning_rate": 3.975275662383404e-06,
"loss": 1.086,
"step": 1735
},
{
"epoch": 0.921488150403813,
"grad_norm": 0.07788205466367802,
"learning_rate": 3.721296642725569e-06,
"loss": 1.1076,
"step": 1740
},
{
"epoch": 0.9241361048589964,
"grad_norm": 0.07489418836802078,
"learning_rate": 3.475547249761557e-06,
"loss": 1.0876,
"step": 1745
},
{
"epoch": 0.9267840593141798,
"grad_norm": 0.07633053121238625,
"learning_rate": 3.23804848944278e-06,
"loss": 1.1186,
"step": 1750
},
{
"epoch": 0.9294320137693631,
"grad_norm": 0.0749545234975198,
"learning_rate": 3.0088206624802716e-06,
"loss": 1.1176,
"step": 1755
},
{
"epoch": 0.9320799682245465,
"grad_norm": 0.07806751123666335,
"learning_rate": 2.7878833626095224e-06,
"loss": 1.1104,
"step": 1760
},
{
"epoch": 0.9347279226797299,
"grad_norm": 0.07350877409736523,
"learning_rate": 2.575255474915561e-06,
"loss": 1.1033,
"step": 1765
},
{
"epoch": 0.9373758771349133,
"grad_norm": 0.07754465442688381,
"learning_rate": 2.3709551742187586e-06,
"loss": 1.0918,
"step": 1770
},
{
"epoch": 0.9400238315900966,
"grad_norm": 0.07943198880463362,
"learning_rate": 2.1749999235213147e-06,
"loss": 1.0957,
"step": 1775
},
{
"epoch": 0.94267178604528,
"grad_norm": 0.0738980478341624,
"learning_rate": 1.9874064725145746e-06,
"loss": 1.0965,
"step": 1780
},
{
"epoch": 0.9453197405004634,
"grad_norm": 0.07568301411412179,
"learning_rate": 1.8081908561472406e-06,
"loss": 1.0928,
"step": 1785
},
{
"epoch": 0.9479676949556468,
"grad_norm": 0.07808219875357872,
"learning_rate": 1.6373683932548455e-06,
"loss": 1.0951,
"step": 1790
},
{
"epoch": 0.9506156494108301,
"grad_norm": 0.0768674233407858,
"learning_rate": 1.4749536852502687e-06,
"loss": 1.0992,
"step": 1795
},
{
"epoch": 0.9532636038660135,
"grad_norm": 0.0768177210422567,
"learning_rate": 1.3209606148756993e-06,
"loss": 1.1247,
"step": 1800
},
{
"epoch": 0.9559115583211969,
"grad_norm": 0.076755922231129,
"learning_rate": 1.1754023450159523e-06,
"loss": 1.0955,
"step": 1805
},
{
"epoch": 0.9585595127763803,
"grad_norm": 0.07587714541337397,
"learning_rate": 1.0382913175733587e-06,
"loss": 1.1138,
"step": 1810
},
{
"epoch": 0.9612074672315636,
"grad_norm": 0.07977716829550086,
"learning_rate": 9.096392524042374e-07,
"loss": 1.1018,
"step": 1815
},
{
"epoch": 0.963855421686747,
"grad_norm": 0.07805623190906948,
"learning_rate": 7.894571463171851e-07,
"loss": 1.0995,
"step": 1820
},
{
"epoch": 0.9665033761419304,
"grad_norm": 0.07827918723132034,
"learning_rate": 6.77755272132985e-07,
"loss": 1.1072,
"step": 1825
},
{
"epoch": 0.9691513305971138,
"grad_norm": 0.07504095329870522,
"learning_rate": 5.745431778066412e-07,
"loss": 1.0945,
"step": 1830
},
{
"epoch": 0.9717992850522971,
"grad_norm": 0.07543686944798814,
"learning_rate": 4.798296856111772e-07,
"loss": 1.1205,
"step": 1835
},
{
"epoch": 0.9744472395074805,
"grad_norm": 0.0775313859719524,
"learning_rate": 3.9362289138351604e-07,
"loss": 1.0969,
"step": 1840
},
{
"epoch": 0.9770951939626639,
"grad_norm": 0.08399677573379252,
"learning_rate": 3.15930163832534e-07,
"loss": 1.1326,
"step": 1845
},
{
"epoch": 0.9797431484178473,
"grad_norm": 0.08167978103101492,
"learning_rate": 2.4675814390916483e-07,
"loss": 1.1006,
"step": 1850
},
{
"epoch": 0.9823911028730306,
"grad_norm": 0.07627980254031101,
"learning_rate": 1.8611274423874226e-07,
"loss": 1.1026,
"step": 1855
},
{
"epoch": 0.985039057328214,
"grad_norm": 0.07798059783576732,
"learning_rate": 1.339991486156489e-07,
"loss": 1.112,
"step": 1860
},
{
"epoch": 0.9876870117833974,
"grad_norm": 0.07847559734854997,
"learning_rate": 9.04218115601485e-08,
"loss": 1.0969,
"step": 1865
},
{
"epoch": 0.9903349662385807,
"grad_norm": 0.07602247898283418,
"learning_rate": 5.5384457937723843e-08,
"loss": 1.0857,
"step": 1870
},
{
"epoch": 0.992982920693764,
"grad_norm": 0.07830104923923938,
"learning_rate": 2.8890082640598004e-08,
"loss": 1.0924,
"step": 1875
},
{
"epoch": 0.9956308751489474,
"grad_norm": 0.0737777189572401,
"learning_rate": 1.0940950331772559e-08,
"loss": 1.0997,
"step": 1880
},
{
"epoch": 0.9982788296041308,
"grad_norm": 0.07376783137923695,
"learning_rate": 1.5385952514712644e-09,
"loss": 1.1069,
"step": 1885
},
{
"epoch": 0.9998676022772408,
"eval_loss": 1.0810853242874146,
"eval_runtime": 4476.4649,
"eval_samples_per_second": 3.006,
"eval_steps_per_second": 0.752,
"step": 1888
},
{
"epoch": 0.9998676022772408,
"step": 1888,
"total_flos": 2.3745805538033664e+16,
"train_loss": 1.1182949897848953,
"train_runtime": 43452.0384,
"train_samples_per_second": 2.781,
"train_steps_per_second": 0.043
}
],
"logging_steps": 5,
"max_steps": 1888,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.3745805538033664e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}