{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 2421,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012397334573066791,
"grad_norm": 0.6915941834449768,
"learning_rate": 0.00018,
"loss": 2.3006,
"step": 10
},
{
"epoch": 0.024794669146133583,
"grad_norm": 0.6273291707038879,
"learning_rate": 0.0001992534218166736,
"loss": 2.1317,
"step": 20
},
{
"epoch": 0.037192003719200374,
"grad_norm": 0.45228877663612366,
"learning_rate": 0.00019842389050186645,
"loss": 1.9808,
"step": 30
},
{
"epoch": 0.049589338292267165,
"grad_norm": 0.3886250853538513,
"learning_rate": 0.00019759435918705932,
"loss": 1.8968,
"step": 40
},
{
"epoch": 0.06198667286533395,
"grad_norm": 0.3263673484325409,
"learning_rate": 0.00019676482787225219,
"loss": 1.9159,
"step": 50
},
{
"epoch": 0.07438400743840075,
"grad_norm": 0.3357856273651123,
"learning_rate": 0.00019593529655744505,
"loss": 1.9052,
"step": 60
},
{
"epoch": 0.08678134201146753,
"grad_norm": 0.3885687291622162,
"learning_rate": 0.00019510576524263792,
"loss": 1.8788,
"step": 70
},
{
"epoch": 0.09917867658453433,
"grad_norm": 0.33165615797042847,
"learning_rate": 0.00019427623392783078,
"loss": 1.876,
"step": 80
},
{
"epoch": 0.11157601115760112,
"grad_norm": 0.3510221242904663,
"learning_rate": 0.00019344670261302365,
"loss": 1.7992,
"step": 90
},
{
"epoch": 0.1239733457306679,
"grad_norm": 0.35817059874534607,
"learning_rate": 0.0001926171712982165,
"loss": 1.7386,
"step": 100
},
{
"epoch": 0.1363706803037347,
"grad_norm": 0.3222461938858032,
"learning_rate": 0.00019178763998340938,
"loss": 1.8766,
"step": 110
},
{
"epoch": 0.1487680148768015,
"grad_norm": 0.302379310131073,
"learning_rate": 0.00019095810866860224,
"loss": 1.833,
"step": 120
},
{
"epoch": 0.16116534944986827,
"grad_norm": 0.3621092736721039,
"learning_rate": 0.00019012857735379514,
"loss": 1.7433,
"step": 130
},
{
"epoch": 0.17356268402293507,
"grad_norm": 0.35103851556777954,
"learning_rate": 0.00018929904603898798,
"loss": 1.8552,
"step": 140
},
{
"epoch": 0.18596001859600186,
"grad_norm": 0.3761669993400574,
"learning_rate": 0.00018846951472418084,
"loss": 1.8072,
"step": 150
},
{
"epoch": 0.19835735316906866,
"grad_norm": 0.2791743576526642,
"learning_rate": 0.00018763998340937373,
"loss": 1.882,
"step": 160
},
{
"epoch": 0.21075468774213543,
"grad_norm": 0.28887104988098145,
"learning_rate": 0.00018681045209456657,
"loss": 1.945,
"step": 170
},
{
"epoch": 0.22315202231520223,
"grad_norm": 0.3621903955936432,
"learning_rate": 0.00018598092077975944,
"loss": 1.7506,
"step": 180
},
{
"epoch": 0.23554935688826903,
"grad_norm": 0.36899787187576294,
"learning_rate": 0.00018515138946495233,
"loss": 1.8959,
"step": 190
},
{
"epoch": 0.2479466914613358,
"grad_norm": 0.3354776203632355,
"learning_rate": 0.00018432185815014517,
"loss": 1.9282,
"step": 200
},
{
"epoch": 0.2603440260344026,
"grad_norm": 0.3030059337615967,
"learning_rate": 0.00018349232683533803,
"loss": 1.9876,
"step": 210
},
{
"epoch": 0.2727413606074694,
"grad_norm": 0.380134254693985,
"learning_rate": 0.00018266279552053093,
"loss": 1.795,
"step": 220
},
{
"epoch": 0.2851386951805362,
"grad_norm": 0.3484257161617279,
"learning_rate": 0.00018183326420572376,
"loss": 1.9131,
"step": 230
},
{
"epoch": 0.297536029753603,
"grad_norm": 0.3204387426376343,
"learning_rate": 0.00018100373289091663,
"loss": 1.9829,
"step": 240
},
{
"epoch": 0.3099333643266698,
"grad_norm": 0.3759450912475586,
"learning_rate": 0.00018017420157610952,
"loss": 1.9002,
"step": 250
},
{
"epoch": 0.32233069889973653,
"grad_norm": 0.3721698820590973,
"learning_rate": 0.00017934467026130236,
"loss": 1.8657,
"step": 260
},
{
"epoch": 0.33472803347280333,
"grad_norm": 0.35085615515708923,
"learning_rate": 0.00017851513894649523,
"loss": 1.8623,
"step": 270
},
{
"epoch": 0.34712536804587013,
"grad_norm": 0.35750696063041687,
"learning_rate": 0.00017768560763168812,
"loss": 1.8743,
"step": 280
},
{
"epoch": 0.35952270261893693,
"grad_norm": 0.3072109520435333,
"learning_rate": 0.00017685607631688096,
"loss": 1.9626,
"step": 290
},
{
"epoch": 0.3719200371920037,
"grad_norm": 0.40647512674331665,
"learning_rate": 0.00017602654500207382,
"loss": 1.8034,
"step": 300
},
{
"epoch": 0.3843173717650705,
"grad_norm": 0.3000311851501465,
"learning_rate": 0.00017519701368726672,
"loss": 1.8995,
"step": 310
},
{
"epoch": 0.3967147063381373,
"grad_norm": 0.36904624104499817,
"learning_rate": 0.00017436748237245955,
"loss": 1.82,
"step": 320
},
{
"epoch": 0.40911204091120407,
"grad_norm": 0.337799072265625,
"learning_rate": 0.00017353795105765242,
"loss": 1.8706,
"step": 330
},
{
"epoch": 0.42150937548427087,
"grad_norm": 0.4223800003528595,
"learning_rate": 0.0001727084197428453,
"loss": 1.8584,
"step": 340
},
{
"epoch": 0.43390671005733766,
"grad_norm": 0.34497585892677307,
"learning_rate": 0.00017187888842803818,
"loss": 1.8417,
"step": 350
},
{
"epoch": 0.44630404463040446,
"grad_norm": 0.34032031893730164,
"learning_rate": 0.00017104935711323102,
"loss": 1.7917,
"step": 360
},
{
"epoch": 0.45870137920347126,
"grad_norm": 0.4158859848976135,
"learning_rate": 0.0001702198257984239,
"loss": 1.8749,
"step": 370
},
{
"epoch": 0.47109871377653806,
"grad_norm": 0.36545196175575256,
"learning_rate": 0.00016939029448361678,
"loss": 1.8337,
"step": 380
},
{
"epoch": 0.48349604834960486,
"grad_norm": 0.32123473286628723,
"learning_rate": 0.00016856076316880961,
"loss": 1.9321,
"step": 390
},
{
"epoch": 0.4958933829226716,
"grad_norm": 0.45476439595222473,
"learning_rate": 0.0001677312318540025,
"loss": 1.8185,
"step": 400
},
{
"epoch": 0.5082907174957384,
"grad_norm": 0.3410905599594116,
"learning_rate": 0.00016690170053919537,
"loss": 1.9082,
"step": 410
},
{
"epoch": 0.5206880520688052,
"grad_norm": 0.3436656892299652,
"learning_rate": 0.0001660721692243882,
"loss": 1.7821,
"step": 420
},
{
"epoch": 0.533085386641872,
"grad_norm": 0.34343594312667847,
"learning_rate": 0.0001652426379095811,
"loss": 1.8184,
"step": 430
},
{
"epoch": 0.5454827212149388,
"grad_norm": 0.4309318959712982,
"learning_rate": 0.00016441310659477397,
"loss": 1.9124,
"step": 440
},
{
"epoch": 0.5578800557880056,
"grad_norm": 0.4032953679561615,
"learning_rate": 0.0001635835752799668,
"loss": 1.7962,
"step": 450
},
{
"epoch": 0.5702773903610724,
"grad_norm": 0.3726664185523987,
"learning_rate": 0.0001627540439651597,
"loss": 1.8158,
"step": 460
},
{
"epoch": 0.5826747249341392,
"grad_norm": 0.36948224902153015,
"learning_rate": 0.00016192451265035257,
"loss": 1.7539,
"step": 470
},
{
"epoch": 0.595072059507206,
"grad_norm": 0.33594000339508057,
"learning_rate": 0.0001610949813355454,
"loss": 1.9287,
"step": 480
},
{
"epoch": 0.6074693940802728,
"grad_norm": 0.3209264576435089,
"learning_rate": 0.0001602654500207383,
"loss": 1.8375,
"step": 490
},
{
"epoch": 0.6198667286533396,
"grad_norm": 0.38256484270095825,
"learning_rate": 0.00015943591870593116,
"loss": 1.817,
"step": 500
},
{
"epoch": 0.6322640632264063,
"grad_norm": 0.38966864347457886,
"learning_rate": 0.000158606387391124,
"loss": 1.7397,
"step": 510
},
{
"epoch": 0.6446613977994731,
"grad_norm": 0.45601052045822144,
"learning_rate": 0.0001577768560763169,
"loss": 1.7448,
"step": 520
},
{
"epoch": 0.6570587323725399,
"grad_norm": 0.39306697249412537,
"learning_rate": 0.00015694732476150976,
"loss": 1.8505,
"step": 530
},
{
"epoch": 0.6694560669456067,
"grad_norm": 0.4036141633987427,
"learning_rate": 0.00015611779344670262,
"loss": 1.9946,
"step": 540
},
{
"epoch": 0.6818534015186735,
"grad_norm": 0.34463125467300415,
"learning_rate": 0.0001552882621318955,
"loss": 2.0461,
"step": 550
},
{
"epoch": 0.6942507360917403,
"grad_norm": 0.3309987485408783,
"learning_rate": 0.00015445873081708835,
"loss": 1.8714,
"step": 560
},
{
"epoch": 0.7066480706648071,
"grad_norm": 0.40711745619773865,
"learning_rate": 0.00015362919950228122,
"loss": 1.9515,
"step": 570
},
{
"epoch": 0.7190454052378739,
"grad_norm": 0.4855351150035858,
"learning_rate": 0.00015279966818747409,
"loss": 1.928,
"step": 580
},
{
"epoch": 0.7314427398109407,
"grad_norm": 0.3159841299057007,
"learning_rate": 0.00015197013687266695,
"loss": 1.8648,
"step": 590
},
{
"epoch": 0.7438400743840075,
"grad_norm": 0.34454017877578735,
"learning_rate": 0.00015114060555785982,
"loss": 1.7614,
"step": 600
},
{
"epoch": 0.7562374089570743,
"grad_norm": 0.42112237215042114,
"learning_rate": 0.00015031107424305268,
"loss": 1.7943,
"step": 610
},
{
"epoch": 0.768634743530141,
"grad_norm": 0.4868924617767334,
"learning_rate": 0.00014948154292824555,
"loss": 1.8236,
"step": 620
},
{
"epoch": 0.7810320781032078,
"grad_norm": 0.27235159277915955,
"learning_rate": 0.00014865201161343841,
"loss": 1.8792,
"step": 630
},
{
"epoch": 0.7934294126762746,
"grad_norm": 0.36492735147476196,
"learning_rate": 0.00014782248029863128,
"loss": 1.8139,
"step": 640
},
{
"epoch": 0.8058267472493414,
"grad_norm": 0.3278910517692566,
"learning_rate": 0.00014699294898382414,
"loss": 1.9303,
"step": 650
},
{
"epoch": 0.8182240818224081,
"grad_norm": 0.4410141110420227,
"learning_rate": 0.000146163417669017,
"loss": 1.7458,
"step": 660
},
{
"epoch": 0.8306214163954749,
"grad_norm": 0.44660821557044983,
"learning_rate": 0.00014533388635420988,
"loss": 1.6484,
"step": 670
},
{
"epoch": 0.8430187509685417,
"grad_norm": 0.36396560072898865,
"learning_rate": 0.00014450435503940274,
"loss": 1.7457,
"step": 680
},
{
"epoch": 0.8554160855416085,
"grad_norm": 0.4536712169647217,
"learning_rate": 0.0001436748237245956,
"loss": 1.7763,
"step": 690
},
{
"epoch": 0.8678134201146753,
"grad_norm": 0.45438772439956665,
"learning_rate": 0.00014284529240978847,
"loss": 1.8736,
"step": 700
},
{
"epoch": 0.8802107546877421,
"grad_norm": 0.331462562084198,
"learning_rate": 0.00014201576109498134,
"loss": 1.9909,
"step": 710
},
{
"epoch": 0.8926080892608089,
"grad_norm": 0.29686763882637024,
"learning_rate": 0.00014118622978017423,
"loss": 1.9242,
"step": 720
},
{
"epoch": 0.9050054238338757,
"grad_norm": 0.4546560049057007,
"learning_rate": 0.00014035669846536707,
"loss": 1.8032,
"step": 730
},
{
"epoch": 0.9174027584069425,
"grad_norm": 0.3135245442390442,
"learning_rate": 0.00013952716715055993,
"loss": 1.835,
"step": 740
},
{
"epoch": 0.9298000929800093,
"grad_norm": 0.6448049545288086,
"learning_rate": 0.00013869763583575283,
"loss": 1.9127,
"step": 750
},
{
"epoch": 0.9421974275530761,
"grad_norm": 0.39725756645202637,
"learning_rate": 0.00013786810452094567,
"loss": 1.8041,
"step": 760
},
{
"epoch": 0.9545947621261429,
"grad_norm": 0.3762451708316803,
"learning_rate": 0.00013703857320613853,
"loss": 1.8003,
"step": 770
},
{
"epoch": 0.9669920966992097,
"grad_norm": 0.35813263058662415,
"learning_rate": 0.00013620904189133142,
"loss": 1.8474,
"step": 780
},
{
"epoch": 0.9793894312722765,
"grad_norm": 0.29999616742134094,
"learning_rate": 0.00013537951057652426,
"loss": 1.8705,
"step": 790
},
{
"epoch": 0.9917867658453432,
"grad_norm": 0.3202720880508423,
"learning_rate": 0.00013454997926171713,
"loss": 1.7752,
"step": 800
},
{
"epoch": 1.0,
"eval_loss": 1.8193774223327637,
"eval_runtime": 78.8064,
"eval_samples_per_second": 9.098,
"eval_steps_per_second": 1.142,
"step": 807
},
{
"epoch": 1.00371920037192,
"grad_norm": 0.30241405963897705,
"learning_rate": 0.00013372044794691002,
"loss": 1.8318,
"step": 810
},
{
"epoch": 1.0161165349449868,
"grad_norm": 0.3700416386127472,
"learning_rate": 0.00013289091663210286,
"loss": 1.8938,
"step": 820
},
{
"epoch": 1.0285138695180536,
"grad_norm": 0.4154430329799652,
"learning_rate": 0.00013206138531729572,
"loss": 1.8733,
"step": 830
},
{
"epoch": 1.0409112040911204,
"grad_norm": 0.38313189148902893,
"learning_rate": 0.00013123185400248862,
"loss": 1.8571,
"step": 840
},
{
"epoch": 1.0533085386641872,
"grad_norm": 0.23230139911174774,
"learning_rate": 0.00013040232268768146,
"loss": 1.798,
"step": 850
},
{
"epoch": 1.065705873237254,
"grad_norm": 0.3701108992099762,
"learning_rate": 0.00012957279137287432,
"loss": 1.8533,
"step": 860
},
{
"epoch": 1.0781032078103208,
"grad_norm": 0.29064834117889404,
"learning_rate": 0.00012874326005806721,
"loss": 1.7453,
"step": 870
},
{
"epoch": 1.0905005423833876,
"grad_norm": 0.3150763213634491,
"learning_rate": 0.00012791372874326005,
"loss": 1.8977,
"step": 880
},
{
"epoch": 1.1028978769564544,
"grad_norm": 0.428843230009079,
"learning_rate": 0.00012708419742845292,
"loss": 1.8688,
"step": 890
},
{
"epoch": 1.1152952115295212,
"grad_norm": 0.2608051896095276,
"learning_rate": 0.0001262546661136458,
"loss": 1.7242,
"step": 900
},
{
"epoch": 1.127692546102588,
"grad_norm": 0.3821583688259125,
"learning_rate": 0.00012542513479883865,
"loss": 1.9037,
"step": 910
},
{
"epoch": 1.1400898806756548,
"grad_norm": 0.28013911843299866,
"learning_rate": 0.00012459560348403151,
"loss": 1.8215,
"step": 920
},
{
"epoch": 1.1524872152487216,
"grad_norm": 0.30506208539009094,
"learning_rate": 0.0001237660721692244,
"loss": 1.8197,
"step": 930
},
{
"epoch": 1.1648845498217884,
"grad_norm": 0.29327717423439026,
"learning_rate": 0.00012293654085441727,
"loss": 1.7784,
"step": 940
},
{
"epoch": 1.1772818843948551,
"grad_norm": 0.23550163209438324,
"learning_rate": 0.0001221070095396101,
"loss": 1.9318,
"step": 950
},
{
"epoch": 1.189679218967922,
"grad_norm": 0.21349768340587616,
"learning_rate": 0.000121277478224803,
"loss": 1.8084,
"step": 960
},
{
"epoch": 1.2020765535409887,
"grad_norm": 0.34790855646133423,
"learning_rate": 0.00012044794690999586,
"loss": 1.7587,
"step": 970
},
{
"epoch": 1.2144738881140555,
"grad_norm": 0.2519979774951935,
"learning_rate": 0.00011961841559518872,
"loss": 1.8055,
"step": 980
},
{
"epoch": 1.2268712226871223,
"grad_norm": 0.3781174123287201,
"learning_rate": 0.0001187888842803816,
"loss": 1.8436,
"step": 990
},
{
"epoch": 1.2392685572601891,
"grad_norm": 0.26533016562461853,
"learning_rate": 0.00011795935296557445,
"loss": 1.6512,
"step": 1000
},
{
"epoch": 1.2516658918332557,
"grad_norm": 0.2862655818462372,
"learning_rate": 0.00011712982165076732,
"loss": 2.0418,
"step": 1010
},
{
"epoch": 1.2640632264063227,
"grad_norm": 0.27094656229019165,
"learning_rate": 0.0001163002903359602,
"loss": 1.7278,
"step": 1020
},
{
"epoch": 1.2764605609793893,
"grad_norm": 0.29184144735336304,
"learning_rate": 0.00011547075902115305,
"loss": 1.6938,
"step": 1030
},
{
"epoch": 1.2888578955524563,
"grad_norm": 0.30606576800346375,
"learning_rate": 0.00011464122770634591,
"loss": 1.6627,
"step": 1040
},
{
"epoch": 1.301255230125523,
"grad_norm": 0.23827411234378815,
"learning_rate": 0.0001138116963915388,
"loss": 1.7671,
"step": 1050
},
{
"epoch": 1.31365256469859,
"grad_norm": 0.3038440942764282,
"learning_rate": 0.00011298216507673165,
"loss": 1.9029,
"step": 1060
},
{
"epoch": 1.3260498992716565,
"grad_norm": 0.22325201332569122,
"learning_rate": 0.00011215263376192451,
"loss": 1.7413,
"step": 1070
},
{
"epoch": 1.3384472338447233,
"grad_norm": 0.30867546796798706,
"learning_rate": 0.00011132310244711739,
"loss": 1.8252,
"step": 1080
},
{
"epoch": 1.35084456841779,
"grad_norm": 0.3236595392227173,
"learning_rate": 0.00011049357113231024,
"loss": 1.8856,
"step": 1090
},
{
"epoch": 1.363241902990857,
"grad_norm": 0.32089242339134216,
"learning_rate": 0.00010966403981750311,
"loss": 1.8437,
"step": 1100
},
{
"epoch": 1.3756392375639237,
"grad_norm": 0.28864434361457825,
"learning_rate": 0.00010883450850269599,
"loss": 1.7875,
"step": 1110
},
{
"epoch": 1.3880365721369905,
"grad_norm": 0.3817216455936432,
"learning_rate": 0.00010800497718788884,
"loss": 1.8455,
"step": 1120
},
{
"epoch": 1.4004339067100573,
"grad_norm": 0.22240202128887177,
"learning_rate": 0.0001071754458730817,
"loss": 1.7744,
"step": 1130
},
{
"epoch": 1.412831241283124,
"grad_norm": 0.28633448481559753,
"learning_rate": 0.00010634591455827458,
"loss": 1.765,
"step": 1140
},
{
"epoch": 1.425228575856191,
"grad_norm": 0.19945985078811646,
"learning_rate": 0.00010551638324346745,
"loss": 1.9739,
"step": 1150
},
{
"epoch": 1.4376259104292577,
"grad_norm": 0.2815242111682892,
"learning_rate": 0.0001046868519286603,
"loss": 1.8369,
"step": 1160
},
{
"epoch": 1.4500232450023245,
"grad_norm": 0.20174123346805573,
"learning_rate": 0.00010385732061385318,
"loss": 1.9351,
"step": 1170
},
{
"epoch": 1.4624205795753913,
"grad_norm": 0.27899351716041565,
"learning_rate": 0.00010302778929904605,
"loss": 1.9544,
"step": 1180
},
{
"epoch": 1.474817914148458,
"grad_norm": 0.22028954327106476,
"learning_rate": 0.00010219825798423892,
"loss": 1.852,
"step": 1190
},
{
"epoch": 1.4872152487215249,
"grad_norm": 0.2520454525947571,
"learning_rate": 0.00010136872666943178,
"loss": 1.6232,
"step": 1200
},
{
"epoch": 1.4996125832945917,
"grad_norm": 0.34104567766189575,
"learning_rate": 0.00010053919535462464,
"loss": 1.9285,
"step": 1210
},
{
"epoch": 1.5120099178676585,
"grad_norm": 0.3216676414012909,
"learning_rate": 9.970966403981751e-05,
"loss": 1.6644,
"step": 1220
},
{
"epoch": 1.5244072524407253,
"grad_norm": 0.22132286429405212,
"learning_rate": 9.888013272501037e-05,
"loss": 1.796,
"step": 1230
},
{
"epoch": 1.536804587013792,
"grad_norm": 0.35185569524765015,
"learning_rate": 9.805060141020324e-05,
"loss": 1.7821,
"step": 1240
},
{
"epoch": 1.5492019215868589,
"grad_norm": 0.16420242190361023,
"learning_rate": 9.72210700953961e-05,
"loss": 1.8054,
"step": 1250
},
{
"epoch": 1.5615992561599255,
"grad_norm": 0.32100507616996765,
"learning_rate": 9.639153878058897e-05,
"loss": 1.8707,
"step": 1260
},
{
"epoch": 1.5739965907329925,
"grad_norm": 0.2014254331588745,
"learning_rate": 9.556200746578184e-05,
"loss": 1.8087,
"step": 1270
},
{
"epoch": 1.586393925306059,
"grad_norm": 0.23392640054225922,
"learning_rate": 9.47324761509747e-05,
"loss": 1.7327,
"step": 1280
},
{
"epoch": 1.598791259879126,
"grad_norm": 0.19987891614437103,
"learning_rate": 9.390294483616757e-05,
"loss": 1.8399,
"step": 1290
},
{
"epoch": 1.6111885944521926,
"grad_norm": 0.13928192853927612,
"learning_rate": 9.307341352136043e-05,
"loss": 1.9271,
"step": 1300
},
{
"epoch": 1.6235859290252597,
"grad_norm": 0.3423463702201843,
"learning_rate": 9.22438822065533e-05,
"loss": 1.81,
"step": 1310
},
{
"epoch": 1.6359832635983262,
"grad_norm": 0.31117212772369385,
"learning_rate": 9.141435089174618e-05,
"loss": 1.8919,
"step": 1320
},
{
"epoch": 1.6483805981713933,
"grad_norm": 0.1769014447927475,
"learning_rate": 9.058481957693903e-05,
"loss": 1.692,
"step": 1330
},
{
"epoch": 1.6607779327444598,
"grad_norm": 0.1725306212902069,
"learning_rate": 8.97552882621319e-05,
"loss": 1.7833,
"step": 1340
},
{
"epoch": 1.6731752673175269,
"grad_norm": 0.15333665907382965,
"learning_rate": 8.892575694732477e-05,
"loss": 1.7129,
"step": 1350
},
{
"epoch": 1.6855726018905934,
"grad_norm": 0.225737065076828,
"learning_rate": 8.809622563251764e-05,
"loss": 2.0034,
"step": 1360
},
{
"epoch": 1.6979699364636605,
"grad_norm": 0.27135154604911804,
"learning_rate": 8.726669431771049e-05,
"loss": 1.7641,
"step": 1370
},
{
"epoch": 1.710367271036727,
"grad_norm": 0.19318152964115143,
"learning_rate": 8.643716300290337e-05,
"loss": 1.9714,
"step": 1380
},
{
"epoch": 1.722764605609794,
"grad_norm": 0.09128980338573456,
"learning_rate": 8.560763168809624e-05,
"loss": 1.6477,
"step": 1390
},
{
"epoch": 1.7351619401828606,
"grad_norm": 0.20176462829113007,
"learning_rate": 8.477810037328909e-05,
"loss": 1.8707,
"step": 1400
},
{
"epoch": 1.7475592747559274,
"grad_norm": 0.1746564656496048,
"learning_rate": 8.394856905848197e-05,
"loss": 1.8979,
"step": 1410
},
{
"epoch": 1.7599566093289942,
"grad_norm": 0.8505027294158936,
"learning_rate": 8.311903774367483e-05,
"loss": 1.71,
"step": 1420
},
{
"epoch": 1.772353943902061,
"grad_norm": 0.18071773648262024,
"learning_rate": 8.22895064288677e-05,
"loss": 1.9505,
"step": 1430
},
{
"epoch": 1.7847512784751278,
"grad_norm": 0.29732322692871094,
"learning_rate": 8.145997511406056e-05,
"loss": 1.8041,
"step": 1440
},
{
"epoch": 1.7971486130481946,
"grad_norm": 0.21795178949832916,
"learning_rate": 8.063044379925343e-05,
"loss": 1.7381,
"step": 1450
},
{
"epoch": 1.8095459476212614,
"grad_norm": 0.23459061980247498,
"learning_rate": 7.98009124844463e-05,
"loss": 1.6636,
"step": 1460
},
{
"epoch": 1.8219432821943282,
"grad_norm": 0.20895689725875854,
"learning_rate": 7.897138116963916e-05,
"loss": 1.8257,
"step": 1470
},
{
"epoch": 1.834340616767395,
"grad_norm": 0.18129809200763702,
"learning_rate": 7.814184985483203e-05,
"loss": 1.9791,
"step": 1480
},
{
"epoch": 1.8467379513404618,
"grad_norm": 0.1758158951997757,
"learning_rate": 7.731231854002489e-05,
"loss": 1.7657,
"step": 1490
},
{
"epoch": 1.8591352859135286,
"grad_norm": 0.14756101369857788,
"learning_rate": 7.648278722521776e-05,
"loss": 1.7483,
"step": 1500
},
{
"epoch": 1.8715326204865954,
"grad_norm": 0.14168867468833923,
"learning_rate": 7.565325591041062e-05,
"loss": 1.8844,
"step": 1510
},
{
"epoch": 1.8839299550596622,
"grad_norm": 0.1295221447944641,
"learning_rate": 7.482372459560349e-05,
"loss": 1.8121,
"step": 1520
},
{
"epoch": 1.896327289632729,
"grad_norm": 0.19356060028076172,
"learning_rate": 7.399419328079635e-05,
"loss": 1.5851,
"step": 1530
},
{
"epoch": 1.9087246242057958,
"grad_norm": 0.20035363733768463,
"learning_rate": 7.316466196598922e-05,
"loss": 1.7063,
"step": 1540
},
{
"epoch": 1.9211219587788624,
"grad_norm": 0.12139635533094406,
"learning_rate": 7.233513065118208e-05,
"loss": 1.7962,
"step": 1550
},
{
"epoch": 1.9335192933519294,
"grad_norm": 0.10587511211633682,
"learning_rate": 7.150559933637495e-05,
"loss": 1.7143,
"step": 1560
},
{
"epoch": 1.945916627924996,
"grad_norm": 0.18997065722942352,
"learning_rate": 7.067606802156782e-05,
"loss": 1.7636,
"step": 1570
},
{
"epoch": 1.958313962498063,
"grad_norm": 0.15760765969753265,
"learning_rate": 6.984653670676068e-05,
"loss": 1.7672,
"step": 1580
},
{
"epoch": 1.9707112970711296,
"grad_norm": 0.1490117609500885,
"learning_rate": 6.901700539195355e-05,
"loss": 1.6405,
"step": 1590
},
{
"epoch": 1.9831086316441966,
"grad_norm": 0.09482846409082413,
"learning_rate": 6.818747407714641e-05,
"loss": 1.8141,
"step": 1600
},
{
"epoch": 1.9955059662172632,
"grad_norm": 0.182535782456398,
"learning_rate": 6.735794276233928e-05,
"loss": 1.7745,
"step": 1610
},
{
"epoch": 2.0,
"eval_loss": 1.8065738677978516,
"eval_runtime": 78.829,
"eval_samples_per_second": 9.096,
"eval_steps_per_second": 1.142,
"step": 1614
},
{
"epoch": 2.00743840074384,
"grad_norm": 0.24950097501277924,
"learning_rate": 6.652841144753214e-05,
"loss": 1.7537,
"step": 1620
},
{
"epoch": 2.019835735316907,
"grad_norm": 0.12352726608514786,
"learning_rate": 6.569888013272502e-05,
"loss": 1.7048,
"step": 1630
},
{
"epoch": 2.0322330698899735,
"grad_norm": 0.23885582387447357,
"learning_rate": 6.486934881791787e-05,
"loss": 1.7903,
"step": 1640
},
{
"epoch": 2.0446304044630406,
"grad_norm": 0.08030597865581512,
"learning_rate": 6.403981750311074e-05,
"loss": 1.8085,
"step": 1650
},
{
"epoch": 2.057027739036107,
"grad_norm": 0.12623316049575806,
"learning_rate": 6.321028618830362e-05,
"loss": 1.7876,
"step": 1660
},
{
"epoch": 2.069425073609174,
"grad_norm": 0.08768967539072037,
"learning_rate": 6.238075487349647e-05,
"loss": 1.767,
"step": 1670
},
{
"epoch": 2.0818224081822407,
"grad_norm": 0.39565062522888184,
"learning_rate": 6.155122355868934e-05,
"loss": 1.7355,
"step": 1680
},
{
"epoch": 2.0942197427553078,
"grad_norm": 0.1476745903491974,
"learning_rate": 6.072169224388221e-05,
"loss": 1.8694,
"step": 1690
},
{
"epoch": 2.1066170773283743,
"grad_norm": 0.10529963672161102,
"learning_rate": 5.989216092907508e-05,
"loss": 1.8461,
"step": 1700
},
{
"epoch": 2.1190144119014414,
"grad_norm": 0.11380179971456528,
"learning_rate": 5.906262961426794e-05,
"loss": 1.7705,
"step": 1710
},
{
"epoch": 2.131411746474508,
"grad_norm": 0.16582056879997253,
"learning_rate": 5.823309829946081e-05,
"loss": 1.7271,
"step": 1720
},
{
"epoch": 2.143809081047575,
"grad_norm": 0.09009672701358795,
"learning_rate": 5.740356698465368e-05,
"loss": 1.9473,
"step": 1730
},
{
"epoch": 2.1562064156206415,
"grad_norm": 0.09548009186983109,
"learning_rate": 5.6574035669846536e-05,
"loss": 1.8999,
"step": 1740
},
{
"epoch": 2.1686037501937085,
"grad_norm": 0.08916931599378586,
"learning_rate": 5.574450435503941e-05,
"loss": 1.7541,
"step": 1750
},
{
"epoch": 2.181001084766775,
"grad_norm": 0.09346842020750046,
"learning_rate": 5.4914973040232274e-05,
"loss": 1.7848,
"step": 1760
},
{
"epoch": 2.193398419339842,
"grad_norm": 0.09803362190723419,
"learning_rate": 5.408544172542513e-05,
"loss": 1.8555,
"step": 1770
},
{
"epoch": 2.2057957539129087,
"grad_norm": 0.23660095036029816,
"learning_rate": 5.3255910410618005e-05,
"loss": 1.8483,
"step": 1780
},
{
"epoch": 2.2181930884859753,
"grad_norm": 0.0797749012708664,
"learning_rate": 5.242637909581087e-05,
"loss": 1.8747,
"step": 1790
},
{
"epoch": 2.2305904230590423,
"grad_norm": 0.13405947387218475,
"learning_rate": 5.159684778100373e-05,
"loss": 1.874,
"step": 1800
},
{
"epoch": 2.242987757632109,
"grad_norm": 0.0895572081208229,
"learning_rate": 5.07673164661966e-05,
"loss": 1.7637,
"step": 1810
},
{
"epoch": 2.255385092205176,
"grad_norm": 0.1163070872426033,
"learning_rate": 4.993778515138947e-05,
"loss": 1.9363,
"step": 1820
},
{
"epoch": 2.2677824267782425,
"grad_norm": 0.08817609399557114,
"learning_rate": 4.910825383658233e-05,
"loss": 1.8142,
"step": 1830
},
{
"epoch": 2.2801797613513095,
"grad_norm": 0.1023801639676094,
"learning_rate": 4.82787225217752e-05,
"loss": 1.9672,
"step": 1840
},
{
"epoch": 2.292577095924376,
"grad_norm": 0.12135002017021179,
"learning_rate": 4.7449191206968064e-05,
"loss": 1.9507,
"step": 1850
},
{
"epoch": 2.304974430497443,
"grad_norm": 0.11531046777963638,
"learning_rate": 4.6619659892160936e-05,
"loss": 1.6656,
"step": 1860
},
{
"epoch": 2.3173717650705097,
"grad_norm": 0.08688156306743622,
"learning_rate": 4.5790128577353795e-05,
"loss": 1.6901,
"step": 1870
},
{
"epoch": 2.3297690996435767,
"grad_norm": 0.12937931716442108,
"learning_rate": 4.496059726254666e-05,
"loss": 1.6826,
"step": 1880
},
{
"epoch": 2.3421664342166433,
"grad_norm": 0.08356551826000214,
"learning_rate": 4.413106594773953e-05,
"loss": 1.7614,
"step": 1890
},
{
"epoch": 2.3545637687897103,
"grad_norm": 0.08626966178417206,
"learning_rate": 4.330153463293239e-05,
"loss": 1.924,
"step": 1900
},
{
"epoch": 2.366961103362777,
"grad_norm": 0.12821203470230103,
"learning_rate": 4.247200331812526e-05,
"loss": 1.6473,
"step": 1910
},
{
"epoch": 2.379358437935844,
"grad_norm": 0.08293508738279343,
"learning_rate": 4.164247200331813e-05,
"loss": 1.8332,
"step": 1920
},
{
"epoch": 2.3917557725089105,
"grad_norm": 0.07418167591094971,
"learning_rate": 4.0812940688510995e-05,
"loss": 1.8255,
"step": 1930
},
{
"epoch": 2.4041531070819775,
"grad_norm": 0.10695914179086685,
"learning_rate": 3.998340937370386e-05,
"loss": 1.7543,
"step": 1940
},
{
"epoch": 2.416550441655044,
"grad_norm": 0.0908871591091156,
"learning_rate": 3.9153878058896726e-05,
"loss": 1.7794,
"step": 1950
},
{
"epoch": 2.428947776228111,
"grad_norm": 0.11525363475084305,
"learning_rate": 3.832434674408959e-05,
"loss": 1.6607,
"step": 1960
},
{
"epoch": 2.4413451108011777,
"grad_norm": 0.07907426357269287,
"learning_rate": 3.749481542928246e-05,
"loss": 1.7764,
"step": 1970
},
{
"epoch": 2.4537424453742447,
"grad_norm": 0.0990724042057991,
"learning_rate": 3.666528411447532e-05,
"loss": 1.857,
"step": 1980
},
{
"epoch": 2.4661397799473113,
"grad_norm": 0.06209372356534004,
"learning_rate": 3.583575279966819e-05,
"loss": 1.9036,
"step": 1990
},
{
"epoch": 2.4785371145203783,
"grad_norm": 0.07287060469388962,
"learning_rate": 3.5006221484861054e-05,
"loss": 1.8647,
"step": 2000
},
{
"epoch": 2.490934449093445,
"grad_norm": 0.06776302307844162,
"learning_rate": 3.417669017005392e-05,
"loss": 1.8703,
"step": 2010
},
{
"epoch": 2.5033317836665114,
"grad_norm": 0.08577609807252884,
"learning_rate": 3.3347158855246785e-05,
"loss": 1.8978,
"step": 2020
},
{
"epoch": 2.5157291182395785,
"grad_norm": 0.087737075984478,
"learning_rate": 3.251762754043966e-05,
"loss": 1.7975,
"step": 2030
},
{
"epoch": 2.5281264528126455,
"grad_norm": 0.07157925516366959,
"learning_rate": 3.1688096225632516e-05,
"loss": 1.8695,
"step": 2040
},
{
"epoch": 2.540523787385712,
"grad_norm": 0.0907244011759758,
"learning_rate": 3.085856491082538e-05,
"loss": 1.8752,
"step": 2050
},
{
"epoch": 2.5529211219587786,
"grad_norm": 0.0730370432138443,
"learning_rate": 3.0029033596018254e-05,
"loss": 1.6779,
"step": 2060
},
{
"epoch": 2.5653184565318456,
"grad_norm": 0.09164728224277496,
"learning_rate": 2.9199502281211116e-05,
"loss": 1.7662,
"step": 2070
},
{
"epoch": 2.5777157911049127,
"grad_norm": 0.08118163049221039,
"learning_rate": 2.8369970966403985e-05,
"loss": 1.7423,
"step": 2080
},
{
"epoch": 2.5901131256779792,
"grad_norm": 0.07020942121744156,
"learning_rate": 2.754043965159685e-05,
"loss": 1.6809,
"step": 2090
},
{
"epoch": 2.602510460251046,
"grad_norm": 0.08827990293502808,
"learning_rate": 2.6710908336789713e-05,
"loss": 1.5771,
"step": 2100
},
{
"epoch": 2.614907794824113,
"grad_norm": 0.11959416419267654,
"learning_rate": 2.5881377021982585e-05,
"loss": 1.756,
"step": 2110
},
{
"epoch": 2.62730512939718,
"grad_norm": 0.18080498278141022,
"learning_rate": 2.5051845707175447e-05,
"loss": 1.7437,
"step": 2120
},
{
"epoch": 2.6397024639702464,
"grad_norm": 0.08301777392625809,
"learning_rate": 2.4222314392368316e-05,
"loss": 1.9331,
"step": 2130
},
{
"epoch": 2.652099798543313,
"grad_norm": 0.08320324867963791,
"learning_rate": 2.3392783077561178e-05,
"loss": 1.6681,
"step": 2140
},
{
"epoch": 2.66449713311638,
"grad_norm": 0.09895749390125275,
"learning_rate": 2.2563251762754044e-05,
"loss": 1.7887,
"step": 2150
},
{
"epoch": 2.6768944676894466,
"grad_norm": 0.08482034504413605,
"learning_rate": 2.1733720447946913e-05,
"loss": 1.8258,
"step": 2160
},
{
"epoch": 2.6892918022625136,
"grad_norm": 0.08512965589761734,
"learning_rate": 2.0904189133139775e-05,
"loss": 1.8644,
"step": 2170
},
{
"epoch": 2.70168913683558,
"grad_norm": 0.0718398168683052,
"learning_rate": 2.0074657818332644e-05,
"loss": 1.875,
"step": 2180
},
{
"epoch": 2.7140864714086472,
"grad_norm": 0.07293003052473068,
"learning_rate": 1.924512650352551e-05,
"loss": 1.905,
"step": 2190
},
{
"epoch": 2.726483805981714,
"grad_norm": 0.07507035881280899,
"learning_rate": 1.8415595188718375e-05,
"loss": 1.8567,
"step": 2200
},
{
"epoch": 2.738881140554781,
"grad_norm": 0.07978376746177673,
"learning_rate": 1.758606387391124e-05,
"loss": 1.763,
"step": 2210
},
{
"epoch": 2.7512784751278474,
"grad_norm": 0.06767022609710693,
"learning_rate": 1.6756532559104106e-05,
"loss": 1.7365,
"step": 2220
},
{
"epoch": 2.7636758097009144,
"grad_norm": 0.08018273115158081,
"learning_rate": 1.5927001244296975e-05,
"loss": 1.7595,
"step": 2230
},
{
"epoch": 2.776073144273981,
"grad_norm": 0.09407597035169601,
"learning_rate": 1.5097469929489839e-05,
"loss": 1.6947,
"step": 2240
},
{
"epoch": 2.788470478847048,
"grad_norm": 0.0730314627289772,
"learning_rate": 1.4267938614682704e-05,
"loss": 1.8826,
"step": 2250
},
{
"epoch": 2.8008678134201146,
"grad_norm": 0.06652677804231644,
"learning_rate": 1.3438407299875571e-05,
"loss": 1.8177,
"step": 2260
},
{
"epoch": 2.8132651479931816,
"grad_norm": 0.07838471233844757,
"learning_rate": 1.2608875985068435e-05,
"loss": 1.8317,
"step": 2270
},
{
"epoch": 2.825662482566248,
"grad_norm": 0.0950925275683403,
"learning_rate": 1.1779344670261303e-05,
"loss": 1.7767,
"step": 2280
},
{
"epoch": 2.838059817139315,
"grad_norm": 0.08422578871250153,
"learning_rate": 1.094981335545417e-05,
"loss": 1.8927,
"step": 2290
},
{
"epoch": 2.850457151712382,
"grad_norm": 0.08127064257860184,
"learning_rate": 1.0120282040647035e-05,
"loss": 1.8953,
"step": 2300
},
{
"epoch": 2.862854486285449,
"grad_norm": 0.0867166519165039,
"learning_rate": 9.290750725839901e-06,
"loss": 1.7904,
"step": 2310
},
{
"epoch": 2.8752518208585154,
"grad_norm": 0.09853280335664749,
"learning_rate": 8.461219411032766e-06,
"loss": 1.7074,
"step": 2320
},
{
"epoch": 2.887649155431582,
"grad_norm": 0.0887097492814064,
"learning_rate": 7.631688096225632e-06,
"loss": 1.7587,
"step": 2330
},
{
"epoch": 2.900046490004649,
"grad_norm": 0.0924605056643486,
"learning_rate": 6.802156781418499e-06,
"loss": 1.9213,
"step": 2340
},
{
"epoch": 2.912443824577716,
"grad_norm": 0.08043299615383148,
"learning_rate": 5.972625466611365e-06,
"loss": 1.8088,
"step": 2350
},
{
"epoch": 2.9248411591507826,
"grad_norm": 0.08430881798267365,
"learning_rate": 5.143094151804231e-06,
"loss": 1.8026,
"step": 2360
},
{
"epoch": 2.937238493723849,
"grad_norm": 0.07314042001962662,
"learning_rate": 4.313562836997097e-06,
"loss": 1.7945,
"step": 2370
},
{
"epoch": 2.949635828296916,
"grad_norm": 0.07442809641361237,
"learning_rate": 3.484031522189963e-06,
"loss": 1.6588,
"step": 2380
},
{
"epoch": 2.962033162869983,
"grad_norm": 0.08359961956739426,
"learning_rate": 2.6545002073828286e-06,
"loss": 1.7927,
"step": 2390
},
{
"epoch": 2.9744304974430498,
"grad_norm": 0.07364208996295929,
"learning_rate": 1.8249688925756948e-06,
"loss": 1.7357,
"step": 2400
},
{
"epoch": 2.9868278320161163,
"grad_norm": 0.07607521116733551,
"learning_rate": 9.95437577768561e-07,
"loss": 1.8463,
"step": 2410
},
{
"epoch": 2.9992251665891834,
"grad_norm": 0.08674101531505585,
"learning_rate": 1.6590626296142679e-07,
"loss": 1.9422,
"step": 2420
},
{
"epoch": 3.0,
"eval_loss": 1.8053849935531616,
"eval_runtime": 78.8378,
"eval_samples_per_second": 9.095,
"eval_steps_per_second": 1.142,
"step": 2421
}
],
"logging_steps": 10,
"max_steps": 2421,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.469388058720993e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}