{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1797,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016711928138709004,
"grad_norm": 1.12109375,
"learning_rate": 5e-06,
"loss": 1.1894,
"step": 10
},
{
"epoch": 0.03342385627741801,
"grad_norm": 0.7248284816741943,
"learning_rate": 1.0555555555555555e-05,
"loss": 1.1506,
"step": 20
},
{
"epoch": 0.05013578441612701,
"grad_norm": 1.1556353569030762,
"learning_rate": 1.6111111111111115e-05,
"loss": 1.1033,
"step": 30
},
{
"epoch": 0.06684771255483601,
"grad_norm": 0.4772647023200989,
"learning_rate": 2.1666666666666667e-05,
"loss": 0.9371,
"step": 40
},
{
"epoch": 0.08355964069354502,
"grad_norm": 0.27529847621917725,
"learning_rate": 2.7222222222222223e-05,
"loss": 0.8313,
"step": 50
},
{
"epoch": 0.10027156883225402,
"grad_norm": 0.22090303897857666,
"learning_rate": 3.277777777777778e-05,
"loss": 0.7724,
"step": 60
},
{
"epoch": 0.11698349697096302,
"grad_norm": 0.21275630593299866,
"learning_rate": 3.8333333333333334e-05,
"loss": 0.733,
"step": 70
},
{
"epoch": 0.13369542510967203,
"grad_norm": 0.2134302407503128,
"learning_rate": 4.388888888888889e-05,
"loss": 0.7134,
"step": 80
},
{
"epoch": 0.15040735324838103,
"grad_norm": 0.18984167277812958,
"learning_rate": 4.9444444444444446e-05,
"loss": 0.6959,
"step": 90
},
{
"epoch": 0.16711928138709004,
"grad_norm": 0.2290167659521103,
"learning_rate": 5.500000000000001e-05,
"loss": 0.6518,
"step": 100
},
{
"epoch": 0.18383120952579904,
"grad_norm": 0.2181967943906784,
"learning_rate": 6.055555555555555e-05,
"loss": 0.6313,
"step": 110
},
{
"epoch": 0.20054313766450804,
"grad_norm": 0.26380836963653564,
"learning_rate": 6.611111111111111e-05,
"loss": 0.6059,
"step": 120
},
{
"epoch": 0.21725506580321705,
"grad_norm": 0.25219714641571045,
"learning_rate": 7.166666666666667e-05,
"loss": 0.6613,
"step": 130
},
{
"epoch": 0.23396699394192605,
"grad_norm": 0.24282532930374146,
"learning_rate": 7.722222222222223e-05,
"loss": 0.6676,
"step": 140
},
{
"epoch": 0.2506789220806351,
"grad_norm": 0.28735119104385376,
"learning_rate": 8.277777777777778e-05,
"loss": 0.647,
"step": 150
},
{
"epoch": 0.26739085021934406,
"grad_norm": 0.3064824938774109,
"learning_rate": 8.833333333333333e-05,
"loss": 0.6222,
"step": 160
},
{
"epoch": 0.2841027783580531,
"grad_norm": 0.33395013213157654,
"learning_rate": 9.388888888888889e-05,
"loss": 0.6109,
"step": 170
},
{
"epoch": 0.30081470649676206,
"grad_norm": 0.32029104232788086,
"learning_rate": 9.944444444444446e-05,
"loss": 0.6942,
"step": 180
},
{
"epoch": 0.3175266346354711,
"grad_norm": 0.29911231994628906,
"learning_rate": 9.999235647539953e-05,
"loss": 0.6312,
"step": 190
},
{
"epoch": 0.33423856277418007,
"grad_norm": 0.3333311676979065,
"learning_rate": 9.996593741531468e-05,
"loss": 0.6118,
"step": 200
},
{
"epoch": 0.3509504909128891,
"grad_norm": 0.31720009446144104,
"learning_rate": 9.992065842489567e-05,
"loss": 0.5505,
"step": 210
},
{
"epoch": 0.3676624190515981,
"grad_norm": 0.30165988206863403,
"learning_rate": 9.985653659495773e-05,
"loss": 0.6274,
"step": 220
},
{
"epoch": 0.3843743471903071,
"grad_norm": 0.31748878955841064,
"learning_rate": 9.977359612865423e-05,
"loss": 0.5831,
"step": 230
},
{
"epoch": 0.4010862753290161,
"grad_norm": 0.5018420815467834,
"learning_rate": 9.967186833234101e-05,
"loss": 0.6331,
"step": 240
},
{
"epoch": 0.4177982034677251,
"grad_norm": 0.3082675635814667,
"learning_rate": 9.955139160375959e-05,
"loss": 0.5884,
"step": 250
},
{
"epoch": 0.4345101316064341,
"grad_norm": 0.36464694142341614,
"learning_rate": 9.941221141754385e-05,
"loss": 0.5993,
"step": 260
},
{
"epoch": 0.4512220597451431,
"grad_norm": 0.36485177278518677,
"learning_rate": 9.925438030805518e-05,
"loss": 0.5846,
"step": 270
},
{
"epoch": 0.4679339878838521,
"grad_norm": 0.3401179015636444,
"learning_rate": 9.907795784955327e-05,
"loss": 0.61,
"step": 280
},
{
"epoch": 0.48464591602256113,
"grad_norm": 0.31191331148147583,
"learning_rate": 9.888301063370934e-05,
"loss": 0.5872,
"step": 290
},
{
"epoch": 0.5013578441612702,
"grad_norm": 0.2694801688194275,
"learning_rate": 9.866961224447075e-05,
"loss": 0.5905,
"step": 300
},
{
"epoch": 0.5180697722999791,
"grad_norm": 0.323143869638443,
"learning_rate": 9.843784323028638e-05,
"loss": 0.6213,
"step": 310
},
{
"epoch": 0.5347817004386881,
"grad_norm": 0.30978456139564514,
"learning_rate": 9.818779107370309e-05,
"loss": 0.5602,
"step": 320
},
{
"epoch": 0.5514936285773971,
"grad_norm": 0.36156347393989563,
"learning_rate": 9.791955015834492e-05,
"loss": 0.617,
"step": 330
},
{
"epoch": 0.5682055567161062,
"grad_norm": 0.3188885450363159,
"learning_rate": 9.763322173328753e-05,
"loss": 0.6133,
"step": 340
},
{
"epoch": 0.5849174848548151,
"grad_norm": 3.012507915496826,
"learning_rate": 9.732891387484104e-05,
"loss": 0.5401,
"step": 350
},
{
"epoch": 0.6016294129935241,
"grad_norm": 0.3500335216522217,
"learning_rate": 9.700674144575614e-05,
"loss": 0.5994,
"step": 360
},
{
"epoch": 0.6183413411322332,
"grad_norm": 0.38718223571777344,
"learning_rate": 9.666682605186835e-05,
"loss": 0.5362,
"step": 370
},
{
"epoch": 0.6350532692709422,
"grad_norm": 0.32986128330230713,
"learning_rate": 9.63092959961973e-05,
"loss": 0.596,
"step": 380
},
{
"epoch": 0.6517651974096511,
"grad_norm": 0.29063621163368225,
"learning_rate": 9.593428623051792e-05,
"loss": 0.5578,
"step": 390
},
{
"epoch": 0.6684771255483601,
"grad_norm": 0.3483268916606903,
"learning_rate": 9.554193830442229e-05,
"loss": 0.6073,
"step": 400
},
{
"epoch": 0.6851890536870692,
"grad_norm": 0.33267414569854736,
"learning_rate": 9.513240031189067e-05,
"loss": 0.5327,
"step": 410
},
{
"epoch": 0.7019009818257782,
"grad_norm": 0.3286801874637604,
"learning_rate": 9.470582683539285e-05,
"loss": 0.5884,
"step": 420
},
{
"epoch": 0.7186129099644871,
"grad_norm": 0.359754353761673,
"learning_rate": 9.42623788875399e-05,
"loss": 0.6042,
"step": 430
},
{
"epoch": 0.7353248381031962,
"grad_norm": 0.36169129610061646,
"learning_rate": 9.380222385030915e-05,
"loss": 0.4902,
"step": 440
},
{
"epoch": 0.7520367662419052,
"grad_norm": 0.31606003642082214,
"learning_rate": 9.332553541186485e-05,
"loss": 0.5816,
"step": 450
},
{
"epoch": 0.7687486943806142,
"grad_norm": 0.3151916265487671,
"learning_rate": 9.283249350099859e-05,
"loss": 0.6368,
"step": 460
},
{
"epoch": 0.7854606225193231,
"grad_norm": 0.2840172350406647,
"learning_rate": 9.23232842192142e-05,
"loss": 0.5906,
"step": 470
},
{
"epoch": 0.8021725506580322,
"grad_norm": 0.30779144167900085,
"learning_rate": 9.179809977048248e-05,
"loss": 0.5955,
"step": 480
},
{
"epoch": 0.8188844787967412,
"grad_norm": 0.31077197194099426,
"learning_rate": 9.125713838869299e-05,
"loss": 0.5831,
"step": 490
},
{
"epoch": 0.8355964069354502,
"grad_norm": 0.33284345269203186,
"learning_rate": 9.070060426282925e-05,
"loss": 0.6071,
"step": 500
},
{
"epoch": 0.8523083350741592,
"grad_norm": 0.33886072039604187,
"learning_rate": 9.012870745989663e-05,
"loss": 0.5619,
"step": 510
},
{
"epoch": 0.8690202632128682,
"grad_norm": 0.3419983386993408,
"learning_rate": 8.954166384563127e-05,
"loss": 0.5264,
"step": 520
},
{
"epoch": 0.8857321913515772,
"grad_norm": 0.34447890520095825,
"learning_rate": 8.893969500302031e-05,
"loss": 0.6026,
"step": 530
},
{
"epoch": 0.9024441194902862,
"grad_norm": 0.28169864416122437,
"learning_rate": 8.832302814866416e-05,
"loss": 0.5829,
"step": 540
},
{
"epoch": 0.9191560476289952,
"grad_norm": 0.36382725834846497,
"learning_rate": 8.76918960470122e-05,
"loss": 0.5803,
"step": 550
},
{
"epoch": 0.9358679757677042,
"grad_norm": 0.3617050349712372,
"learning_rate": 8.704653692250466e-05,
"loss": 0.5373,
"step": 560
},
{
"epoch": 0.9525799039064132,
"grad_norm": 0.35659492015838623,
"learning_rate": 8.638719436965325e-05,
"loss": 0.5686,
"step": 570
},
{
"epoch": 0.9692918320451223,
"grad_norm": 0.35579752922058105,
"learning_rate": 8.571411726109519e-05,
"loss": 0.5691,
"step": 580
},
{
"epoch": 0.9860037601838312,
"grad_norm": 0.31393831968307495,
"learning_rate": 8.50275596536546e-05,
"loss": 0.543,
"step": 590
},
{
"epoch": 1.0016711928138708,
"grad_norm": 0.3395228683948517,
"learning_rate": 8.432778069244749e-05,
"loss": 0.5515,
"step": 600
},
{
"epoch": 1.01838312095258,
"grad_norm": 0.32482120394706726,
"learning_rate": 8.361504451306585e-05,
"loss": 0.5337,
"step": 610
},
{
"epoch": 1.035095049091289,
"grad_norm": 0.34779229760169983,
"learning_rate": 8.288962014187811e-05,
"loss": 0.5536,
"step": 620
},
{
"epoch": 1.0518069772299978,
"grad_norm": 0.28236305713653564,
"learning_rate": 8.21517813944837e-05,
"loss": 0.4698,
"step": 630
},
{
"epoch": 1.068518905368707,
"grad_norm": 0.3089806139469147,
"learning_rate": 8.14018067723597e-05,
"loss": 0.5274,
"step": 640
},
{
"epoch": 1.085230833507416,
"grad_norm": 0.38024547696113586,
"learning_rate": 8.063997935773885e-05,
"loss": 0.5214,
"step": 650
},
{
"epoch": 1.101942761646125,
"grad_norm": 0.38784193992614746,
"learning_rate": 7.986658670675861e-05,
"loss": 0.5455,
"step": 660
},
{
"epoch": 1.118654689784834,
"grad_norm": 0.35978567600250244,
"learning_rate": 7.908192074092136e-05,
"loss": 0.5489,
"step": 670
},
{
"epoch": 1.1353666179235429,
"grad_norm": 0.38467374444007874,
"learning_rate": 7.828627763690697e-05,
"loss": 0.5604,
"step": 680
},
{
"epoch": 1.152078546062252,
"grad_norm": 0.42285487055778503,
"learning_rate": 7.747995771477928e-05,
"loss": 0.5591,
"step": 690
},
{
"epoch": 1.168790474200961,
"grad_norm": 0.46918097138404846,
"learning_rate": 7.666326532462842e-05,
"loss": 0.5201,
"step": 700
},
{
"epoch": 1.1855024023396699,
"grad_norm": 0.4248163104057312,
"learning_rate": 7.583650873169232e-05,
"loss": 0.5233,
"step": 710
},
{
"epoch": 1.202214330478379,
"grad_norm": 0.3677292764186859,
"learning_rate": 7.500000000000001e-05,
"loss": 0.5565,
"step": 720
},
{
"epoch": 1.218926258617088,
"grad_norm": 0.4452461302280426,
"learning_rate": 7.41540548745814e-05,
"loss": 0.5223,
"step": 730
},
{
"epoch": 1.2356381867557968,
"grad_norm": 0.46008288860321045,
"learning_rate": 7.329899266228748e-05,
"loss": 0.5347,
"step": 740
},
{
"epoch": 1.252350114894506,
"grad_norm": 0.32713180780410767,
"learning_rate": 7.243513611126608e-05,
"loss": 0.514,
"step": 750
},
{
"epoch": 1.269062043033215,
"grad_norm": 0.39738771319389343,
"learning_rate": 7.156281128913871e-05,
"loss": 0.5196,
"step": 760
},
{
"epoch": 1.285773971171924,
"grad_norm": 0.48977628350257874,
"learning_rate": 7.068234745992456e-05,
"loss": 0.5087,
"step": 770
},
{
"epoch": 1.302485899310633,
"grad_norm": 0.3981214165687561,
"learning_rate": 6.979407695975776e-05,
"loss": 0.4929,
"step": 780
},
{
"epoch": 1.3191978274493419,
"grad_norm": 0.4314100444316864,
"learning_rate": 6.889833507144532e-05,
"loss": 0.5511,
"step": 790
},
{
"epoch": 1.335909755588051,
"grad_norm": 0.42734798789024353,
"learning_rate": 6.799545989791268e-05,
"loss": 0.4975,
"step": 800
},
{
"epoch": 1.35262168372676,
"grad_norm": 0.44379082322120667,
"learning_rate": 6.708579223458475e-05,
"loss": 0.5344,
"step": 810
},
{
"epoch": 1.369333611865469,
"grad_norm": 0.41279590129852295,
"learning_rate": 6.616967544075077e-05,
"loss": 0.5416,
"step": 820
},
{
"epoch": 1.386045540004178,
"grad_norm": 0.4385620057582855,
"learning_rate": 6.524745530996137e-05,
"loss": 0.548,
"step": 830
},
{
"epoch": 1.402757468142887,
"grad_norm": 0.40365514159202576,
"learning_rate": 6.431947993950682e-05,
"loss": 0.5893,
"step": 840
},
{
"epoch": 1.4194693962815959,
"grad_norm": 0.4191945791244507,
"learning_rate": 6.338609959902569e-05,
"loss": 0.551,
"step": 850
},
{
"epoch": 1.436181324420305,
"grad_norm": 0.43660128116607666,
"learning_rate": 6.244766659829351e-05,
"loss": 0.5275,
"step": 860
},
{
"epoch": 1.452893252559014,
"grad_norm": 0.4230160713195801,
"learning_rate": 6.150453515424153e-05,
"loss": 0.5485,
"step": 870
},
{
"epoch": 1.469605180697723,
"grad_norm": 0.4209563434123993,
"learning_rate": 6.055706125725542e-05,
"loss": 0.5864,
"step": 880
},
{
"epoch": 1.486317108836432,
"grad_norm": 0.4084739089012146,
"learning_rate": 5.9605602536804673e-05,
"loss": 0.5408,
"step": 890
},
{
"epoch": 1.503029036975141,
"grad_norm": 0.3635924756526947,
"learning_rate": 5.865051812645329e-05,
"loss": 0.5245,
"step": 900
},
{
"epoch": 1.51974096511385,
"grad_norm": 0.4919414222240448,
"learning_rate": 5.7692168528302807e-05,
"loss": 0.5483,
"step": 910
},
{
"epoch": 1.536452893252559,
"grad_norm": 0.42606762051582336,
"learning_rate": 5.673091547691866e-05,
"loss": 0.4793,
"step": 920
},
{
"epoch": 1.553164821391268,
"grad_norm": 0.3808182179927826,
"learning_rate": 5.576712180279133e-05,
"loss": 0.5265,
"step": 930
},
{
"epoch": 1.569876749529977,
"grad_norm": 0.4470883011817932,
"learning_rate": 5.480115129538409e-05,
"loss": 0.5438,
"step": 940
},
{
"epoch": 1.586588677668686,
"grad_norm": 0.39399048686027527,
"learning_rate": 5.383336856581833e-05,
"loss": 0.5604,
"step": 950
},
{
"epoch": 1.6033006058073949,
"grad_norm": 0.4587889015674591,
"learning_rate": 5.2864138909249176e-05,
"loss": 0.5358,
"step": 960
},
{
"epoch": 1.620012533946104,
"grad_norm": 0.4291887879371643,
"learning_rate": 5.189382816698263e-05,
"loss": 0.5607,
"step": 970
},
{
"epoch": 1.6367244620848131,
"grad_norm": 0.4326860308647156,
"learning_rate": 5.0922802588386766e-05,
"loss": 0.548,
"step": 980
},
{
"epoch": 1.653436390223522,
"grad_norm": 0.4679637849330902,
"learning_rate": 4.9951428692648664e-05,
"loss": 0.5565,
"step": 990
},
{
"epoch": 1.670148318362231,
"grad_norm": 0.4345496594905853,
"learning_rate": 4.898007313042975e-05,
"loss": 0.5003,
"step": 1000
},
{
"epoch": 1.68686024650094,
"grad_norm": 0.4368157684803009,
"learning_rate": 4.8009102545471355e-05,
"loss": 0.5372,
"step": 1010
},
{
"epoch": 1.703572174639649,
"grad_norm": 0.4497671127319336,
"learning_rate": 4.7038883436202955e-05,
"loss": 0.4937,
"step": 1020
},
{
"epoch": 1.7202841027783582,
"grad_norm": 0.4601435661315918,
"learning_rate": 4.606978201740518e-05,
"loss": 0.5443,
"step": 1030
},
{
"epoch": 1.7369960309170671,
"grad_norm": 0.3899286091327667,
"learning_rate": 4.510216408197996e-05,
"loss": 0.4928,
"step": 1040
},
{
"epoch": 1.753707959055776,
"grad_norm": 0.45193353295326233,
"learning_rate": 4.4136394862879914e-05,
"loss": 0.5359,
"step": 1050
},
{
"epoch": 1.770419887194485,
"grad_norm": 0.4571657180786133,
"learning_rate": 4.3172838895249036e-05,
"loss": 0.5296,
"step": 1060
},
{
"epoch": 1.787131815333194,
"grad_norm": 0.4370465576648712,
"learning_rate": 4.221185987882684e-05,
"loss": 0.536,
"step": 1070
},
{
"epoch": 1.803843743471903,
"grad_norm": 0.4276420474052429,
"learning_rate": 4.125382054066781e-05,
"loss": 0.4892,
"step": 1080
},
{
"epoch": 1.8205556716106122,
"grad_norm": 0.44949424266815186,
"learning_rate": 4.029908249822795e-05,
"loss": 0.5455,
"step": 1090
},
{
"epoch": 1.837267599749321,
"grad_norm": 0.46534180641174316,
"learning_rate": 3.934800612287019e-05,
"loss": 0.5153,
"step": 1100
},
{
"epoch": 1.85397952788803,
"grad_norm": 0.4382587969303131,
"learning_rate": 3.840095040384023e-05,
"loss": 0.5316,
"step": 1110
},
{
"epoch": 1.870691456026739,
"grad_norm": 0.4234811067581177,
"learning_rate": 3.7458272812763875e-05,
"loss": 0.5119,
"step": 1120
},
{
"epoch": 1.887403384165448,
"grad_norm": 0.4511267840862274,
"learning_rate": 3.652032916871737e-05,
"loss": 0.5344,
"step": 1130
},
{
"epoch": 1.9041153123041572,
"grad_norm": 0.4052788317203522,
"learning_rate": 3.558747350392146e-05,
"loss": 0.5351,
"step": 1140
},
{
"epoch": 1.9208272404428661,
"grad_norm": 0.4650772213935852,
"learning_rate": 3.466005793010985e-05,
"loss": 0.5051,
"step": 1150
},
{
"epoch": 1.937539168581575,
"grad_norm": 0.47169771790504456,
"learning_rate": 3.373843250562265e-05,
"loss": 0.5262,
"step": 1160
},
{
"epoch": 1.954251096720284,
"grad_norm": 0.48139435052871704,
"learning_rate": 3.282294510327478e-05,
"loss": 0.5412,
"step": 1170
},
{
"epoch": 1.970963024858993,
"grad_norm": 0.43278786540031433,
"learning_rate": 3.1913941279049467e-05,
"loss": 0.5467,
"step": 1180
},
{
"epoch": 1.9876749529977022,
"grad_norm": 0.3845864236354828,
"learning_rate": 3.101176414166605e-05,
"loss": 0.5278,
"step": 1190
},
{
"epoch": 2.0033423856277417,
"grad_norm": 0.38537508249282837,
"learning_rate": 3.011675422307172e-05,
"loss": 0.4786,
"step": 1200
},
{
"epoch": 2.0200543137664506,
"grad_norm": 0.43613022565841675,
"learning_rate": 2.9229249349905684e-05,
"loss": 0.4896,
"step": 1210
},
{
"epoch": 2.03676624190516,
"grad_norm": 0.4069509208202362,
"learning_rate": 2.834958451598465e-05,
"loss": 0.4863,
"step": 1220
},
{
"epoch": 2.053478170043869,
"grad_norm": 0.4296644628047943,
"learning_rate": 2.7478091755857422e-05,
"loss": 0.4549,
"step": 1230
},
{
"epoch": 2.070190098182578,
"grad_norm": 0.4591960906982422,
"learning_rate": 2.6615100019476535e-05,
"loss": 0.5586,
"step": 1240
},
{
"epoch": 2.0869020263212867,
"grad_norm": 0.4489918053150177,
"learning_rate": 2.576093504803432e-05,
"loss": 0.4685,
"step": 1250
},
{
"epoch": 2.1036139544599957,
"grad_norm": 0.42996761202812195,
"learning_rate": 2.491591925100985e-05,
"loss": 0.5057,
"step": 1260
},
{
"epoch": 2.120325882598705,
"grad_norm": 0.47328057885169983,
"learning_rate": 2.4080371584473748e-05,
"loss": 0.4707,
"step": 1270
},
{
"epoch": 2.137037810737414,
"grad_norm": 0.47140026092529297,
"learning_rate": 2.325460743069639e-05,
"loss": 0.5067,
"step": 1280
},
{
"epoch": 2.153749738876123,
"grad_norm": 0.48226219415664673,
"learning_rate": 2.2438938479104952e-05,
"loss": 0.4863,
"step": 1290
},
{
"epoch": 2.170461667014832,
"grad_norm": 0.47203513979911804,
"learning_rate": 2.1633672608634524e-05,
"loss": 0.4831,
"step": 1300
},
{
"epoch": 2.1871735951535407,
"grad_norm": 0.5904713273048401,
"learning_rate": 2.0839113771517467e-05,
"loss": 0.4733,
"step": 1310
},
{
"epoch": 2.20388552329225,
"grad_norm": 0.5169677734375,
"learning_rate": 2.0055561878554792e-05,
"loss": 0.4802,
"step": 1320
},
{
"epoch": 2.220597451430959,
"grad_norm": 0.4227210283279419,
"learning_rate": 1.928331268591315e-05,
"loss": 0.4735,
"step": 1330
},
{
"epoch": 2.237309379569668,
"grad_norm": 0.47158095240592957,
"learning_rate": 1.852265768349006e-05,
"loss": 0.493,
"step": 1340
},
{
"epoch": 2.254021307708377,
"grad_norm": 0.45563799142837524,
"learning_rate": 1.777388398488918e-05,
"loss": 0.4794,
"step": 1350
},
{
"epoch": 2.2707332358470858,
"grad_norm": 0.4908081889152527,
"learning_rate": 1.7037274219047798e-05,
"loss": 0.4856,
"step": 1360
},
{
"epoch": 2.2874451639857947,
"grad_norm": 0.46372032165527344,
"learning_rate": 1.6313106423556878e-05,
"loss": 0.5309,
"step": 1370
},
{
"epoch": 2.304157092124504,
"grad_norm": 0.48590731620788574,
"learning_rate": 1.5601653939714074e-05,
"loss": 0.5225,
"step": 1380
},
{
"epoch": 2.320869020263213,
"grad_norm": 0.5794572830200195,
"learning_rate": 1.490318530934957e-05,
"loss": 0.5351,
"step": 1390
},
{
"epoch": 2.337580948401922,
"grad_norm": 0.4877820909023285,
"learning_rate": 1.4217964173463472e-05,
"loss": 0.4954,
"step": 1400
},
{
"epoch": 2.354292876540631,
"grad_norm": 0.4966225028038025,
"learning_rate": 1.3546249172712849e-05,
"loss": 0.4613,
"step": 1410
},
{
"epoch": 2.3710048046793397,
"grad_norm": 0.5034465789794922,
"learning_rate": 1.2888293849786503e-05,
"loss": 0.4936,
"step": 1420
},
{
"epoch": 2.387716732818049,
"grad_norm": 0.4590546190738678,
"learning_rate": 1.2244346553703667e-05,
"loss": 0.4583,
"step": 1430
},
{
"epoch": 2.404428660956758,
"grad_norm": 0.4191470146179199,
"learning_rate": 1.161465034607332e-05,
"loss": 0.441,
"step": 1440
},
{
"epoch": 2.421140589095467,
"grad_norm": 0.5140660405158997,
"learning_rate": 1.0999442909349217e-05,
"loss": 0.4506,
"step": 1450
},
{
"epoch": 2.437852517234176,
"grad_norm": 0.5455506443977356,
"learning_rate": 1.0398956457115194e-05,
"loss": 0.4816,
"step": 1460
},
{
"epoch": 2.4545644453728848,
"grad_norm": 0.48107025027275085,
"learning_rate": 9.813417646434864e-06,
"loss": 0.5195,
"step": 1470
},
{
"epoch": 2.4712763735115937,
"grad_norm": 0.4184824824333191,
"learning_rate": 9.243047492298634e-06,
"loss": 0.4877,
"step": 1480
},
{
"epoch": 2.487988301650303,
"grad_norm": 0.46802711486816406,
"learning_rate": 8.688061284200266e-06,
"loss": 0.4473,
"step": 1490
},
{
"epoch": 2.504700229789012,
"grad_norm": 0.45637863874435425,
"learning_rate": 8.148668504874623e-06,
"loss": 0.4787,
"step": 1500
},
{
"epoch": 2.521412157927721,
"grad_norm": 0.3878587782382965,
"learning_rate": 7.625072751227297e-06,
"loss": 0.4778,
"step": 1510
},
{
"epoch": 2.53812408606643,
"grad_norm": 0.5192758440971375,
"learning_rate": 7.117471657485663e-06,
"loss": 0.4696,
"step": 1520
},
{
"epoch": 2.554836014205139,
"grad_norm": 0.5533237457275391,
"learning_rate": 6.626056820600768e-06,
"loss": 0.525,
"step": 1530
},
{
"epoch": 2.571547942343848,
"grad_norm": 0.4474124312400818,
"learning_rate": 6.151013727927984e-06,
"loss": 0.5093,
"step": 1540
},
{
"epoch": 2.588259870482557,
"grad_norm": 0.4954865872859955,
"learning_rate": 5.69252168721367e-06,
"loss": 0.4585,
"step": 1550
},
{
"epoch": 2.604971798621266,
"grad_norm": 0.5517941117286682,
"learning_rate": 5.250753758914506e-06,
"loss": 0.5038,
"step": 1560
},
{
"epoch": 2.621683726759975,
"grad_norm": 0.48453307151794434,
"learning_rate": 4.82587669087477e-06,
"loss": 0.4839,
"step": 1570
},
{
"epoch": 2.6383956548986838,
"grad_norm": 0.4252147376537323,
"learning_rate": 4.418050855386413e-06,
"loss": 0.4257,
"step": 1580
},
{
"epoch": 2.6551075830373927,
"grad_norm": 0.3846037983894348,
"learning_rate": 4.027430188655684e-06,
"loss": 0.5293,
"step": 1590
},
{
"epoch": 2.671819511176102,
"grad_norm": 0.5026289224624634,
"learning_rate": 3.654162132698918e-06,
"loss": 0.502,
"step": 1600
},
{
"epoch": 2.688531439314811,
"grad_norm": 0.5699180960655212,
"learning_rate": 3.298387579689771e-06,
"loss": 0.5035,
"step": 1610
},
{
"epoch": 2.70524336745352,
"grad_norm": 0.4977060854434967,
"learning_rate": 2.960240818778659e-06,
"loss": 0.478,
"step": 1620
},
{
"epoch": 2.721955295592229,
"grad_norm": 0.48151639103889465,
"learning_rate": 2.639849485404505e-06,
"loss": 0.503,
"step": 1630
},
{
"epoch": 2.738667223730938,
"grad_norm": 0.48960214853286743,
"learning_rate": 2.3373345131180224e-06,
"loss": 0.4862,
"step": 1640
},
{
"epoch": 2.755379151869647,
"grad_norm": 0.44109290838241577,
"learning_rate": 2.052810087934698e-06,
"loss": 0.4349,
"step": 1650
},
{
"epoch": 2.772091080008356,
"grad_norm": 0.4852181375026703,
"learning_rate": 1.7863836052345429e-06,
"loss": 0.4517,
"step": 1660
},
{
"epoch": 2.788803008147065,
"grad_norm": 0.45847681164741516,
"learning_rate": 1.5381556292251632e-06,
"loss": 0.5079,
"step": 1670
},
{
"epoch": 2.805514936285774,
"grad_norm": 0.5069862604141235,
"learning_rate": 1.3082198549831836e-06,
"loss": 0.4907,
"step": 1680
},
{
"epoch": 2.822226864424483,
"grad_norm": 0.5091650485992432,
"learning_rate": 1.0966630730884887e-06,
"loss": 0.5045,
"step": 1690
},
{
"epoch": 2.8389387925631917,
"grad_norm": 0.4692935049533844,
"learning_rate": 9.035651368646648e-07,
"loss": 0.4637,
"step": 1700
},
{
"epoch": 2.855650720701901,
"grad_norm": 0.4802175462245941,
"learning_rate": 7.289989322378732e-07,
"loss": 0.5006,
"step": 1710
},
{
"epoch": 2.87236264884061,
"grad_norm": 0.5460130572319031,
"learning_rate": 5.730303502256341e-07,
"loss": 0.471,
"step": 1720
},
{
"epoch": 2.889074576979319,
"grad_norm": 0.48738327622413635,
"learning_rate": 4.3571826206590396e-07,
"loss": 0.4222,
"step": 1730
},
{
"epoch": 2.905786505118028,
"grad_norm": 0.4350599944591522,
"learning_rate": 3.1711449699576845e-07,
"loss": 0.4816,
"step": 1740
},
{
"epoch": 2.922498433256737,
"grad_norm": 0.5202321410179138,
"learning_rate": 2.172638226882129e-07,
"loss": 0.4285,
"step": 1750
},
{
"epoch": 2.939210361395446,
"grad_norm": 0.5793569684028625,
"learning_rate": 1.3620392835430596e-07,
"loss": 0.4074,
"step": 1760
},
{
"epoch": 2.955922289534155,
"grad_norm": 0.5055475234985352,
"learning_rate": 7.396541051717942e-08,
"loss": 0.499,
"step": 1770
},
{
"epoch": 2.972634217672864,
"grad_norm": 0.5408802032470703,
"learning_rate": 3.057176146319951e-08,
"loss": 0.4842,
"step": 1780
},
{
"epoch": 2.989346145811573,
"grad_norm": 0.501858115196228,
"learning_rate": 6.0393603746822235e-09,
"loss": 0.5106,
"step": 1790
},
{
"epoch": 3.0,
"step": 1797,
"total_flos": 6.784185535022039e+17,
"train_loss": 0.5515258336107003,
"train_runtime": 18534.8157,
"train_samples_per_second": 0.775,
"train_steps_per_second": 0.097
}
],
"logging_steps": 10,
"max_steps": 1797,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.784185535022039e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}