{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 9213,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0162813415825464,
"grad_norm": 1.3684147596359253,
"learning_rate": 4.973407142081841e-05,
"loss": 4.405337219238281,
"step": 50
},
{
"epoch": 0.0325626831650928,
"grad_norm": 2.8155977725982666,
"learning_rate": 4.946271572777597e-05,
"loss": 3.2570425415039064,
"step": 100
},
{
"epoch": 0.04884402474763921,
"grad_norm": 1.134498119354248,
"learning_rate": 4.919136003473353e-05,
"loss": 2.13068603515625,
"step": 150
},
{
"epoch": 0.0651253663301856,
"grad_norm": 1.3554809093475342,
"learning_rate": 4.892000434169109e-05,
"loss": 1.8829190063476562,
"step": 200
},
{
"epoch": 0.08140670791273201,
"grad_norm": 1.3557366132736206,
"learning_rate": 4.8648648648648654e-05,
"loss": 1.819012908935547,
"step": 250
},
{
"epoch": 0.09768804949527841,
"grad_norm": 1.4770574569702148,
"learning_rate": 4.837729295560621e-05,
"loss": 1.7141534423828124,
"step": 300
},
{
"epoch": 0.11396939107782482,
"grad_norm": 1.4904377460479736,
"learning_rate": 4.810593726256377e-05,
"loss": 1.6724388122558593,
"step": 350
},
{
"epoch": 0.1302507326603712,
"grad_norm": 1.4447667598724365,
"learning_rate": 4.783458156952133e-05,
"loss": 1.5954844665527343,
"step": 400
},
{
"epoch": 0.14653207424291761,
"grad_norm": 1.3709800243377686,
"learning_rate": 4.756322587647889e-05,
"loss": 1.585284423828125,
"step": 450
},
{
"epoch": 0.16281341582546402,
"grad_norm": 1.9350112676620483,
"learning_rate": 4.729187018343645e-05,
"loss": 1.4481756591796875,
"step": 500
},
{
"epoch": 0.17909475740801042,
"grad_norm": 1.8580759763717651,
"learning_rate": 4.7020514490394014e-05,
"loss": 1.5084706115722657,
"step": 550
},
{
"epoch": 0.19537609899055683,
"grad_norm": 2.5280864238739014,
"learning_rate": 4.674915879735157e-05,
"loss": 1.3583644104003907,
"step": 600
},
{
"epoch": 0.21165744057310323,
"grad_norm": 1.7610465288162231,
"learning_rate": 4.647780310430913e-05,
"loss": 1.3752433776855468,
"step": 650
},
{
"epoch": 0.22793878215564964,
"grad_norm": 1.9367973804473877,
"learning_rate": 4.620644741126669e-05,
"loss": 1.3308279418945312,
"step": 700
},
{
"epoch": 0.24422012373819602,
"grad_norm": 1.8170645236968994,
"learning_rate": 4.593509171822425e-05,
"loss": 1.2124520111083985,
"step": 750
},
{
"epoch": 0.2605014653207424,
"grad_norm": 2.4546971321105957,
"learning_rate": 4.566373602518181e-05,
"loss": 1.2327165222167968,
"step": 800
},
{
"epoch": 0.2767828069032888,
"grad_norm": 2.476984977722168,
"learning_rate": 4.5392380332139374e-05,
"loss": 1.2717761993408203,
"step": 850
},
{
"epoch": 0.29306414848583523,
"grad_norm": 2.4676401615142822,
"learning_rate": 4.512102463909693e-05,
"loss": 1.1377059173583985,
"step": 900
},
{
"epoch": 0.30934549006838163,
"grad_norm": 3.8174469470977783,
"learning_rate": 4.484966894605449e-05,
"loss": 1.195997314453125,
"step": 950
},
{
"epoch": 0.32562683165092804,
"grad_norm": 2.879594564437866,
"learning_rate": 4.457831325301205e-05,
"loss": 1.160465316772461,
"step": 1000
},
{
"epoch": 0.34190817323347444,
"grad_norm": 2.7231075763702393,
"learning_rate": 4.430695755996961e-05,
"loss": 1.1307568359375,
"step": 1050
},
{
"epoch": 0.35818951481602085,
"grad_norm": 2.86122989654541,
"learning_rate": 4.403560186692717e-05,
"loss": 1.1008319854736328,
"step": 1100
},
{
"epoch": 0.37447085639856725,
"grad_norm": 5.22833251953125,
"learning_rate": 4.3764246173884733e-05,
"loss": 1.075364456176758,
"step": 1150
},
{
"epoch": 0.39075219798111366,
"grad_norm": 2.75107479095459,
"learning_rate": 4.349289048084229e-05,
"loss": 1.02243408203125,
"step": 1200
},
{
"epoch": 0.40703353956366006,
"grad_norm": 5.390212535858154,
"learning_rate": 4.322153478779985e-05,
"loss": 0.9791134643554688,
"step": 1250
},
{
"epoch": 0.42331488114620647,
"grad_norm": 2.980733871459961,
"learning_rate": 4.295017909475741e-05,
"loss": 0.9889099884033203,
"step": 1300
},
{
"epoch": 0.4395962227287529,
"grad_norm": 2.0358238220214844,
"learning_rate": 4.267882340171497e-05,
"loss": 1.0286465454101563,
"step": 1350
},
{
"epoch": 0.4558775643112993,
"grad_norm": 3.465447187423706,
"learning_rate": 4.2407467708672534e-05,
"loss": 0.921280517578125,
"step": 1400
},
{
"epoch": 0.4721589058938456,
"grad_norm": 3.2969017028808594,
"learning_rate": 4.213611201563009e-05,
"loss": 0.9863865661621094,
"step": 1450
},
{
"epoch": 0.48844024747639203,
"grad_norm": 2.22483229637146,
"learning_rate": 4.186475632258765e-05,
"loss": 0.9354537963867188,
"step": 1500
},
{
"epoch": 0.5047215890589385,
"grad_norm": 2.548597812652588,
"learning_rate": 4.159340062954521e-05,
"loss": 0.9002230834960937,
"step": 1550
},
{
"epoch": 0.5210029306414848,
"grad_norm": 2.0713655948638916,
"learning_rate": 4.132204493650277e-05,
"loss": 0.9603328704833984,
"step": 1600
},
{
"epoch": 0.5372842722240313,
"grad_norm": 2.6993308067321777,
"learning_rate": 4.105068924346033e-05,
"loss": 0.91706787109375,
"step": 1650
},
{
"epoch": 0.5535656138065776,
"grad_norm": 2.801042079925537,
"learning_rate": 4.0779333550417894e-05,
"loss": 0.9315050506591797,
"step": 1700
},
{
"epoch": 0.5698469553891241,
"grad_norm": 2.2465991973876953,
"learning_rate": 4.050797785737545e-05,
"loss": 0.9338973236083984,
"step": 1750
},
{
"epoch": 0.5861282969716705,
"grad_norm": 2.300447702407837,
"learning_rate": 4.023662216433301e-05,
"loss": 0.8962403106689453,
"step": 1800
},
{
"epoch": 0.6024096385542169,
"grad_norm": 3.7357118129730225,
"learning_rate": 3.996526647129057e-05,
"loss": 0.8952316284179688,
"step": 1850
},
{
"epoch": 0.6186909801367633,
"grad_norm": 2.987694263458252,
"learning_rate": 3.969391077824813e-05,
"loss": 0.8913738250732421,
"step": 1900
},
{
"epoch": 0.6349723217193096,
"grad_norm": 2.678509473800659,
"learning_rate": 3.942255508520569e-05,
"loss": 0.8106181335449218,
"step": 1950
},
{
"epoch": 0.6512536633018561,
"grad_norm": 2.694267511367798,
"learning_rate": 3.9151199392163254e-05,
"loss": 0.8525662994384766,
"step": 2000
},
{
"epoch": 0.6675350048844024,
"grad_norm": 2.4209225177764893,
"learning_rate": 3.887984369912081e-05,
"loss": 0.8165419769287109,
"step": 2050
},
{
"epoch": 0.6838163464669489,
"grad_norm": 3.41129994392395,
"learning_rate": 3.860848800607837e-05,
"loss": 0.7995121765136719,
"step": 2100
},
{
"epoch": 0.7000976880494952,
"grad_norm": 2.8171026706695557,
"learning_rate": 3.833713231303593e-05,
"loss": 0.8041473388671875,
"step": 2150
},
{
"epoch": 0.7163790296320417,
"grad_norm": 2.9240710735321045,
"learning_rate": 3.806577661999349e-05,
"loss": 0.9764547729492188,
"step": 2200
},
{
"epoch": 0.732660371214588,
"grad_norm": 2.051103353500366,
"learning_rate": 3.7794420926951055e-05,
"loss": 0.8336094665527344,
"step": 2250
},
{
"epoch": 0.7489417127971345,
"grad_norm": 3.2116966247558594,
"learning_rate": 3.7523065233908613e-05,
"loss": 0.7777658081054688,
"step": 2300
},
{
"epoch": 0.7652230543796809,
"grad_norm": 3.8302836418151855,
"learning_rate": 3.725170954086617e-05,
"loss": 0.848994140625,
"step": 2350
},
{
"epoch": 0.7815043959622273,
"grad_norm": 2.0940141677856445,
"learning_rate": 3.698035384782373e-05,
"loss": 0.6995362091064453,
"step": 2400
},
{
"epoch": 0.7977857375447737,
"grad_norm": 3.155562400817871,
"learning_rate": 3.670899815478129e-05,
"loss": 0.7727601623535156,
"step": 2450
},
{
"epoch": 0.8140670791273201,
"grad_norm": 2.886545419692993,
"learning_rate": 3.643764246173885e-05,
"loss": 0.7873483276367188,
"step": 2500
},
{
"epoch": 0.8303484207098665,
"grad_norm": 2.5638420581817627,
"learning_rate": 3.6166286768696414e-05,
"loss": 0.7987093353271484,
"step": 2550
},
{
"epoch": 0.8466297622924129,
"grad_norm": 2.3368115425109863,
"learning_rate": 3.589493107565397e-05,
"loss": 0.7667920684814453,
"step": 2600
},
{
"epoch": 0.8629111038749593,
"grad_norm": 2.953003168106079,
"learning_rate": 3.562357538261153e-05,
"loss": 0.6812684631347656,
"step": 2650
},
{
"epoch": 0.8791924454575057,
"grad_norm": 3.382375955581665,
"learning_rate": 3.535221968956909e-05,
"loss": 0.7589307403564454,
"step": 2700
},
{
"epoch": 0.8954737870400521,
"grad_norm": 2.6374568939208984,
"learning_rate": 3.508086399652665e-05,
"loss": 0.7288359069824218,
"step": 2750
},
{
"epoch": 0.9117551286225986,
"grad_norm": 2.135319709777832,
"learning_rate": 3.480950830348421e-05,
"loss": 0.7487136077880859,
"step": 2800
},
{
"epoch": 0.9280364702051449,
"grad_norm": 3.235957145690918,
"learning_rate": 3.4538152610441774e-05,
"loss": 0.7287068176269531,
"step": 2850
},
{
"epoch": 0.9443178117876913,
"grad_norm": 3.254149913787842,
"learning_rate": 3.426679691739933e-05,
"loss": 0.7420393371582031,
"step": 2900
},
{
"epoch": 0.9605991533702377,
"grad_norm": 2.4111130237579346,
"learning_rate": 3.399544122435689e-05,
"loss": 0.7257329559326172,
"step": 2950
},
{
"epoch": 0.9768804949527841,
"grad_norm": 3.764932870864868,
"learning_rate": 3.372408553131445e-05,
"loss": 0.6783822631835937,
"step": 3000
},
{
"epoch": 0.9931618365353305,
"grad_norm": 3.1191906929016113,
"learning_rate": 3.345272983827201e-05,
"loss": 0.7785000610351562,
"step": 3050
},
{
"epoch": 1.0,
"eval_bertscore_f1": 0.9666035647731817,
"eval_bleu": 0.6286042636210436,
"eval_loss": 0.4953671097755432,
"eval_meteor": 0.727270993632129,
"eval_rouge1": 0.8535427677296903,
"eval_rouge2": 0.7442740134144676,
"eval_runtime": 125.3536,
"eval_samples_per_second": 10.307,
"eval_steps_per_second": 1.292,
"step": 3071
},
{
"epoch": 1.009443178117877,
"grad_norm": 3.273244857788086,
"learning_rate": 3.318137414522957e-05,
"loss": 0.6082234954833985,
"step": 3100
},
{
"epoch": 1.0257245197004232,
"grad_norm": 3.4152684211730957,
"learning_rate": 3.291001845218713e-05,
"loss": 0.5953511047363281,
"step": 3150
},
{
"epoch": 1.0420058612829697,
"grad_norm": 3.5387468338012695,
"learning_rate": 3.263866275914469e-05,
"loss": 0.6991500091552735,
"step": 3200
},
{
"epoch": 1.0582872028655161,
"grad_norm": 4.7770280838012695,
"learning_rate": 3.236730706610225e-05,
"loss": 0.6681033325195312,
"step": 3250
},
{
"epoch": 1.0745685444480626,
"grad_norm": 3.492780923843384,
"learning_rate": 3.209595137305981e-05,
"loss": 0.6578762054443359,
"step": 3300
},
{
"epoch": 1.0908498860306088,
"grad_norm": 3.3742964267730713,
"learning_rate": 3.182459568001737e-05,
"loss": 0.6371722030639648,
"step": 3350
},
{
"epoch": 1.1071312276131553,
"grad_norm": 2.9794905185699463,
"learning_rate": 3.155323998697493e-05,
"loss": 0.6163125228881836,
"step": 3400
},
{
"epoch": 1.1234125691957018,
"grad_norm": 4.077469825744629,
"learning_rate": 3.128188429393249e-05,
"loss": 0.6614529418945313,
"step": 3450
},
{
"epoch": 1.1396939107782482,
"grad_norm": 2.614112615585327,
"learning_rate": 3.1010528600890045e-05,
"loss": 0.6072891998291016,
"step": 3500
},
{
"epoch": 1.1559752523607945,
"grad_norm": 2.661522150039673,
"learning_rate": 3.0739172907847604e-05,
"loss": 0.5901923370361328,
"step": 3550
},
{
"epoch": 1.172256593943341,
"grad_norm": 3.5029804706573486,
"learning_rate": 3.0467817214805167e-05,
"loss": 0.7141423034667969,
"step": 3600
},
{
"epoch": 1.1885379355258874,
"grad_norm": 2.75795578956604,
"learning_rate": 3.0196461521762725e-05,
"loss": 0.6581143951416015,
"step": 3650
},
{
"epoch": 1.2048192771084336,
"grad_norm": 3.335984230041504,
"learning_rate": 2.992510582872029e-05,
"loss": 0.6345047378540039,
"step": 3700
},
{
"epoch": 1.22110061869098,
"grad_norm": 2.886397361755371,
"learning_rate": 2.965375013567785e-05,
"loss": 0.6183316802978516,
"step": 3750
},
{
"epoch": 1.2373819602735265,
"grad_norm": 3.5690371990203857,
"learning_rate": 2.938239444263541e-05,
"loss": 0.6330132293701172,
"step": 3800
},
{
"epoch": 1.253663301856073,
"grad_norm": 2.857058525085449,
"learning_rate": 2.9111038749592967e-05,
"loss": 0.5762019729614258,
"step": 3850
},
{
"epoch": 1.2699446434386195,
"grad_norm": 3.4295766353607178,
"learning_rate": 2.8839683056550526e-05,
"loss": 0.6346803665161133,
"step": 3900
},
{
"epoch": 1.2862259850211657,
"grad_norm": 3.578397274017334,
"learning_rate": 2.8568327363508085e-05,
"loss": 0.6218410491943359,
"step": 3950
},
{
"epoch": 1.3025073266037122,
"grad_norm": 4.432509422302246,
"learning_rate": 2.829697167046565e-05,
"loss": 0.6320372009277344,
"step": 4000
},
{
"epoch": 1.3187886681862586,
"grad_norm": 2.806671142578125,
"learning_rate": 2.802561597742321e-05,
"loss": 0.652824478149414,
"step": 4050
},
{
"epoch": 1.3350700097688049,
"grad_norm": 3.3935513496398926,
"learning_rate": 2.775426028438077e-05,
"loss": 0.6719865417480468,
"step": 4100
},
{
"epoch": 1.3513513513513513,
"grad_norm": 2.2870659828186035,
"learning_rate": 2.7482904591338327e-05,
"loss": 0.586540756225586,
"step": 4150
},
{
"epoch": 1.3676326929338978,
"grad_norm": 3.7163562774658203,
"learning_rate": 2.7211548898295886e-05,
"loss": 0.5858817672729493,
"step": 4200
},
{
"epoch": 1.3839140345164442,
"grad_norm": 2.730625629425049,
"learning_rate": 2.694019320525345e-05,
"loss": 0.5543305206298829,
"step": 4250
},
{
"epoch": 1.4001953760989905,
"grad_norm": 3.728361129760742,
"learning_rate": 2.666883751221101e-05,
"loss": 0.6442169952392578,
"step": 4300
},
{
"epoch": 1.416476717681537,
"grad_norm": 2.2651383876800537,
"learning_rate": 2.639748181916857e-05,
"loss": 0.582794075012207,
"step": 4350
},
{
"epoch": 1.4327580592640834,
"grad_norm": 2.430931806564331,
"learning_rate": 2.6126126126126128e-05,
"loss": 0.6022589874267578,
"step": 4400
},
{
"epoch": 1.4490394008466296,
"grad_norm": 3.222708225250244,
"learning_rate": 2.5854770433083687e-05,
"loss": 0.6583724975585937,
"step": 4450
},
{
"epoch": 1.465320742429176,
"grad_norm": 4.678814888000488,
"learning_rate": 2.5583414740041246e-05,
"loss": 0.5943536376953125,
"step": 4500
},
{
"epoch": 1.4816020840117226,
"grad_norm": 3.0403647422790527,
"learning_rate": 2.5312059046998808e-05,
"loss": 0.5958936309814453,
"step": 4550
},
{
"epoch": 1.497883425594269,
"grad_norm": 2.538402795791626,
"learning_rate": 2.504070335395637e-05,
"loss": 0.6167163467407226,
"step": 4600
},
{
"epoch": 1.5141647671768155,
"grad_norm": 5.17458438873291,
"learning_rate": 2.476934766091393e-05,
"loss": 0.5508831405639648,
"step": 4650
},
{
"epoch": 1.530446108759362,
"grad_norm": 2.4245738983154297,
"learning_rate": 2.4497991967871488e-05,
"loss": 0.6157744598388671,
"step": 4700
},
{
"epoch": 1.5467274503419082,
"grad_norm": 2.481553792953491,
"learning_rate": 2.4226636274829047e-05,
"loss": 0.6096940231323242,
"step": 4750
},
{
"epoch": 1.5630087919244544,
"grad_norm": 2.797117233276367,
"learning_rate": 2.395528058178661e-05,
"loss": 0.6150970458984375,
"step": 4800
},
{
"epoch": 1.5792901335070009,
"grad_norm": 4.05893611907959,
"learning_rate": 2.3683924888744168e-05,
"loss": 0.6198799133300781,
"step": 4850
},
{
"epoch": 1.5955714750895473,
"grad_norm": 3.5510406494140625,
"learning_rate": 2.3412569195701726e-05,
"loss": 0.5494784545898438,
"step": 4900
},
{
"epoch": 1.6118528166720938,
"grad_norm": 4.9277424812316895,
"learning_rate": 2.3141213502659285e-05,
"loss": 0.500348129272461,
"step": 4950
},
{
"epoch": 1.6281341582546403,
"grad_norm": 2.930206775665283,
"learning_rate": 2.2869857809616847e-05,
"loss": 0.5013648986816406,
"step": 5000
},
{
"epoch": 1.6444154998371867,
"grad_norm": 10.043802261352539,
"learning_rate": 2.2598502116574406e-05,
"loss": 0.5953195190429688,
"step": 5050
},
{
"epoch": 1.660696841419733,
"grad_norm": 3.334196090698242,
"learning_rate": 2.2327146423531965e-05,
"loss": 0.5374015426635742,
"step": 5100
},
{
"epoch": 1.6769781830022794,
"grad_norm": 3.292771339416504,
"learning_rate": 2.2055790730489524e-05,
"loss": 0.49036331176757814,
"step": 5150
},
{
"epoch": 1.6932595245848256,
"grad_norm": 3.440549373626709,
"learning_rate": 2.1784435037447086e-05,
"loss": 0.5311320114135742,
"step": 5200
},
{
"epoch": 1.709540866167372,
"grad_norm": 3.720184803009033,
"learning_rate": 2.1513079344404645e-05,
"loss": 0.5855265045166016,
"step": 5250
},
{
"epoch": 1.7258222077499186,
"grad_norm": 3.9988839626312256,
"learning_rate": 2.1241723651362204e-05,
"loss": 0.5410572052001953,
"step": 5300
},
{
"epoch": 1.742103549332465,
"grad_norm": 3.244662046432495,
"learning_rate": 2.0970367958319766e-05,
"loss": 0.553615951538086,
"step": 5350
},
{
"epoch": 1.7583848909150115,
"grad_norm": 3.0616097450256348,
"learning_rate": 2.0699012265277325e-05,
"loss": 0.4832605361938477,
"step": 5400
},
{
"epoch": 1.774666232497558,
"grad_norm": 3.586770534515381,
"learning_rate": 2.0427656572234887e-05,
"loss": 0.5060377883911132,
"step": 5450
},
{
"epoch": 1.7909475740801042,
"grad_norm": 2.99122953414917,
"learning_rate": 2.0156300879192446e-05,
"loss": 0.510421142578125,
"step": 5500
},
{
"epoch": 1.8072289156626506,
"grad_norm": 2.647064685821533,
"learning_rate": 1.9884945186150005e-05,
"loss": 0.5255117034912109,
"step": 5550
},
{
"epoch": 1.8235102572451969,
"grad_norm": 3.2572667598724365,
"learning_rate": 1.9613589493107567e-05,
"loss": 0.48496082305908206,
"step": 5600
},
{
"epoch": 1.8397915988277433,
"grad_norm": 4.511913299560547,
"learning_rate": 1.9342233800065126e-05,
"loss": 0.5061034393310547,
"step": 5650
},
{
"epoch": 1.8560729404102898,
"grad_norm": 2.9536867141723633,
"learning_rate": 1.9070878107022685e-05,
"loss": 0.5593103790283203,
"step": 5700
},
{
"epoch": 1.8723542819928363,
"grad_norm": 2.071770429611206,
"learning_rate": 1.8799522413980247e-05,
"loss": 0.5091780471801758,
"step": 5750
},
{
"epoch": 1.8886356235753827,
"grad_norm": 4.718142986297607,
"learning_rate": 1.8528166720937806e-05,
"loss": 0.4754158401489258,
"step": 5800
},
{
"epoch": 1.904916965157929,
"grad_norm": 4.707818508148193,
"learning_rate": 1.8256811027895364e-05,
"loss": 0.5697180557250977,
"step": 5850
},
{
"epoch": 1.9211983067404754,
"grad_norm": 4.631124973297119,
"learning_rate": 1.7985455334852927e-05,
"loss": 0.4917443084716797,
"step": 5900
},
{
"epoch": 1.9374796483230217,
"grad_norm": 3.7037010192871094,
"learning_rate": 1.7714099641810485e-05,
"loss": 0.577647933959961,
"step": 5950
},
{
"epoch": 1.9537609899055681,
"grad_norm": 4.4254302978515625,
"learning_rate": 1.7442743948768044e-05,
"loss": 0.49387569427490235,
"step": 6000
},
{
"epoch": 1.9700423314881146,
"grad_norm": 4.429046630859375,
"learning_rate": 1.7171388255725606e-05,
"loss": 0.555143165588379,
"step": 6050
},
{
"epoch": 1.986323673070661,
"grad_norm": 3.211913824081421,
"learning_rate": 1.6900032562683165e-05,
"loss": 0.5266495132446289,
"step": 6100
},
{
"epoch": 2.0,
"eval_bertscore_f1": 0.9751379909253342,
"eval_bleu": 0.7283162211638717,
"eval_loss": 0.3667888939380646,
"eval_meteor": 0.806130448591728,
"eval_rouge1": 0.8830570311221204,
"eval_rouge2": 0.803848501332357,
"eval_runtime": 126.2611,
"eval_samples_per_second": 10.233,
"eval_steps_per_second": 1.283,
"step": 6142
},
{
"epoch": 2.0026050146532075,
"grad_norm": 2.130920886993408,
"learning_rate": 1.6628676869640724e-05,
"loss": 0.49579532623291017,
"step": 6150
},
{
"epoch": 2.018886356235754,
"grad_norm": 3.622119665145874,
"learning_rate": 1.6357321176598286e-05,
"loss": 0.5926795959472656,
"step": 6200
},
{
"epoch": 2.0351676978183004,
"grad_norm": 4.096598148345947,
"learning_rate": 1.6085965483555845e-05,
"loss": 0.511412124633789,
"step": 6250
},
{
"epoch": 2.0514490394008464,
"grad_norm": 3.3541550636291504,
"learning_rate": 1.5814609790513404e-05,
"loss": 0.5026980590820312,
"step": 6300
},
{
"epoch": 2.067730380983393,
"grad_norm": 3.797450304031372,
"learning_rate": 1.5543254097470966e-05,
"loss": 0.48105335235595703,
"step": 6350
},
{
"epoch": 2.0840117225659394,
"grad_norm": 4.0247626304626465,
"learning_rate": 1.5271898404428525e-05,
"loss": 0.48235019683837893,
"step": 6400
},
{
"epoch": 2.100293064148486,
"grad_norm": 4.341481685638428,
"learning_rate": 1.5000542711386087e-05,
"loss": 0.5079761123657227,
"step": 6450
},
{
"epoch": 2.1165744057310323,
"grad_norm": 2.415269374847412,
"learning_rate": 1.4729187018343646e-05,
"loss": 0.5332447814941407,
"step": 6500
},
{
"epoch": 2.1328557473135787,
"grad_norm": 4.66851282119751,
"learning_rate": 1.4457831325301205e-05,
"loss": 0.5538092041015625,
"step": 6550
},
{
"epoch": 2.149137088896125,
"grad_norm": 3.448925495147705,
"learning_rate": 1.4186475632258767e-05,
"loss": 0.5555255889892579,
"step": 6600
},
{
"epoch": 2.165418430478671,
"grad_norm": 4.12534236907959,
"learning_rate": 1.3915119939216326e-05,
"loss": 0.49463138580322263,
"step": 6650
},
{
"epoch": 2.1816997720612177,
"grad_norm": 2.726137638092041,
"learning_rate": 1.3643764246173885e-05,
"loss": 0.4911256408691406,
"step": 6700
},
{
"epoch": 2.197981113643764,
"grad_norm": 2.9859631061553955,
"learning_rate": 1.3372408553131447e-05,
"loss": 0.5317694091796875,
"step": 6750
},
{
"epoch": 2.2142624552263106,
"grad_norm": 3.0363500118255615,
"learning_rate": 1.3101052860089006e-05,
"loss": 0.48204010009765624,
"step": 6800
},
{
"epoch": 2.230543796808857,
"grad_norm": 2.7553327083587646,
"learning_rate": 1.2829697167046565e-05,
"loss": 0.5275654602050781,
"step": 6850
},
{
"epoch": 2.2468251383914035,
"grad_norm": 3.37378191947937,
"learning_rate": 1.2558341474004127e-05,
"loss": 0.49252197265625,
"step": 6900
},
{
"epoch": 2.26310647997395,
"grad_norm": 3.5572731494903564,
"learning_rate": 1.2286985780961686e-05,
"loss": 0.5376744079589844,
"step": 6950
},
{
"epoch": 2.2793878215564964,
"grad_norm": 4.323084354400635,
"learning_rate": 1.2015630087919246e-05,
"loss": 0.5603115081787109,
"step": 7000
},
{
"epoch": 2.295669163139043,
"grad_norm": 3.9723100662231445,
"learning_rate": 1.1744274394876805e-05,
"loss": 0.4530460739135742,
"step": 7050
},
{
"epoch": 2.311950504721589,
"grad_norm": 4.0148491859436035,
"learning_rate": 1.1472918701834365e-05,
"loss": 0.4808524703979492,
"step": 7100
},
{
"epoch": 2.3282318463041354,
"grad_norm": 3.357252597808838,
"learning_rate": 1.1201563008791926e-05,
"loss": 0.5342596817016602,
"step": 7150
},
{
"epoch": 2.344513187886682,
"grad_norm": 1.4084240198135376,
"learning_rate": 1.0930207315749485e-05,
"loss": 0.5188829040527344,
"step": 7200
},
{
"epoch": 2.3607945294692283,
"grad_norm": 4.223776817321777,
"learning_rate": 1.0658851622707045e-05,
"loss": 0.5551160049438476,
"step": 7250
},
{
"epoch": 2.3770758710517748,
"grad_norm": 3.2388432025909424,
"learning_rate": 1.0387495929664604e-05,
"loss": 0.4678123092651367,
"step": 7300
},
{
"epoch": 2.393357212634321,
"grad_norm": 2.2621917724609375,
"learning_rate": 1.0116140236622165e-05,
"loss": 0.49837650299072267,
"step": 7350
},
{
"epoch": 2.4096385542168672,
"grad_norm": 3.9080207347869873,
"learning_rate": 9.844784543579723e-06,
"loss": 0.4679309844970703,
"step": 7400
},
{
"epoch": 2.4259198957994137,
"grad_norm": 2.2795565128326416,
"learning_rate": 9.573428850537284e-06,
"loss": 0.4956610870361328,
"step": 7450
},
{
"epoch": 2.44220123738196,
"grad_norm": 2.872938632965088,
"learning_rate": 9.302073157494844e-06,
"loss": 0.48609119415283203,
"step": 7500
},
{
"epoch": 2.4584825789645066,
"grad_norm": 3.869072675704956,
"learning_rate": 9.030717464452403e-06,
"loss": 0.5468455886840821,
"step": 7550
},
{
"epoch": 2.474763920547053,
"grad_norm": 3.9535272121429443,
"learning_rate": 8.759361771409964e-06,
"loss": 0.4581578826904297,
"step": 7600
},
{
"epoch": 2.4910452621295995,
"grad_norm": 2.2123703956604004,
"learning_rate": 8.488006078367524e-06,
"loss": 0.48407554626464844,
"step": 7650
},
{
"epoch": 2.507326603712146,
"grad_norm": 3.5734164714813232,
"learning_rate": 8.216650385325085e-06,
"loss": 0.4096232986450195,
"step": 7700
},
{
"epoch": 2.5236079452946925,
"grad_norm": 5.056736946105957,
"learning_rate": 7.945294692282644e-06,
"loss": 0.48154861450195313,
"step": 7750
},
{
"epoch": 2.539889286877239,
"grad_norm": 5.742955207824707,
"learning_rate": 7.673938999240204e-06,
"loss": 0.46851539611816406,
"step": 7800
},
{
"epoch": 2.556170628459785,
"grad_norm": 3.6929991245269775,
"learning_rate": 7.402583306197765e-06,
"loss": 0.4527290725708008,
"step": 7850
},
{
"epoch": 2.5724519700423314,
"grad_norm": 4.624508857727051,
"learning_rate": 7.1312276131553235e-06,
"loss": 0.487193717956543,
"step": 7900
},
{
"epoch": 2.588733311624878,
"grad_norm": 3.526402711868286,
"learning_rate": 6.859871920112884e-06,
"loss": 0.4767793273925781,
"step": 7950
},
{
"epoch": 2.6050146532074243,
"grad_norm": 2.557279586791992,
"learning_rate": 6.5885162270704446e-06,
"loss": 0.47788654327392577,
"step": 8000
},
{
"epoch": 2.6212959947899708,
"grad_norm": 3.1233720779418945,
"learning_rate": 6.317160534028005e-06,
"loss": 0.43118988037109374,
"step": 8050
},
{
"epoch": 2.6375773363725172,
"grad_norm": 3.1848602294921875,
"learning_rate": 6.045804840985564e-06,
"loss": 0.4824806594848633,
"step": 8100
},
{
"epoch": 2.6538586779550632,
"grad_norm": 2.1851677894592285,
"learning_rate": 5.774449147943124e-06,
"loss": 0.5195888900756835,
"step": 8150
},
{
"epoch": 2.6701400195376097,
"grad_norm": 4.459860801696777,
"learning_rate": 5.503093454900684e-06,
"loss": 0.5190325164794922,
"step": 8200
},
{
"epoch": 2.686421361120156,
"grad_norm": 2.845478057861328,
"learning_rate": 5.231737761858245e-06,
"loss": 0.5278813171386719,
"step": 8250
},
{
"epoch": 2.7027027027027026,
"grad_norm": 3.6786937713623047,
"learning_rate": 4.960382068815804e-06,
"loss": 0.4795223236083984,
"step": 8300
},
{
"epoch": 2.718984044285249,
"grad_norm": 3.6952383518218994,
"learning_rate": 4.689026375773364e-06,
"loss": 0.5154057312011718,
"step": 8350
},
{
"epoch": 2.7352653858677956,
"grad_norm": 2.0930325984954834,
"learning_rate": 4.417670682730924e-06,
"loss": 0.4847321319580078,
"step": 8400
},
{
"epoch": 2.751546727450342,
"grad_norm": 6.89557409286499,
"learning_rate": 4.146314989688484e-06,
"loss": 0.39144508361816405,
"step": 8450
},
{
"epoch": 2.7678280690328885,
"grad_norm": 4.23341703414917,
"learning_rate": 3.874959296646044e-06,
"loss": 0.47240074157714845,
"step": 8500
},
{
"epoch": 2.784109410615435,
"grad_norm": 5.129818439483643,
"learning_rate": 3.603603603603604e-06,
"loss": 0.4966455841064453,
"step": 8550
},
{
"epoch": 2.800390752197981,
"grad_norm": 2.9496281147003174,
"learning_rate": 3.3322479105611635e-06,
"loss": 0.48110599517822267,
"step": 8600
},
{
"epoch": 2.8166720937805274,
"grad_norm": 5.308284759521484,
"learning_rate": 3.0608922175187236e-06,
"loss": 0.4717050552368164,
"step": 8650
},
{
"epoch": 2.832953435363074,
"grad_norm": 3.275146722793579,
"learning_rate": 2.7895365244762837e-06,
"loss": 0.4382866668701172,
"step": 8700
},
{
"epoch": 2.8492347769456203,
"grad_norm": 2.5721969604492188,
"learning_rate": 2.5181808314338434e-06,
"loss": 0.46353874206542967,
"step": 8750
},
{
"epoch": 2.865516118528167,
"grad_norm": 3.426837921142578,
"learning_rate": 2.2468251383914035e-06,
"loss": 0.5103521347045898,
"step": 8800
},
{
"epoch": 2.8817974601107132,
"grad_norm": 2.3799993991851807,
"learning_rate": 1.9754694453489636e-06,
"loss": 0.47350223541259767,
"step": 8850
},
{
"epoch": 2.8980788016932593,
"grad_norm": 3.316828966140747,
"learning_rate": 1.7041137523065235e-06,
"loss": 0.4602887725830078,
"step": 8900
},
{
"epoch": 2.9143601432758057,
"grad_norm": 5.089015007019043,
"learning_rate": 1.4327580592640834e-06,
"loss": 0.4908058547973633,
"step": 8950
},
{
"epoch": 2.930641484858352,
"grad_norm": 5.589182376861572,
"learning_rate": 1.1614023662216434e-06,
"loss": 0.4539414215087891,
"step": 9000
},
{
"epoch": 2.9469228264408986,
"grad_norm": 2.4032421112060547,
"learning_rate": 8.900466731792033e-07,
"loss": 0.42325592041015625,
"step": 9050
},
{
"epoch": 2.963204168023445,
"grad_norm": 3.4075100421905518,
"learning_rate": 6.186909801367633e-07,
"loss": 0.4070619201660156,
"step": 9100
},
{
"epoch": 2.9794855096059916,
"grad_norm": 1.6805068254470825,
"learning_rate": 3.4733528709432325e-07,
"loss": 0.4350212860107422,
"step": 9150
},
{
"epoch": 2.995766851188538,
"grad_norm": 3.386293888092041,
"learning_rate": 7.597959405188321e-08,
"loss": 0.4917270660400391,
"step": 9200
},
{
"epoch": 3.0,
"eval_bertscore_f1": 0.9777762908776847,
"eval_bleu": 0.7589549870918858,
"eval_loss": 0.3381657600402832,
"eval_meteor": 0.8307886222747983,
"eval_rouge1": 0.893313778543083,
"eval_rouge2": 0.821781747009253,
"eval_runtime": 130.6802,
"eval_samples_per_second": 9.887,
"eval_steps_per_second": 1.24,
"step": 9213
}
],
"logging_steps": 50,
"max_steps": 9213,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.263887217557504e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}