LEMMA-LLAMA-3-70B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9931630082763583,
"eval_steps": 500,
"global_step": 1041,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01439366678661389,
"grad_norm": 0.5779629390229284,
"learning_rate": 1.5625e-06,
"loss": 0.8406,
"num_input_tokens_seen": 827616,
"step": 5
},
{
"epoch": 0.02878733357322778,
"grad_norm": 0.3769644100895997,
"learning_rate": 3.125e-06,
"loss": 0.7276,
"num_input_tokens_seen": 1614424,
"step": 10
},
{
"epoch": 0.04318100035984167,
"grad_norm": 0.23058525664159524,
"learning_rate": 4.6875000000000004e-06,
"loss": 0.6057,
"num_input_tokens_seen": 2426024,
"step": 15
},
{
"epoch": 0.05757466714645556,
"grad_norm": 0.17576524916291753,
"learning_rate": 6.25e-06,
"loss": 0.5692,
"num_input_tokens_seen": 3247560,
"step": 20
},
{
"epoch": 0.07196833393306945,
"grad_norm": 0.1076556243187617,
"learning_rate": 7.8125e-06,
"loss": 0.5376,
"num_input_tokens_seen": 4047840,
"step": 25
},
{
"epoch": 0.08636200071968334,
"grad_norm": 0.09246755590597658,
"learning_rate": 9.375000000000001e-06,
"loss": 0.5098,
"num_input_tokens_seen": 4856920,
"step": 30
},
{
"epoch": 0.10075566750629723,
"grad_norm": 0.08634389055616094,
"learning_rate": 9.99978187935494e-06,
"loss": 0.4882,
"num_input_tokens_seen": 5666200,
"step": 35
},
{
"epoch": 0.11514933429291112,
"grad_norm": 0.0825592120207266,
"learning_rate": 9.998448988774289e-06,
"loss": 0.4772,
"num_input_tokens_seen": 6479848,
"step": 40
},
{
"epoch": 0.12954300107952502,
"grad_norm": 0.12979547541437692,
"learning_rate": 9.99590470838777e-06,
"loss": 0.4716,
"num_input_tokens_seen": 7293912,
"step": 45
},
{
"epoch": 0.1439366678661389,
"grad_norm": 0.08008212265814983,
"learning_rate": 9.99214965480972e-06,
"loss": 0.4698,
"num_input_tokens_seen": 8104992,
"step": 50
},
{
"epoch": 0.15833033465275279,
"grad_norm": 0.07635889994141641,
"learning_rate": 9.987184738089166e-06,
"loss": 0.4672,
"num_input_tokens_seen": 8928032,
"step": 55
},
{
"epoch": 0.17272400143936667,
"grad_norm": 0.08349978523786301,
"learning_rate": 9.981011161489262e-06,
"loss": 0.4569,
"num_input_tokens_seen": 9747912,
"step": 60
},
{
"epoch": 0.18711766822598058,
"grad_norm": 0.07698759791429816,
"learning_rate": 9.973630421195679e-06,
"loss": 0.4494,
"num_input_tokens_seen": 10544384,
"step": 65
},
{
"epoch": 0.20151133501259447,
"grad_norm": 0.08882664782692092,
"learning_rate": 9.965044305954002e-06,
"loss": 0.4542,
"num_input_tokens_seen": 11331952,
"step": 70
},
{
"epoch": 0.21590500179920835,
"grad_norm": 0.07284395170855988,
"learning_rate": 9.955254896636217e-06,
"loss": 0.4481,
"num_input_tokens_seen": 12163384,
"step": 75
},
{
"epoch": 0.23029866858582224,
"grad_norm": 0.36584865417957224,
"learning_rate": 9.944264565736406e-06,
"loss": 0.4508,
"num_input_tokens_seen": 12976480,
"step": 80
},
{
"epoch": 0.24469233537243612,
"grad_norm": 0.07486647710511375,
"learning_rate": 9.93207597679577e-06,
"loss": 0.4499,
"num_input_tokens_seen": 13784400,
"step": 85
},
{
"epoch": 0.25908600215905003,
"grad_norm": 0.08499053233185717,
"learning_rate": 9.918692083757105e-06,
"loss": 0.4471,
"num_input_tokens_seen": 14598392,
"step": 90
},
{
"epoch": 0.2734796689456639,
"grad_norm": 0.08289267494160008,
"learning_rate": 9.904116130248913e-06,
"loss": 0.4417,
"num_input_tokens_seen": 15392832,
"step": 95
},
{
"epoch": 0.2878733357322778,
"grad_norm": 0.1865288970287863,
"learning_rate": 9.888351648799288e-06,
"loss": 0.4439,
"num_input_tokens_seen": 16196872,
"step": 100
},
{
"epoch": 0.3022670025188917,
"grad_norm": 0.07732845404963207,
"learning_rate": 9.871402459979804e-06,
"loss": 0.4491,
"num_input_tokens_seen": 16997848,
"step": 105
},
{
"epoch": 0.31666066930550557,
"grad_norm": 0.0882909536082469,
"learning_rate": 9.853272671479588e-06,
"loss": 0.4413,
"num_input_tokens_seen": 17810472,
"step": 110
},
{
"epoch": 0.3310543360921195,
"grad_norm": 0.08736097787765183,
"learning_rate": 9.833966677109805e-06,
"loss": 0.4425,
"num_input_tokens_seen": 18612496,
"step": 115
},
{
"epoch": 0.34544800287873334,
"grad_norm": 0.08223983944409248,
"learning_rate": 9.813489155738801e-06,
"loss": 0.4368,
"num_input_tokens_seen": 19430240,
"step": 120
},
{
"epoch": 0.35984166966534725,
"grad_norm": 0.1049720939536027,
"learning_rate": 9.791845070158175e-06,
"loss": 0.4419,
"num_input_tokens_seen": 20237792,
"step": 125
},
{
"epoch": 0.37423533645196116,
"grad_norm": 0.10864415450523532,
"learning_rate": 9.769039665880024e-06,
"loss": 0.434,
"num_input_tokens_seen": 21048760,
"step": 130
},
{
"epoch": 0.388629003238575,
"grad_norm": 0.08777748987691976,
"learning_rate": 9.745078469865673e-06,
"loss": 0.4323,
"num_input_tokens_seen": 21879304,
"step": 135
},
{
"epoch": 0.40302267002518893,
"grad_norm": 0.07866460945251785,
"learning_rate": 9.719967289186211e-06,
"loss": 0.4341,
"num_input_tokens_seen": 22686568,
"step": 140
},
{
"epoch": 0.4174163368118028,
"grad_norm": 0.1389458191249627,
"learning_rate": 9.69371220961511e-06,
"loss": 0.4322,
"num_input_tokens_seen": 23493624,
"step": 145
},
{
"epoch": 0.4318100035984167,
"grad_norm": 0.0935774579848259,
"learning_rate": 9.666319594153342e-06,
"loss": 0.4355,
"num_input_tokens_seen": 24282576,
"step": 150
},
{
"epoch": 0.44620367038503056,
"grad_norm": 0.0872016916001588,
"learning_rate": 9.637796081487263e-06,
"loss": 0.4303,
"num_input_tokens_seen": 25095960,
"step": 155
},
{
"epoch": 0.46059733717164447,
"grad_norm": 0.09036021866577526,
"learning_rate": 9.608148584379724e-06,
"loss": 0.4302,
"num_input_tokens_seen": 25893416,
"step": 160
},
{
"epoch": 0.4749910039582584,
"grad_norm": 0.0706217345866479,
"learning_rate": 9.577384287994733e-06,
"loss": 0.4294,
"num_input_tokens_seen": 26706608,
"step": 165
},
{
"epoch": 0.48938467074487224,
"grad_norm": 0.11020952152398915,
"learning_rate": 9.545510648156106e-06,
"loss": 0.4278,
"num_input_tokens_seen": 27505960,
"step": 170
},
{
"epoch": 0.5037783375314862,
"grad_norm": 0.07371653666185858,
"learning_rate": 9.512535389540532e-06,
"loss": 0.4279,
"num_input_tokens_seen": 28310928,
"step": 175
},
{
"epoch": 0.5181720043181001,
"grad_norm": 0.07763054855764867,
"learning_rate": 9.478466503805467e-06,
"loss": 0.4292,
"num_input_tokens_seen": 29123264,
"step": 180
},
{
"epoch": 0.532565671104714,
"grad_norm": 0.07538708494428616,
"learning_rate": 9.443312247652335e-06,
"loss": 0.4304,
"num_input_tokens_seen": 29929752,
"step": 185
},
{
"epoch": 0.5469593378913278,
"grad_norm": 0.07649519412043841,
"learning_rate": 9.407081140825485e-06,
"loss": 0.4334,
"num_input_tokens_seen": 30730488,
"step": 190
},
{
"epoch": 0.5613530046779417,
"grad_norm": 0.08583553040482807,
"learning_rate": 9.36978196404741e-06,
"loss": 0.4243,
"num_input_tokens_seen": 31540408,
"step": 195
},
{
"epoch": 0.5757466714645556,
"grad_norm": 0.07900459269030875,
"learning_rate": 9.331423756890715e-06,
"loss": 0.4251,
"num_input_tokens_seen": 32337216,
"step": 200
},
{
"epoch": 0.5901403382511695,
"grad_norm": 0.15753797766328317,
"learning_rate": 9.292015815587326e-06,
"loss": 0.4235,
"num_input_tokens_seen": 33141928,
"step": 205
},
{
"epoch": 0.6045340050377834,
"grad_norm": 0.11638457502760226,
"learning_rate": 9.251567690775554e-06,
"loss": 0.4312,
"num_input_tokens_seen": 33934528,
"step": 210
},
{
"epoch": 0.6189276718243972,
"grad_norm": 0.08712369323178569,
"learning_rate": 9.210089185185436e-06,
"loss": 0.4231,
"num_input_tokens_seen": 34747544,
"step": 215
},
{
"epoch": 0.6333213386110111,
"grad_norm": 0.06827926636300911,
"learning_rate": 9.167590351263031e-06,
"loss": 0.424,
"num_input_tokens_seen": 35570176,
"step": 220
},
{
"epoch": 0.647715005397625,
"grad_norm": 0.10267838073029005,
"learning_rate": 9.124081488734173e-06,
"loss": 0.4306,
"num_input_tokens_seen": 36358744,
"step": 225
},
{
"epoch": 0.662108672184239,
"grad_norm": 0.0755782915067997,
"learning_rate": 9.0795731421083e-06,
"loss": 0.4206,
"num_input_tokens_seen": 37173208,
"step": 230
},
{
"epoch": 0.6765023389708529,
"grad_norm": 0.15663263718621678,
"learning_rate": 9.034076098122954e-06,
"loss": 0.4196,
"num_input_tokens_seen": 37976680,
"step": 235
},
{
"epoch": 0.6908960057574667,
"grad_norm": 0.07527866839853452,
"learning_rate": 8.987601383129597e-06,
"loss": 0.4276,
"num_input_tokens_seen": 38771712,
"step": 240
},
{
"epoch": 0.7052896725440806,
"grad_norm": 0.31914580023071465,
"learning_rate": 8.940160260421315e-06,
"loss": 0.4241,
"num_input_tokens_seen": 39578848,
"step": 245
},
{
"epoch": 0.7196833393306945,
"grad_norm": 0.14036972783190346,
"learning_rate": 8.891764227503148e-06,
"loss": 0.4226,
"num_input_tokens_seen": 40384704,
"step": 250
},
{
"epoch": 0.7340770061173084,
"grad_norm": 0.07024104609177177,
"learning_rate": 8.842425013305624e-06,
"loss": 0.4251,
"num_input_tokens_seen": 41191448,
"step": 255
},
{
"epoch": 0.7484706729039223,
"grad_norm": 0.09370623767861135,
"learning_rate": 8.79215457534221e-06,
"loss": 0.4197,
"num_input_tokens_seen": 42006856,
"step": 260
},
{
"epoch": 0.7628643396905361,
"grad_norm": 0.07417987507060489,
"learning_rate": 8.740965096811376e-06,
"loss": 0.4214,
"num_input_tokens_seen": 42807520,
"step": 265
},
{
"epoch": 0.77725800647715,
"grad_norm": 0.11217362260602592,
"learning_rate": 8.688868983643966e-06,
"loss": 0.4236,
"num_input_tokens_seen": 43628288,
"step": 270
},
{
"epoch": 0.791651673263764,
"grad_norm": 0.08240322932111749,
"learning_rate": 8.635878861496566e-06,
"loss": 0.4211,
"num_input_tokens_seen": 44427360,
"step": 275
},
{
"epoch": 0.8060453400503779,
"grad_norm": 0.08138298276876849,
"learning_rate": 8.582007572691655e-06,
"loss": 0.425,
"num_input_tokens_seen": 45237864,
"step": 280
},
{
"epoch": 0.8204390068369917,
"grad_norm": 0.11395085023012703,
"learning_rate": 8.52726817310521e-06,
"loss": 0.4221,
"num_input_tokens_seen": 46038440,
"step": 285
},
{
"epoch": 0.8348326736236056,
"grad_norm": 0.09636284445475302,
"learning_rate": 8.47167392900258e-06,
"loss": 0.4177,
"num_input_tokens_seen": 46851360,
"step": 290
},
{
"epoch": 0.8492263404102195,
"grad_norm": 0.09929673214111566,
"learning_rate": 8.415238313823375e-06,
"loss": 0.4208,
"num_input_tokens_seen": 47668192,
"step": 295
},
{
"epoch": 0.8636200071968334,
"grad_norm": 0.06916677532174491,
"learning_rate": 8.357975004916122e-06,
"loss": 0.424,
"num_input_tokens_seen": 48478472,
"step": 300
},
{
"epoch": 0.8780136739834473,
"grad_norm": 0.067801409968533,
"learning_rate": 8.29989788022352e-06,
"loss": 0.4131,
"num_input_tokens_seen": 49279360,
"step": 305
},
{
"epoch": 0.8924073407700611,
"grad_norm": 0.06661287194929971,
"learning_rate": 8.241021014919085e-06,
"loss": 0.4202,
"num_input_tokens_seen": 50088376,
"step": 310
},
{
"epoch": 0.906801007556675,
"grad_norm": 0.25272287393239684,
"learning_rate": 8.18135867799597e-06,
"loss": 0.4093,
"num_input_tokens_seen": 50900336,
"step": 315
},
{
"epoch": 0.9211946743432889,
"grad_norm": 0.1388520732911355,
"learning_rate": 8.120925328808855e-06,
"loss": 0.4096,
"num_input_tokens_seen": 51710424,
"step": 320
},
{
"epoch": 0.9355883411299029,
"grad_norm": 0.06259776537090868,
"learning_rate": 8.059735613569651e-06,
"loss": 0.4162,
"num_input_tokens_seen": 52534840,
"step": 325
},
{
"epoch": 0.9499820079165168,
"grad_norm": 0.081614401363399,
"learning_rate": 7.997804361797964e-06,
"loss": 0.4121,
"num_input_tokens_seen": 53352176,
"step": 330
},
{
"epoch": 0.9643756747031306,
"grad_norm": 0.06189708443811681,
"learning_rate": 7.935146582727103e-06,
"loss": 0.4064,
"num_input_tokens_seen": 54164336,
"step": 335
},
{
"epoch": 0.9787693414897445,
"grad_norm": 0.061248567570393035,
"learning_rate": 7.87177746166655e-06,
"loss": 0.4169,
"num_input_tokens_seen": 54988816,
"step": 340
},
{
"epoch": 0.9931630082763584,
"grad_norm": 0.07421437751723413,
"learning_rate": 7.80771235632175e-06,
"loss": 0.4078,
"num_input_tokens_seen": 55805856,
"step": 345
},
{
"epoch": 1.0057574667146456,
"grad_norm": 0.09084673101497391,
"learning_rate": 7.742966793072131e-06,
"loss": 0.3865,
"num_input_tokens_seen": 56512576,
"step": 350
},
{
"epoch": 1.0201511335012594,
"grad_norm": 0.07851350583386621,
"learning_rate": 7.677556463208221e-06,
"loss": 0.3601,
"num_input_tokens_seen": 57327512,
"step": 355
},
{
"epoch": 1.0345448002878734,
"grad_norm": 0.07435122459174627,
"learning_rate": 7.61149721912883e-06,
"loss": 0.3569,
"num_input_tokens_seen": 58141856,
"step": 360
},
{
"epoch": 1.0489384670744872,
"grad_norm": 0.0681940057877125,
"learning_rate": 7.544805070499166e-06,
"loss": 0.3564,
"num_input_tokens_seen": 58938160,
"step": 365
},
{
"epoch": 1.063332133861101,
"grad_norm": 0.06668405495729676,
"learning_rate": 7.477496180370838e-06,
"loss": 0.355,
"num_input_tokens_seen": 59741352,
"step": 370
},
{
"epoch": 1.077725800647715,
"grad_norm": 0.1717361063870089,
"learning_rate": 7.409586861264696e-06,
"loss": 0.3508,
"num_input_tokens_seen": 60543752,
"step": 375
},
{
"epoch": 1.0921194674343289,
"grad_norm": 0.06299146123449073,
"learning_rate": 7.3410935712174405e-06,
"loss": 0.3532,
"num_input_tokens_seen": 61353904,
"step": 380
},
{
"epoch": 1.1065131342209429,
"grad_norm": 0.06788891381084333,
"learning_rate": 7.272032909792956e-06,
"loss": 0.3537,
"num_input_tokens_seen": 62159368,
"step": 385
},
{
"epoch": 1.1209068010075567,
"grad_norm": 0.09200642332429916,
"learning_rate": 7.202421614059369e-06,
"loss": 0.3582,
"num_input_tokens_seen": 62975832,
"step": 390
},
{
"epoch": 1.1353004677941705,
"grad_norm": 0.059684315559530396,
"learning_rate": 7.1322765545327555e-06,
"loss": 0.364,
"num_input_tokens_seen": 63784744,
"step": 395
},
{
"epoch": 1.1496941345807845,
"grad_norm": 0.06160567329815703,
"learning_rate": 7.061614731088523e-06,
"loss": 0.3531,
"num_input_tokens_seen": 64584400,
"step": 400
},
{
"epoch": 1.1640878013673983,
"grad_norm": 0.056833762653917076,
"learning_rate": 6.990453268841438e-06,
"loss": 0.3532,
"num_input_tokens_seen": 65406656,
"step": 405
},
{
"epoch": 1.1784814681540121,
"grad_norm": 0.0648391845120075,
"learning_rate": 6.918809413995299e-06,
"loss": 0.3537,
"num_input_tokens_seen": 66219456,
"step": 410
},
{
"epoch": 1.1928751349406261,
"grad_norm": 0.15247000076826037,
"learning_rate": 6.846700529663265e-06,
"loss": 0.3529,
"num_input_tokens_seen": 67030016,
"step": 415
},
{
"epoch": 1.20726880172724,
"grad_norm": 0.06203950219387994,
"learning_rate": 6.774144091659853e-06,
"loss": 0.3546,
"num_input_tokens_seen": 67841528,
"step": 420
},
{
"epoch": 1.221662468513854,
"grad_norm": 0.058269900721777464,
"learning_rate": 6.701157684265613e-06,
"loss": 0.3551,
"num_input_tokens_seen": 68670448,
"step": 425
},
{
"epoch": 1.2360561353004678,
"grad_norm": 0.05870151582320512,
"learning_rate": 6.627758995965533e-06,
"loss": 0.3558,
"num_input_tokens_seen": 69469232,
"step": 430
},
{
"epoch": 1.2504498020870818,
"grad_norm": 0.07616445921338318,
"learning_rate": 6.553965815162167e-06,
"loss": 0.3483,
"num_input_tokens_seen": 70276336,
"step": 435
},
{
"epoch": 1.2648434688736956,
"grad_norm": 0.06527387970084267,
"learning_rate": 6.479796025864569e-06,
"loss": 0.3585,
"num_input_tokens_seen": 71082576,
"step": 440
},
{
"epoch": 1.2792371356603094,
"grad_norm": 0.06068354431009231,
"learning_rate": 6.405267603354044e-06,
"loss": 0.354,
"num_input_tokens_seen": 71893496,
"step": 445
},
{
"epoch": 1.2936308024469234,
"grad_norm": 0.06649515990798605,
"learning_rate": 6.330398609827779e-06,
"loss": 0.355,
"num_input_tokens_seen": 72702304,
"step": 450
},
{
"epoch": 1.3080244692335372,
"grad_norm": 0.0636875010377068,
"learning_rate": 6.255207190021421e-06,
"loss": 0.363,
"num_input_tokens_seen": 73492408,
"step": 455
},
{
"epoch": 1.322418136020151,
"grad_norm": 0.07427443839072705,
"learning_rate": 6.179711566811637e-06,
"loss": 0.3599,
"num_input_tokens_seen": 74291240,
"step": 460
},
{
"epoch": 1.336811802806765,
"grad_norm": 0.0899910977698727,
"learning_rate": 6.103930036799739e-06,
"loss": 0.3594,
"num_input_tokens_seen": 75089824,
"step": 465
},
{
"epoch": 1.3512054695933788,
"grad_norm": 0.06200502328591128,
"learning_rate": 6.027880965877457e-06,
"loss": 0.362,
"num_input_tokens_seen": 75911472,
"step": 470
},
{
"epoch": 1.3655991363799929,
"grad_norm": 0.1146044117050479,
"learning_rate": 5.951582784775896e-06,
"loss": 0.3574,
"num_input_tokens_seen": 76718736,
"step": 475
},
{
"epoch": 1.3799928031666067,
"grad_norm": 0.06441611259214139,
"learning_rate": 5.8750539845987945e-06,
"loss": 0.3477,
"num_input_tokens_seen": 77557112,
"step": 480
},
{
"epoch": 1.3943864699532207,
"grad_norm": 0.05968003970772643,
"learning_rate": 5.798313112341153e-06,
"loss": 0.3556,
"num_input_tokens_seen": 78376728,
"step": 485
},
{
"epoch": 1.4087801367398345,
"grad_norm": 0.054880539209110374,
"learning_rate": 5.721378766394301e-06,
"loss": 0.3559,
"num_input_tokens_seen": 79192112,
"step": 490
},
{
"epoch": 1.4231738035264483,
"grad_norm": 0.07349211652635998,
"learning_rate": 5.644269592038528e-06,
"loss": 0.3585,
"num_input_tokens_seen": 79992352,
"step": 495
},
{
"epoch": 1.4375674703130623,
"grad_norm": 0.07882680739235833,
"learning_rate": 5.5670042769243375e-06,
"loss": 0.3521,
"num_input_tokens_seen": 80808472,
"step": 500
},
{
"epoch": 1.4519611370996761,
"grad_norm": 0.1760760350719321,
"learning_rate": 5.48960154654343e-06,
"loss": 0.3514,
"num_input_tokens_seen": 81619216,
"step": 505
},
{
"epoch": 1.46635480388629,
"grad_norm": 0.0932677849233813,
"learning_rate": 5.412080159690537e-06,
"loss": 0.3544,
"num_input_tokens_seen": 82417160,
"step": 510
},
{
"epoch": 1.480748470672904,
"grad_norm": 0.056089869978498436,
"learning_rate": 5.33445890391715e-06,
"loss": 0.355,
"num_input_tokens_seen": 83217520,
"step": 515
},
{
"epoch": 1.4951421374595177,
"grad_norm": 0.06493714997932841,
"learning_rate": 5.25675659097831e-06,
"loss": 0.3506,
"num_input_tokens_seen": 84030552,
"step": 520
},
{
"epoch": 1.5095358042461315,
"grad_norm": 0.05682480731625458,
"learning_rate": 5.178992052273519e-06,
"loss": 0.3594,
"num_input_tokens_seen": 84839896,
"step": 525
},
{
"epoch": 1.5239294710327456,
"grad_norm": 0.05607306565452241,
"learning_rate": 5.101184134282884e-06,
"loss": 0.3494,
"num_input_tokens_seen": 85662560,
"step": 530
},
{
"epoch": 1.5383231378193596,
"grad_norm": 0.09755835213667695,
"learning_rate": 5.023351693999621e-06,
"loss": 0.3526,
"num_input_tokens_seen": 86485568,
"step": 535
},
{
"epoch": 1.5527168046059734,
"grad_norm": 0.06270507638091757,
"learning_rate": 4.945513594360001e-06,
"loss": 0.3559,
"num_input_tokens_seen": 87302664,
"step": 540
},
{
"epoch": 1.5671104713925872,
"grad_norm": 0.05520508666248976,
"learning_rate": 4.867688699671857e-06,
"loss": 0.3583,
"num_input_tokens_seen": 88092320,
"step": 545
},
{
"epoch": 1.5815041381792012,
"grad_norm": 0.05923134133054089,
"learning_rate": 4.78989587104276e-06,
"loss": 0.3547,
"num_input_tokens_seen": 88903728,
"step": 550
},
{
"epoch": 1.595897804965815,
"grad_norm": 0.13487117489611755,
"learning_rate": 4.712153961808974e-06,
"loss": 0.3508,
"num_input_tokens_seen": 89714288,
"step": 555
},
{
"epoch": 1.6102914717524288,
"grad_norm": 0.07477267101349609,
"learning_rate": 4.63448181296628e-06,
"loss": 0.3562,
"num_input_tokens_seen": 90539936,
"step": 560
},
{
"epoch": 1.6246851385390428,
"grad_norm": 0.05848260185515576,
"learning_rate": 4.556898248603818e-06,
"loss": 0.3563,
"num_input_tokens_seen": 91348088,
"step": 565
},
{
"epoch": 1.6390788053256569,
"grad_norm": 0.939716017860552,
"learning_rate": 4.479422071341996e-06,
"loss": 0.3578,
"num_input_tokens_seen": 92153056,
"step": 570
},
{
"epoch": 1.6534724721122704,
"grad_norm": 0.05764958246910856,
"learning_rate": 4.402072057775625e-06,
"loss": 0.3507,
"num_input_tokens_seen": 92971456,
"step": 575
},
{
"epoch": 1.6678661388988845,
"grad_norm": 0.05569822817930277,
"learning_rate": 4.324866953923343e-06,
"loss": 0.3561,
"num_input_tokens_seen": 93770320,
"step": 580
},
{
"epoch": 1.6822598056854985,
"grad_norm": 0.06002574158177971,
"learning_rate": 4.247825470684465e-06,
"loss": 0.3539,
"num_input_tokens_seen": 94581992,
"step": 585
},
{
"epoch": 1.6966534724721123,
"grad_norm": 0.057235219716851825,
"learning_rate": 4.170966279304343e-06,
"loss": 0.3521,
"num_input_tokens_seen": 95372504,
"step": 590
},
{
"epoch": 1.711047139258726,
"grad_norm": 0.06556443270713266,
"learning_rate": 4.094308006849314e-06,
"loss": 0.3579,
"num_input_tokens_seen": 96171552,
"step": 595
},
{
"epoch": 1.7254408060453401,
"grad_norm": 0.06424878482783151,
"learning_rate": 4.017869231692393e-06,
"loss": 0.3547,
"num_input_tokens_seen": 96979448,
"step": 600
},
{
"epoch": 1.739834472831954,
"grad_norm": 0.08032219079598245,
"learning_rate": 3.9416684790107315e-06,
"loss": 0.3494,
"num_input_tokens_seen": 97797072,
"step": 605
},
{
"epoch": 1.7542281396185677,
"grad_norm": 0.4749112254453778,
"learning_rate": 3.8657242162959845e-06,
"loss": 0.3545,
"num_input_tokens_seen": 98590616,
"step": 610
},
{
"epoch": 1.7686218064051817,
"grad_norm": 0.06123533928947785,
"learning_rate": 3.79005484887866e-06,
"loss": 0.3521,
"num_input_tokens_seen": 99396368,
"step": 615
},
{
"epoch": 1.7830154731917958,
"grad_norm": 0.05742676071138364,
"learning_rate": 3.714678715467529e-06,
"loss": 0.3481,
"num_input_tokens_seen": 100203576,
"step": 620
},
{
"epoch": 1.7974091399784093,
"grad_norm": 0.06440672140421486,
"learning_rate": 3.639614083705178e-06,
"loss": 0.3484,
"num_input_tokens_seen": 101006960,
"step": 625
},
{
"epoch": 1.8118028067650234,
"grad_norm": 0.06147767275941702,
"learning_rate": 3.564879145740794e-06,
"loss": 0.348,
"num_input_tokens_seen": 101802048,
"step": 630
},
{
"epoch": 1.8261964735516374,
"grad_norm": 0.07134673122449259,
"learning_rate": 3.490492013821234e-06,
"loss": 0.3498,
"num_input_tokens_seen": 102601344,
"step": 635
},
{
"epoch": 1.8405901403382512,
"grad_norm": 0.06127784256623604,
"learning_rate": 3.4164707159014675e-06,
"loss": 0.3479,
"num_input_tokens_seen": 103423888,
"step": 640
},
{
"epoch": 1.854983807124865,
"grad_norm": 0.10291014818905699,
"learning_rate": 3.3428331912754507e-06,
"loss": 0.3501,
"num_input_tokens_seen": 104240432,
"step": 645
},
{
"epoch": 1.869377473911479,
"grad_norm": 0.083764374836595,
"learning_rate": 3.2695972862284707e-06,
"loss": 0.3534,
"num_input_tokens_seen": 105039520,
"step": 650
},
{
"epoch": 1.8837711406980928,
"grad_norm": 0.07402581019991672,
"learning_rate": 3.196780749712054e-06,
"loss": 0.3536,
"num_input_tokens_seen": 105849896,
"step": 655
},
{
"epoch": 1.8981648074847066,
"grad_norm": 0.05825043903199368,
"learning_rate": 3.124401229042443e-06,
"loss": 0.3519,
"num_input_tokens_seen": 106657432,
"step": 660
},
{
"epoch": 1.9125584742713206,
"grad_norm": 0.055875357784687135,
"learning_rate": 3.0524762656237184e-06,
"loss": 0.3493,
"num_input_tokens_seen": 107449072,
"step": 665
},
{
"epoch": 1.9269521410579347,
"grad_norm": 0.058675504332384255,
"learning_rate": 2.9810232906965875e-06,
"loss": 0.3486,
"num_input_tokens_seen": 108242240,
"step": 670
},
{
"epoch": 1.9413458078445482,
"grad_norm": 0.0517673347841925,
"learning_rate": 2.9100596211138576e-06,
"loss": 0.3473,
"num_input_tokens_seen": 109045472,
"step": 675
},
{
"epoch": 1.9557394746311623,
"grad_norm": 0.05749945983945952,
"learning_rate": 2.83960245514366e-06,
"loss": 0.3485,
"num_input_tokens_seen": 109863544,
"step": 680
},
{
"epoch": 1.9701331414177763,
"grad_norm": 0.05784125966865765,
"learning_rate": 2.769668868301374e-06,
"loss": 0.3461,
"num_input_tokens_seen": 110688568,
"step": 685
},
{
"epoch": 1.98452680820439,
"grad_norm": 0.062273752549577115,
"learning_rate": 2.700275809211343e-06,
"loss": 0.3432,
"num_input_tokens_seen": 111513728,
"step": 690
},
{
"epoch": 1.998920474991004,
"grad_norm": 0.05792150781045054,
"learning_rate": 2.631440095499306e-06,
"loss": 0.3437,
"num_input_tokens_seen": 112313520,
"step": 695
},
{
"epoch": 2.011514933429291,
"grad_norm": 0.06452905642316617,
"learning_rate": 2.5631784097166024e-06,
"loss": 0.2945,
"num_input_tokens_seen": 113017424,
"step": 700
},
{
"epoch": 2.025908600215905,
"grad_norm": 0.0754055479297737,
"learning_rate": 2.4955072952970993e-06,
"loss": 0.2782,
"num_input_tokens_seen": 113822512,
"step": 705
},
{
"epoch": 2.040302267002519,
"grad_norm": 0.06736914348176111,
"learning_rate": 2.42844315254784e-06,
"loss": 0.2747,
"num_input_tokens_seen": 114633976,
"step": 710
},
{
"epoch": 2.054695933789133,
"grad_norm": 0.062117519224842634,
"learning_rate": 2.3620022346743816e-06,
"loss": 0.2731,
"num_input_tokens_seen": 115431800,
"step": 715
},
{
"epoch": 2.069089600575747,
"grad_norm": 0.0648582689881186,
"learning_rate": 2.2962006438417704e-06,
"loss": 0.2737,
"num_input_tokens_seen": 116248920,
"step": 720
},
{
"epoch": 2.0834832673623604,
"grad_norm": 0.08190857311562821,
"learning_rate": 2.231054327272141e-06,
"loss": 0.2745,
"num_input_tokens_seen": 117061896,
"step": 725
},
{
"epoch": 2.0978769341489745,
"grad_norm": 0.06399331693798593,
"learning_rate": 2.1665790733798497e-06,
"loss": 0.2752,
"num_input_tokens_seen": 117872528,
"step": 730
},
{
"epoch": 2.1122706009355885,
"grad_norm": 0.0811898084709039,
"learning_rate": 2.102790507945107e-06,
"loss": 0.2719,
"num_input_tokens_seen": 118683856,
"step": 735
},
{
"epoch": 2.126664267722202,
"grad_norm": 0.06778838087191189,
"learning_rate": 2.039704090327024e-06,
"loss": 0.2722,
"num_input_tokens_seen": 119506984,
"step": 740
},
{
"epoch": 2.141057934508816,
"grad_norm": 0.07316784709705096,
"learning_rate": 1.9773351097169785e-06,
"loss": 0.273,
"num_input_tokens_seen": 120326304,
"step": 745
},
{
"epoch": 2.15545160129543,
"grad_norm": 0.08150150222214562,
"learning_rate": 1.9156986814332374e-06,
"loss": 0.28,
"num_input_tokens_seen": 121128528,
"step": 750
},
{
"epoch": 2.1698452680820437,
"grad_norm": 0.06737328239446577,
"learning_rate": 1.8548097432577162e-06,
"loss": 0.2746,
"num_input_tokens_seen": 121939984,
"step": 755
},
{
"epoch": 2.1842389348686577,
"grad_norm": 0.08471478094090777,
"learning_rate": 1.7946830518157505e-06,
"loss": 0.2731,
"num_input_tokens_seen": 122764136,
"step": 760
},
{
"epoch": 2.1986326016552717,
"grad_norm": 0.07721882265295917,
"learning_rate": 1.7353331789997869e-06,
"loss": 0.2702,
"num_input_tokens_seen": 123576472,
"step": 765
},
{
"epoch": 2.2130262684418858,
"grad_norm": 0.06032730562314184,
"learning_rate": 1.6767745084378445e-06,
"loss": 0.2768,
"num_input_tokens_seen": 124371520,
"step": 770
},
{
"epoch": 2.2274199352284993,
"grad_norm": 0.0627842929379572,
"learning_rate": 1.6190212320075871e-06,
"loss": 0.273,
"num_input_tokens_seen": 125181360,
"step": 775
},
{
"epoch": 2.2418136020151134,
"grad_norm": 0.05670333203137917,
"learning_rate": 1.5620873463968827e-06,
"loss": 0.2676,
"num_input_tokens_seen": 125996040,
"step": 780
},
{
"epoch": 2.2562072688017274,
"grad_norm": 0.08228221683732528,
"learning_rate": 1.5059866497116627e-06,
"loss": 0.2724,
"num_input_tokens_seen": 126821112,
"step": 785
},
{
"epoch": 2.270600935588341,
"grad_norm": 0.08066295227211785,
"learning_rate": 1.450732738131904e-06,
"loss": 0.2743,
"num_input_tokens_seen": 127622432,
"step": 790
},
{
"epoch": 2.284994602374955,
"grad_norm": 0.06110726435272247,
"learning_rate": 1.3963390026165596e-06,
"loss": 0.2739,
"num_input_tokens_seen": 128423736,
"step": 795
},
{
"epoch": 2.299388269161569,
"grad_norm": 0.06176432064087441,
"learning_rate": 1.3428186256582088e-06,
"loss": 0.2736,
"num_input_tokens_seen": 129236504,
"step": 800
},
{
"epoch": 2.3137819359481826,
"grad_norm": 0.07246369574623969,
"learning_rate": 1.2901845780882427e-06,
"loss": 0.2756,
"num_input_tokens_seen": 130032840,
"step": 805
},
{
"epoch": 2.3281756027347966,
"grad_norm": 0.22551094843089572,
"learning_rate": 1.238449615933343e-06,
"loss": 0.271,
"num_input_tokens_seen": 130842184,
"step": 810
},
{
"epoch": 2.3425692695214106,
"grad_norm": 0.06657259550779034,
"learning_rate": 1.1876262773240172e-06,
"loss": 0.2729,
"num_input_tokens_seen": 131639680,
"step": 815
},
{
"epoch": 2.3569629363080242,
"grad_norm": 0.06338272985414989,
"learning_rate": 1.1377268794559476e-06,
"loss": 0.2744,
"num_input_tokens_seen": 132458128,
"step": 820
},
{
"epoch": 2.3713566030946382,
"grad_norm": 0.1124043376777874,
"learning_rate": 1.0887635156048736e-06,
"loss": 0.2727,
"num_input_tokens_seen": 133269328,
"step": 825
},
{
"epoch": 2.3857502698812523,
"grad_norm": 0.0632677743276735,
"learning_rate": 1.040748052195752e-06,
"loss": 0.2778,
"num_input_tokens_seen": 134070720,
"step": 830
},
{
"epoch": 2.4001439366678663,
"grad_norm": 0.06492693963778486,
"learning_rate": 9.936921259268944e-07,
"loss": 0.2772,
"num_input_tokens_seen": 134882424,
"step": 835
},
{
"epoch": 2.41453760345448,
"grad_norm": 0.060076618526455246,
"learning_rate": 9.476071409497712e-07,
"loss": 0.2735,
"num_input_tokens_seen": 135698792,
"step": 840
},
{
"epoch": 2.428931270241094,
"grad_norm": 0.08280106425022785,
"learning_rate": 9.025042661051808e-07,
"loss": 0.2705,
"num_input_tokens_seen": 136530352,
"step": 845
},
{
"epoch": 2.443324937027708,
"grad_norm": 0.05958478280414458,
"learning_rate": 8.583944322164528e-07,
"loss": 0.2737,
"num_input_tokens_seen": 137327240,
"step": 850
},
{
"epoch": 2.457718603814322,
"grad_norm": 0.05777233020881536,
"learning_rate": 8.15288329440318e-07,
"loss": 0.2771,
"num_input_tokens_seen": 138139400,
"step": 855
},
{
"epoch": 2.4721122706009355,
"grad_norm": 0.06057855771827018,
"learning_rate": 7.731964046761231e-07,
"loss": 0.2754,
"num_input_tokens_seen": 138957568,
"step": 860
},
{
"epoch": 2.4865059373875495,
"grad_norm": 0.056813895747753126,
"learning_rate": 7.321288590339898e-07,
"loss": 0.2756,
"num_input_tokens_seen": 139771480,
"step": 865
},
{
"epoch": 2.5008996041741636,
"grad_norm": 0.06117083971010646,
"learning_rate": 6.920956453625405e-07,
"loss": 0.2768,
"num_input_tokens_seen": 140570624,
"step": 870
},
{
"epoch": 2.515293270960777,
"grad_norm": 0.0660405745496251,
"learning_rate": 6.531064658368019e-07,
"loss": 0.2704,
"num_input_tokens_seen": 141385080,
"step": 875
},
{
"epoch": 2.529686937747391,
"grad_norm": 0.057932630895581885,
"learning_rate": 6.151707696068443e-07,
"loss": 0.2778,
"num_input_tokens_seen": 142184240,
"step": 880
},
{
"epoch": 2.544080604534005,
"grad_norm": 0.06878718995000013,
"learning_rate": 5.782977505077536e-07,
"loss": 0.2729,
"num_input_tokens_seen": 142995056,
"step": 885
},
{
"epoch": 2.5584742713206188,
"grad_norm": 0.0580673873503823,
"learning_rate": 5.42496344831478e-07,
"loss": 0.2731,
"num_input_tokens_seen": 143820176,
"step": 890
},
{
"epoch": 2.572867938107233,
"grad_norm": 0.057106792488244816,
"learning_rate": 5.077752291610854e-07,
"loss": 0.2726,
"num_input_tokens_seen": 144650192,
"step": 895
},
{
"epoch": 2.587261604893847,
"grad_norm": 0.06262446744657013,
"learning_rate": 4.741428182679736e-07,
"loss": 0.2658,
"num_input_tokens_seen": 145470624,
"step": 900
},
{
"epoch": 2.6016552716804604,
"grad_norm": 0.05888508525468414,
"learning_rate": 4.416072630725166e-07,
"loss": 0.2681,
"num_input_tokens_seen": 146273344,
"step": 905
},
{
"epoch": 2.6160489384670744,
"grad_norm": 0.06149362474237269,
"learning_rate": 4.101764486686649e-07,
"loss": 0.2727,
"num_input_tokens_seen": 147072896,
"step": 910
},
{
"epoch": 2.6304426052536884,
"grad_norm": 0.07222401443910899,
"learning_rate": 3.798579924129736e-07,
"loss": 0.2717,
"num_input_tokens_seen": 147878304,
"step": 915
},
{
"epoch": 2.644836272040302,
"grad_norm": 0.061506122244776834,
"learning_rate": 3.5065924207850486e-07,
"loss": 0.2764,
"num_input_tokens_seen": 148671456,
"step": 920
},
{
"epoch": 2.659229938826916,
"grad_norm": 0.0613909216214167,
"learning_rate": 3.225872740740754e-07,
"loss": 0.2686,
"num_input_tokens_seen": 149483856,
"step": 925
},
{
"epoch": 2.67362360561353,
"grad_norm": 0.06453955158522587,
"learning_rate": 2.9564889172926993e-07,
"loss": 0.2753,
"num_input_tokens_seen": 150283600,
"step": 930
},
{
"epoch": 2.688017272400144,
"grad_norm": 0.10443619424766862,
"learning_rate": 2.6985062364562607e-07,
"loss": 0.2696,
"num_input_tokens_seen": 151100280,
"step": 935
},
{
"epoch": 2.7024109391867577,
"grad_norm": 0.06230171634578411,
"learning_rate": 2.451987221144109e-07,
"loss": 0.271,
"num_input_tokens_seen": 151904000,
"step": 940
},
{
"epoch": 2.7168046059733717,
"grad_norm": 0.059711746877137976,
"learning_rate": 2.2169916160136029e-07,
"loss": 0.2709,
"num_input_tokens_seen": 152705992,
"step": 945
},
{
"epoch": 2.7311982727599857,
"grad_norm": 0.06539560738526348,
"learning_rate": 1.9935763729874435e-07,
"loss": 0.2718,
"num_input_tokens_seen": 153518184,
"step": 950
},
{
"epoch": 2.7455919395465997,
"grad_norm": 0.060237242887579666,
"learning_rate": 1.7817956374512334e-07,
"loss": 0.2705,
"num_input_tokens_seen": 154348480,
"step": 955
},
{
"epoch": 2.7599856063332133,
"grad_norm": 0.2487927082694509,
"learning_rate": 1.5817007351311476e-07,
"loss": 0.2744,
"num_input_tokens_seen": 155152208,
"step": 960
},
{
"epoch": 2.7743792731198273,
"grad_norm": 0.06025522620856994,
"learning_rate": 1.393340159654999e-07,
"loss": 0.2749,
"num_input_tokens_seen": 155945864,
"step": 965
},
{
"epoch": 2.7887729399064414,
"grad_norm": 0.2603522650255621,
"learning_rate": 1.2167595607996296e-07,
"loss": 0.2702,
"num_input_tokens_seen": 156758192,
"step": 970
},
{
"epoch": 2.803166606693055,
"grad_norm": 0.06187354928479084,
"learning_rate": 1.0520017334275823e-07,
"loss": 0.2665,
"num_input_tokens_seen": 157574744,
"step": 975
},
{
"epoch": 2.817560273479669,
"grad_norm": 0.07120098362481125,
"learning_rate": 8.991066071156074e-08,
"loss": 0.2692,
"num_input_tokens_seen": 158393024,
"step": 980
},
{
"epoch": 2.831953940266283,
"grad_norm": 0.055215645995719737,
"learning_rate": 7.581112364776044e-08,
"loss": 0.265,
"num_input_tokens_seen": 159206248,
"step": 985
},
{
"epoch": 2.8463476070528966,
"grad_norm": 0.06017854152413813,
"learning_rate": 6.290497921843219e-08,
"loss": 0.2696,
"num_input_tokens_seen": 160007232,
"step": 990
},
{
"epoch": 2.8607412738395106,
"grad_norm": 0.05974227356522313,
"learning_rate": 5.1195355268199854e-08,
"loss": 0.2711,
"num_input_tokens_seen": 160824040,
"step": 995
},
{
"epoch": 2.8751349406261246,
"grad_norm": 0.06171006884841187,
"learning_rate": 4.0685089661192114e-08,
"loss": 0.2697,
"num_input_tokens_seen": 161628064,
"step": 1000
},
{
"epoch": 2.889528607412738,
"grad_norm": 0.058328519842120057,
"learning_rate": 3.1376729593276534e-08,
"loss": 0.2712,
"num_input_tokens_seen": 162440072,
"step": 1005
},
{
"epoch": 2.9039222741993522,
"grad_norm": 0.06284590737604528,
"learning_rate": 2.327253097474169e-08,
"loss": 0.2707,
"num_input_tokens_seen": 163224848,
"step": 1010
},
{
"epoch": 2.9183159409859663,
"grad_norm": 0.06229073320667278,
"learning_rate": 1.637445788356673e-08,
"loss": 0.2696,
"num_input_tokens_seen": 164001968,
"step": 1015
},
{
"epoch": 2.93270960777258,
"grad_norm": 0.05999956361820157,
"learning_rate": 1.0684182089423234e-08,
"loss": 0.2696,
"num_input_tokens_seen": 164814128,
"step": 1020
},
{
"epoch": 2.947103274559194,
"grad_norm": 0.06911814969710056,
"learning_rate": 6.20308264851488e-09,
"loss": 0.2719,
"num_input_tokens_seen": 165611872,
"step": 1025
},
{
"epoch": 2.961496941345808,
"grad_norm": 0.06993687176707261,
"learning_rate": 2.932245569360892e-09,
"loss": 0.2729,
"num_input_tokens_seen": 166408296,
"step": 1030
},
{
"epoch": 2.975890608132422,
"grad_norm": 0.05605369021482799,
"learning_rate": 8.724635495965805e-10,
"loss": 0.2704,
"num_input_tokens_seen": 167212328,
"step": 1035
},
{
"epoch": 2.9902842749190355,
"grad_norm": 0.06125219377349758,
"learning_rate": 2.4235783861459304e-11,
"loss": 0.2718,
"num_input_tokens_seen": 168024280,
"step": 1040
}
],
"logging_steps": 5,
"max_steps": 1041,
"num_input_tokens_seen": 168192984,
"num_train_epochs": 3,
"save_steps": 1.0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1330846506680320.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
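
The state above is plain JSON, so it can be inspected without any Transformers machinery. Below is a minimal sketch, assuming the file is saved locally as `trainer_state.json` (the path is an assumption, not part of the original artifact), that reads `log_history` and plots training loss and learning rate against global step. In this run the loss falls from roughly 0.84 at step 5 to about 0.27 by step 1040, and the learning-rate trace is consistent with a short warmup to ~1e-5 followed by a cosine-style decay toward zero.

```python
# Minimal sketch: inspect a Hugging Face trainer_state.json offline.
# Assumes the file sits at ./trainer_state.json (hypothetical path).
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only entries that carry a training loss (in this file, all of them;
# the guard matters for states that also log eval-only entries).
history = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in history]
losses = [entry["loss"] for entry in history]
lrs = [entry["learning_rate"] for entry in history]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("global step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("global step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()
```

The same pattern works for the other logged fields: every entry in this file also carries `grad_norm` and `num_input_tokens_seen` (ending at 168,192,984 tokens after 1041 steps).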