{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 4466,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005597850425436633,
"grad_norm": 9.592120170593262,
"learning_rate": 9.999287451410797e-06,
"loss": 1.4229,
"step": 25
},
{
"epoch": 0.011195700850873265,
"grad_norm": 10.640639305114746,
"learning_rate": 9.997030034001815e-06,
"loss": 1.3665,
"step": 50
},
{
"epoch": 0.016793551276309895,
"grad_norm": 11.189779281616211,
"learning_rate": 9.993227208820974e-06,
"loss": 1.0753,
"step": 75
},
{
"epoch": 0.02239140170174653,
"grad_norm": 15.177507400512695,
"learning_rate": 9.987880151949976e-06,
"loss": 1.1291,
"step": 100
},
{
"epoch": 0.027989252127183162,
"grad_norm": 6.662464141845703,
"learning_rate": 9.980990517047731e-06,
"loss": 1.2145,
"step": 125
},
{
"epoch": 0.03358710255261979,
"grad_norm": 9.72287368774414,
"learning_rate": 9.972560434838943e-06,
"loss": 1.0737,
"step": 150
},
{
"epoch": 0.03918495297805643,
"grad_norm": 8.399821281433105,
"learning_rate": 9.96259251245514e-06,
"loss": 0.9843,
"step": 175
},
{
"epoch": 0.04478280340349306,
"grad_norm": 10.552536010742188,
"learning_rate": 9.951089832628388e-06,
"loss": 1.1599,
"step": 200
},
{
"epoch": 0.05038065382892969,
"grad_norm": 8.101223945617676,
"learning_rate": 9.938055952737908e-06,
"loss": 1.1275,
"step": 225
},
{
"epoch": 0.055978504254366325,
"grad_norm": 6.296974182128906,
"learning_rate": 9.923494903709896e-06,
"loss": 1.0943,
"step": 250
},
{
"epoch": 0.06157635467980296,
"grad_norm": 8.705899238586426,
"learning_rate": 9.90741118877091e-06,
"loss": 1.0905,
"step": 275
},
{
"epoch": 0.06717420510523958,
"grad_norm": 13.383705139160156,
"learning_rate": 9.88980978205516e-06,
"loss": 1.0648,
"step": 300
},
{
"epoch": 0.07277205553067621,
"grad_norm": 6.9907636642456055,
"learning_rate": 9.8706961270662e-06,
"loss": 1.0228,
"step": 325
},
{
"epoch": 0.07836990595611286,
"grad_norm": 10.480993270874023,
"learning_rate": 9.85007613499343e-06,
"loss": 1.0825,
"step": 350
},
{
"epoch": 0.08396775638154949,
"grad_norm": 11.11700439453125,
"learning_rate": 9.82795618288397e-06,
"loss": 1.083,
"step": 375
},
{
"epoch": 0.08956560680698612,
"grad_norm": 10.408477783203125,
"learning_rate": 9.804343111670472e-06,
"loss": 1.1088,
"step": 400
},
{
"epoch": 0.09516345723242275,
"grad_norm": 8.23421859741211,
"learning_rate": 9.779244224055446e-06,
"loss": 1.1182,
"step": 425
},
{
"epoch": 0.10076130765785939,
"grad_norm": 11.640494346618652,
"learning_rate": 9.752667282252788e-06,
"loss": 1.152,
"step": 450
},
{
"epoch": 0.10635915808329602,
"grad_norm": 9.683300971984863,
"learning_rate": 9.724620505587205e-06,
"loss": 1.1508,
"step": 475
},
{
"epoch": 0.11195700850873265,
"grad_norm": 12.511320114135742,
"learning_rate": 9.69511256795226e-06,
"loss": 0.9544,
"step": 500
},
{
"epoch": 0.11755485893416928,
"grad_norm": 6.891467094421387,
"learning_rate": 9.664152595127834e-06,
"loss": 1.0014,
"step": 525
},
{
"epoch": 0.12315270935960591,
"grad_norm": 8.182930946350098,
"learning_rate": 9.63175016195784e-06,
"loss": 1.0197,
"step": 550
},
{
"epoch": 0.12875055978504255,
"grad_norm": 12.893110275268555,
"learning_rate": 9.597915289389067e-06,
"loss": 0.9908,
"step": 575
},
{
"epoch": 0.13434841021047916,
"grad_norm": 12.896308898925781,
"learning_rate": 9.56265844137203e-06,
"loss": 1.0792,
"step": 600
},
{
"epoch": 0.1399462606359158,
"grad_norm": 6.506565093994141,
"learning_rate": 9.525990521624855e-06,
"loss": 0.996,
"step": 625
},
{
"epoch": 0.14554411106135243,
"grad_norm": 11.659127235412598,
"learning_rate": 9.487922870261123e-06,
"loss": 1.1131,
"step": 650
},
{
"epoch": 0.15114196148678907,
"grad_norm": 11.150578498840332,
"learning_rate": 9.44846726028277e-06,
"loss": 1.0725,
"step": 675
},
{
"epoch": 0.15673981191222572,
"grad_norm": 10.530040740966797,
"learning_rate": 9.407635893939111e-06,
"loss": 1.2278,
"step": 700
},
{
"epoch": 0.16233766233766234,
"grad_norm": 10.855611801147461,
"learning_rate": 9.365441398953103e-06,
"loss": 0.9453,
"step": 725
},
{
"epoch": 0.16793551276309898,
"grad_norm": 9.590290069580078,
"learning_rate": 9.321896824616036e-06,
"loss": 1.0567,
"step": 750
},
{
"epoch": 0.1735333631885356,
"grad_norm": 8.801950454711914,
"learning_rate": 9.27701563775183e-06,
"loss": 1.1515,
"step": 775
},
{
"epoch": 0.17913121361397225,
"grad_norm": 14.007678985595703,
"learning_rate": 9.23081171855222e-06,
"loss": 0.904,
"step": 800
},
{
"epoch": 0.18472906403940886,
"grad_norm": 11.931122779846191,
"learning_rate": 9.183299356284102e-06,
"loss": 1.0948,
"step": 825
},
{
"epoch": 0.1903269144648455,
"grad_norm": 11.707269668579102,
"learning_rate": 9.134493244870347e-06,
"loss": 1.0469,
"step": 850
},
{
"epoch": 0.19592476489028213,
"grad_norm": 7.64641809463501,
"learning_rate": 9.084408478345486e-06,
"loss": 1.0608,
"step": 875
},
{
"epoch": 0.20152261531571877,
"grad_norm": 11.14941692352295,
"learning_rate": 9.033060546187651e-06,
"loss": 1.0769,
"step": 900
},
{
"epoch": 0.2071204657411554,
"grad_norm": 7.081947326660156,
"learning_rate": 8.98046532852822e-06,
"loss": 0.9925,
"step": 925
},
{
"epoch": 0.21271831616659204,
"grad_norm": 8.95167064666748,
"learning_rate": 8.926639091240636e-06,
"loss": 0.8297,
"step": 950
},
{
"epoch": 0.21831616659202865,
"grad_norm": 8.65690803527832,
"learning_rate": 8.871598480909945e-06,
"loss": 0.8238,
"step": 975
},
{
"epoch": 0.2239140170174653,
"grad_norm": 8.113872528076172,
"learning_rate": 8.815360519684579e-06,
"loss": 1.0677,
"step": 1000
},
{
"epoch": 0.22951186744290192,
"grad_norm": 7.616383075714111,
"learning_rate": 8.757942600012008e-06,
"loss": 1.0761,
"step": 1025
},
{
"epoch": 0.23510971786833856,
"grad_norm": 10.469049453735352,
"learning_rate": 8.699362479259847e-06,
"loss": 1.0677,
"step": 1050
},
{
"epoch": 0.24070756829377518,
"grad_norm": 10.512489318847656,
"learning_rate": 8.639638274224127e-06,
"loss": 0.9852,
"step": 1075
},
{
"epoch": 0.24630541871921183,
"grad_norm": 14.909335136413574,
"learning_rate": 8.578788455526398e-06,
"loss": 0.9895,
"step": 1100
},
{
"epoch": 0.25190326914464845,
"grad_norm": 9.663930892944336,
"learning_rate": 8.516831841901406e-06,
"loss": 0.9607,
"step": 1125
},
{
"epoch": 0.2575011195700851,
"grad_norm": 10.520059585571289,
"learning_rate": 8.453787594377117e-06,
"loss": 1.0332,
"step": 1150
},
{
"epoch": 0.26309896999552174,
"grad_norm": 10.005462646484375,
"learning_rate": 8.389675210348864e-06,
"loss": 1.0453,
"step": 1175
},
{
"epoch": 0.2686968204209583,
"grad_norm": 10.835626602172852,
"learning_rate": 8.3245145175495e-06,
"loss": 1.0625,
"step": 1200
},
{
"epoch": 0.274294670846395,
"grad_norm": 5.667449951171875,
"learning_rate": 8.258325667917355e-06,
"loss": 0.8611,
"step": 1225
},
{
"epoch": 0.2798925212718316,
"grad_norm": 13.251060485839844,
"learning_rate": 8.191129131363942e-06,
"loss": 1.0864,
"step": 1250
},
{
"epoch": 0.28549037169726826,
"grad_norm": 7.886396884918213,
"learning_rate": 8.122945689443328e-06,
"loss": 0.8904,
"step": 1275
},
{
"epoch": 0.29108822212270485,
"grad_norm": 9.646678924560547,
"learning_rate": 8.053796428925123e-06,
"loss": 0.9213,
"step": 1300
},
{
"epoch": 0.2966860725481415,
"grad_norm": 10.207015037536621,
"learning_rate": 7.98370273527307e-06,
"loss": 1.0658,
"step": 1325
},
{
"epoch": 0.30228392297357815,
"grad_norm": 6.621966361999512,
"learning_rate": 7.91268628603127e-06,
"loss": 1.1713,
"step": 1350
},
{
"epoch": 0.3078817733990148,
"grad_norm": 7.847959518432617,
"learning_rate": 7.840769044120067e-06,
"loss": 0.9781,
"step": 1375
},
{
"epoch": 0.31347962382445144,
"grad_norm": 9.040787696838379,
"learning_rate": 7.76797325104368e-06,
"loss": 0.9718,
"step": 1400
},
{
"epoch": 0.319077474249888,
"grad_norm": 6.201238632202148,
"learning_rate": 7.694321420011672e-06,
"loss": 0.9015,
"step": 1425
},
{
"epoch": 0.3246753246753247,
"grad_norm": 14.064117431640625,
"learning_rate": 7.619836328976416e-06,
"loss": 1.0073,
"step": 1450
},
{
"epoch": 0.3302731751007613,
"grad_norm": 6.1384663581848145,
"learning_rate": 7.5445410135886455e-06,
"loss": 0.8406,
"step": 1475
},
{
"epoch": 0.33587102552619796,
"grad_norm": 7.594089508056641,
"learning_rate": 7.468458760073334e-06,
"loss": 0.8915,
"step": 1500
},
{
"epoch": 0.34146887595163455,
"grad_norm": 9.95975399017334,
"learning_rate": 7.391613098028081e-06,
"loss": 1.1634,
"step": 1525
},
{
"epoch": 0.3470667263770712,
"grad_norm": 10.708215713500977,
"learning_rate": 7.314027793146219e-06,
"loss": 0.9969,
"step": 1550
},
{
"epoch": 0.35266457680250785,
"grad_norm": 9.816734313964844,
"learning_rate": 7.23572683986691e-06,
"loss": 1.1063,
"step": 1575
},
{
"epoch": 0.3582624272279445,
"grad_norm": 10.419496536254883,
"learning_rate": 7.156734453954503e-06,
"loss": 1.0399,
"step": 1600
},
{
"epoch": 0.3638602776533811,
"grad_norm": 7.747687339782715,
"learning_rate": 7.0770750650094335e-06,
"loss": 0.8368,
"step": 1625
},
{
"epoch": 0.3694581280788177,
"grad_norm": 9.370880126953125,
"learning_rate": 6.996773308913003e-06,
"loss": 1.0139,
"step": 1650
},
{
"epoch": 0.3750559785042544,
"grad_norm": 10.095951080322266,
"learning_rate": 6.915854020208358e-06,
"loss": 1.0617,
"step": 1675
},
{
"epoch": 0.380653828929691,
"grad_norm": 11.371135711669922,
"learning_rate": 6.834342224420016e-06,
"loss": 1.0399,
"step": 1700
},
{
"epoch": 0.3862516793551276,
"grad_norm": 9.077759742736816,
"learning_rate": 6.752263130314361e-06,
"loss": 1.0934,
"step": 1725
},
{
"epoch": 0.39184952978056425,
"grad_norm": 8.414709091186523,
"learning_rate": 6.669642122103423e-06,
"loss": 0.9078,
"step": 1750
},
{
"epoch": 0.3974473802060009,
"grad_norm": 7.037852764129639,
"learning_rate": 6.586504751594451e-06,
"loss": 1.0392,
"step": 1775
},
{
"epoch": 0.40304523063143755,
"grad_norm": 12.552664756774902,
"learning_rate": 6.5028767302875974e-06,
"loss": 0.9487,
"step": 1800
},
{
"epoch": 0.40864308105687414,
"grad_norm": 6.407071113586426,
"learning_rate": 6.418783921424269e-06,
"loss": 0.9555,
"step": 1825
},
{
"epoch": 0.4142409314823108,
"grad_norm": 5.643894672393799,
"learning_rate": 6.334252331988515e-06,
"loss": 1.074,
"step": 1850
},
{
"epoch": 0.4198387819077474,
"grad_norm": 7.254835605621338,
"learning_rate": 6.249308104663966e-06,
"loss": 0.9496,
"step": 1875
},
{
"epoch": 0.4254366323331841,
"grad_norm": 8.206978797912598,
"learning_rate": 6.1639775097488155e-06,
"loss": 1.0772,
"step": 1900
},
{
"epoch": 0.43103448275862066,
"grad_norm": 4.874852657318115,
"learning_rate": 6.07828693703133e-06,
"loss": 0.8274,
"step": 1925
},
{
"epoch": 0.4366323331840573,
"grad_norm": 14.283883094787598,
"learning_rate": 5.992262887628405e-06,
"loss": 1.1204,
"step": 1950
},
{
"epoch": 0.44223018360949395,
"grad_norm": 12.45128059387207,
"learning_rate": 5.905931965789688e-06,
"loss": 1.0209,
"step": 1975
},
{
"epoch": 0.4478280340349306,
"grad_norm": 9.155681610107422,
"learning_rate": 5.819320870669806e-06,
"loss": 1.0208,
"step": 2000
},
{
"epoch": 0.45342588446036725,
"grad_norm": 7.600436210632324,
"learning_rate": 5.732456388071247e-06,
"loss": 0.9395,
"step": 2025
},
{
"epoch": 0.45902373488580384,
"grad_norm": 10.021013259887695,
"learning_rate": 5.645365382160446e-06,
"loss": 0.9514,
"step": 2050
},
{
"epoch": 0.4646215853112405,
"grad_norm": 10.850826263427734,
"learning_rate": 5.558074787159629e-06,
"loss": 1.0362,
"step": 2075
},
{
"epoch": 0.4702194357366771,
"grad_norm": 8.926505088806152,
"learning_rate": 5.470611599016986e-06,
"loss": 0.9716,
"step": 2100
},
{
"epoch": 0.4758172861621138,
"grad_norm": 6.5484185218811035,
"learning_rate": 5.383002867057778e-06,
"loss": 1.0704,
"step": 2125
},
{
"epoch": 0.48141513658755036,
"grad_norm": 7.495320796966553,
"learning_rate": 5.295275685618905e-06,
"loss": 0.9566,
"step": 2150
},
{
"epoch": 0.487012987012987,
"grad_norm": 8.97859001159668,
"learning_rate": 5.2074571856695776e-06,
"loss": 0.9744,
"step": 2175
},
{
"epoch": 0.49261083743842365,
"grad_norm": 8.450396537780762,
"learning_rate": 5.119574526420652e-06,
"loss": 1.0064,
"step": 2200
},
{
"epoch": 0.4982086878638603,
"grad_norm": 15.110372543334961,
"learning_rate": 5.0316548869252205e-06,
"loss": 0.9724,
"step": 2225
},
{
"epoch": 0.5038065382892969,
"grad_norm": 11.507658004760742,
"learning_rate": 4.943725457673086e-06,
"loss": 1.0204,
"step": 2250
},
{
"epoch": 0.5094043887147336,
"grad_norm": 5.711903095245361,
"learning_rate": 4.8558134321816816e-06,
"loss": 1.0455,
"step": 2275
},
{
"epoch": 0.5150022391401702,
"grad_norm": 7.272406101226807,
"learning_rate": 4.76794599858606e-06,
"loss": 1.0133,
"step": 2300
},
{
"epoch": 0.5206000895656068,
"grad_norm": 6.9339680671691895,
"learning_rate": 4.680150331230552e-06,
"loss": 1.1114,
"step": 2325
},
{
"epoch": 0.5261979399910435,
"grad_norm": 6.307779788970947,
"learning_rate": 4.592453582264684e-06,
"loss": 0.8829,
"step": 2350
},
{
"epoch": 0.5317957904164801,
"grad_norm": 4.144500255584717,
"learning_rate": 4.504882873245961e-06,
"loss": 1.0952,
"step": 2375
},
{
"epoch": 0.5373936408419167,
"grad_norm": 8.365829467773438,
"learning_rate": 4.417465286752109e-06,
"loss": 1.0237,
"step": 2400
},
{
"epoch": 0.5429914912673534,
"grad_norm": 9.530734062194824,
"learning_rate": 4.330227858005379e-06,
"loss": 1.0088,
"step": 2425
},
{
"epoch": 0.54858934169279,
"grad_norm": 9.499034881591797,
"learning_rate": 4.243197566511493e-06,
"loss": 1.0404,
"step": 2450
},
{
"epoch": 0.5541871921182266,
"grad_norm": 6.624443054199219,
"learning_rate": 4.15640132771581e-06,
"loss": 0.802,
"step": 2475
},
{
"epoch": 0.5597850425436632,
"grad_norm": 4.885298252105713,
"learning_rate": 4.069865984679332e-06,
"loss": 0.9824,
"step": 2500
},
{
"epoch": 0.5653828929690998,
"grad_norm": 10.335043907165527,
"learning_rate": 3.9836182997770586e-06,
"loss": 1.0334,
"step": 2525
},
{
"epoch": 0.5709807433945365,
"grad_norm": 6.98168420791626,
"learning_rate": 3.897684946421326e-06,
"loss": 1.0047,
"step": 2550
},
{
"epoch": 0.5765785938199731,
"grad_norm": 7.940051555633545,
"learning_rate": 3.8120925008126457e-06,
"loss": 0.8648,
"step": 2575
},
{
"epoch": 0.5821764442454097,
"grad_norm": 7.1400885581970215,
"learning_rate": 3.7268674337206025e-06,
"loss": 0.8521,
"step": 2600
},
{
"epoch": 0.5877742946708464,
"grad_norm": 9.360673904418945,
"learning_rate": 3.6420361022973784e-06,
"loss": 0.8878,
"step": 2625
},
{
"epoch": 0.593372145096283,
"grad_norm": 6.841546058654785,
"learning_rate": 3.5576247419263854e-06,
"loss": 0.9828,
"step": 2650
},
{
"epoch": 0.5989699955217197,
"grad_norm": 9.706862449645996,
"learning_rate": 3.4736594581085837e-06,
"loss": 1.1297,
"step": 2675
},
{
"epoch": 0.6045678459471563,
"grad_norm": 9.606383323669434,
"learning_rate": 3.390166218388956e-06,
"loss": 1.0531,
"step": 2700
},
{
"epoch": 0.6101656963725929,
"grad_norm": 5.520534515380859,
"learning_rate": 3.3071708443256414e-06,
"loss": 0.9844,
"step": 2725
},
{
"epoch": 0.6157635467980296,
"grad_norm": 9.2130765914917,
"learning_rate": 3.224699003504236e-06,
"loss": 0.9696,
"step": 2750
},
{
"epoch": 0.6213613972234662,
"grad_norm": 8.891196250915527,
"learning_rate": 3.142776201599689e-06,
"loss": 0.8803,
"step": 2775
},
{
"epoch": 0.6269592476489029,
"grad_norm": 9.511999130249023,
"learning_rate": 3.061427774488287e-06,
"loss": 1.0108,
"step": 2800
},
{
"epoch": 0.6325570980743395,
"grad_norm": 5.509415626525879,
"learning_rate": 2.9806788804121562e-06,
"loss": 0.8862,
"step": 2825
},
{
"epoch": 0.638154948499776,
"grad_norm": 5.889389991760254,
"learning_rate": 2.9005544921986774e-06,
"loss": 0.9829,
"step": 2850
},
{
"epoch": 0.6437527989252128,
"grad_norm": 8.13467788696289,
"learning_rate": 2.8210793895372722e-06,
"loss": 0.8431,
"step": 2875
},
{
"epoch": 0.6493506493506493,
"grad_norm": 10.089922904968262,
"learning_rate": 2.742278151315898e-06,
"loss": 1.1452,
"step": 2900
},
{
"epoch": 0.6549484997760859,
"grad_norm": 9.242680549621582,
"learning_rate": 2.6641751480196485e-06,
"loss": 0.906,
"step": 2925
},
{
"epoch": 0.6605463502015226,
"grad_norm": 11.566295623779297,
"learning_rate": 2.5867945341938255e-06,
"loss": 0.941,
"step": 2950
},
{
"epoch": 0.6661442006269592,
"grad_norm": 8.217988014221191,
"learning_rate": 2.510160240973757e-06,
"loss": 0.9058,
"step": 2975
},
{
"epoch": 0.6717420510523959,
"grad_norm": 13.650288581848145,
"learning_rate": 2.434295968683741e-06,
"loss": 1.1236,
"step": 3000
},
{
"epoch": 0.6773399014778325,
"grad_norm": 9.068547248840332,
"learning_rate": 2.3592251795073564e-06,
"loss": 0.9533,
"step": 3025
},
{
"epoch": 0.6829377519032691,
"grad_norm": 10.866704940795898,
"learning_rate": 2.2849710902314205e-06,
"loss": 1.0299,
"step": 3050
},
{
"epoch": 0.6885356023287058,
"grad_norm": 9.991189002990723,
"learning_rate": 2.211556665065854e-06,
"loss": 0.9484,
"step": 3075
},
{
"epoch": 0.6941334527541424,
"grad_norm": 7.939027786254883,
"learning_rate": 2.1390046085416356e-06,
"loss": 0.8995,
"step": 3100
},
{
"epoch": 0.699731303179579,
"grad_norm": 10.984658241271973,
"learning_rate": 2.0673373584890847e-06,
"loss": 1.0449,
"step": 3125
},
{
"epoch": 0.7053291536050157,
"grad_norm": 6.965851306915283,
"learning_rate": 1.996577079098628e-06,
"loss": 0.976,
"step": 3150
},
{
"epoch": 0.7109270040304523,
"grad_norm": 10.873515129089355,
"learning_rate": 1.9267456540661723e-06,
"loss": 1.0484,
"step": 3175
},
{
"epoch": 0.716524854455889,
"grad_norm": 8.086957931518555,
"learning_rate": 1.8578646798252432e-06,
"loss": 0.8454,
"step": 3200
},
{
"epoch": 0.7221227048813256,
"grad_norm": 9.924606323242188,
"learning_rate": 1.7899554588679636e-06,
"loss": 1.0399,
"step": 3225
},
{
"epoch": 0.7277205553067622,
"grad_norm": 7.758438587188721,
"learning_rate": 1.7230389931569242e-06,
"loss": 0.9454,
"step": 3250
},
{
"epoch": 0.7333184057321989,
"grad_norm": 6.910051345825195,
"learning_rate": 1.657135977630015e-06,
"loss": 0.9082,
"step": 3275
},
{
"epoch": 0.7389162561576355,
"grad_norm": 8.083282470703125,
"learning_rate": 1.5922667938001913e-06,
"loss": 0.958,
"step": 3300
},
{
"epoch": 0.7445141065830722,
"grad_norm": 10.176769256591797,
"learning_rate": 1.5284515034521858e-06,
"loss": 0.9261,
"step": 3325
},
{
"epoch": 0.7501119570085087,
"grad_norm": 8.147079467773438,
"learning_rate": 1.4657098424380945e-06,
"loss": 0.9271,
"step": 3350
},
{
"epoch": 0.7557098074339453,
"grad_norm": 9.081329345703125,
"learning_rate": 1.4040612145737608e-06,
"loss": 1.031,
"step": 3375
},
{
"epoch": 0.761307657859382,
"grad_norm": 8.017196655273438,
"learning_rate": 1.3435246856378524e-06,
"loss": 1.0238,
"step": 3400
},
{
"epoch": 0.7669055082848186,
"grad_norm": 9.929486274719238,
"learning_rate": 1.284118977475481e-06,
"loss": 0.954,
"step": 3425
},
{
"epoch": 0.7725033587102552,
"grad_norm": 12.716034889221191,
"learning_rate": 1.2258624622081755e-06,
"loss": 0.9619,
"step": 3450
},
{
"epoch": 0.7781012091356919,
"grad_norm": 8.163155555725098,
"learning_rate": 1.1687731565520372e-06,
"loss": 1.1041,
"step": 3475
},
{
"epoch": 0.7836990595611285,
"grad_norm": 9.499692916870117,
"learning_rate": 1.112868716245783e-06,
"loss": 0.8927,
"step": 3500
},
{
"epoch": 0.7892969099865652,
"grad_norm": 6.03558874130249,
"learning_rate": 1.058166430590446e-06,
"loss": 0.9804,
"step": 3525
},
{
"epoch": 0.7948947604120018,
"grad_norm": 8.300068855285645,
"learning_rate": 1.0046832171023952e-06,
"loss": 1.0007,
"step": 3550
},
{
"epoch": 0.8004926108374384,
"grad_norm": 7.682162761688232,
"learning_rate": 9.524356162813326e-07,
"loss": 0.9482,
"step": 3575
},
{
"epoch": 0.8060904612628751,
"grad_norm": 8.57835578918457,
"learning_rate": 9.014397864948921e-07,
"loss": 0.9541,
"step": 3600
},
{
"epoch": 0.8116883116883117,
"grad_norm": 11.44987678527832,
"learning_rate": 8.51711498981419e-07,
"loss": 0.9518,
"step": 3625
},
{
"epoch": 0.8172861621137483,
"grad_norm": 7.918177604675293,
"learning_rate": 8.032661329724717e-07,
"loss": 1.0321,
"step": 3650
},
{
"epoch": 0.822884012539185,
"grad_norm": 10.909501075744629,
"learning_rate": 7.561186709365653e-07,
"loss": 0.9651,
"step": 3675
},
{
"epoch": 0.8284818629646216,
"grad_norm": 14.997469902038574,
"learning_rate": 7.102836939456071e-07,
"loss": 1.0462,
"step": 3700
},
{
"epoch": 0.8340797133900583,
"grad_norm": 5.220563888549805,
"learning_rate": 6.657753771654812e-07,
"loss": 0.8815,
"step": 3725
},
{
"epoch": 0.8396775638154949,
"grad_norm": 8.45197868347168,
"learning_rate": 6.226074854721653e-07,
"loss": 1.0041,
"step": 3750
},
{
"epoch": 0.8452754142409314,
"grad_norm": 11.861766815185547,
"learning_rate": 5.807933691947248e-07,
"loss": 0.8721,
"step": 3775
},
{
"epoch": 0.8508732646663681,
"grad_norm": 6.297611236572266,
"learning_rate": 5.403459599865307e-07,
"loss": 1.0609,
"step": 3800
},
{
"epoch": 0.8564711150918047,
"grad_norm": 9.576262474060059,
"learning_rate": 5.012777668259378e-07,
"loss": 0.9142,
"step": 3825
},
{
"epoch": 0.8620689655172413,
"grad_norm": 8.582077980041504,
"learning_rate": 4.6360087214769923e-07,
"loss": 0.9383,
"step": 3850
},
{
"epoch": 0.867666815942678,
"grad_norm": 9.107322692871094,
"learning_rate": 4.2732692810628583e-07,
"loss": 1.1366,
"step": 3875
},
{
"epoch": 0.8732646663681146,
"grad_norm": 5.725574493408203,
"learning_rate": 3.9246715297228176e-07,
"loss": 1.0077,
"step": 3900
},
{
"epoch": 0.8788625167935513,
"grad_norm": 6.691014766693115,
"learning_rate": 3.59032327662962e-07,
"loss": 1.0128,
"step": 3925
},
{
"epoch": 0.8844603672189879,
"grad_norm": 7.009856700897217,
"learning_rate": 3.270327924081301e-07,
"loss": 1.0404,
"step": 3950
},
{
"epoch": 0.8900582176444245,
"grad_norm": 9.100337982177734,
"learning_rate": 2.964784435522422e-07,
"loss": 0.9983,
"step": 3975
},
{
"epoch": 0.8956560680698612,
"grad_norm": 11.176060676574707,
"learning_rate": 2.6737873049381523e-07,
"loss": 0.9483,
"step": 4000
},
{
"epoch": 0.9012539184952978,
"grad_norm": 6.4081268310546875,
"learning_rate": 2.3974265276305253e-07,
"loss": 1.0171,
"step": 4025
},
{
"epoch": 0.9068517689207345,
"grad_norm": 5.774712562561035,
"learning_rate": 2.1357875723860222e-07,
"loss": 0.8993,
"step": 4050
},
{
"epoch": 0.9124496193461711,
"grad_norm": 9.53131103515625,
"learning_rate": 1.8889513550430892e-07,
"loss": 0.9881,
"step": 4075
},
{
"epoch": 0.9180474697716077,
"grad_norm": 7.965620040893555,
"learning_rate": 1.656994213467622e-07,
"loss": 0.9479,
"step": 4100
},
{
"epoch": 0.9236453201970444,
"grad_norm": 9.653414726257324,
"learning_rate": 1.439987883944355e-07,
"loss": 0.9613,
"step": 4125
},
{
"epoch": 0.929243170622481,
"grad_norm": 9.446654319763184,
"learning_rate": 1.237999478991303e-07,
"loss": 1.1298,
"step": 4150
},
{
"epoch": 0.9348410210479176,
"grad_norm": 9.220303535461426,
"learning_rate": 1.0510914666041927e-07,
"loss": 0.9897,
"step": 4175
},
{
"epoch": 0.9404388714733543,
"grad_norm": 9.508859634399414,
"learning_rate": 8.793216509373038e-08,
"loss": 1.0607,
"step": 4200
},
{
"epoch": 0.9460367218987908,
"grad_norm": 8.090096473693848,
"learning_rate": 7.227431544266194e-08,
"loss": 1.0712,
"step": 4225
},
{
"epoch": 0.9516345723242275,
"grad_norm": 5.4540510177612305,
"learning_rate": 5.8140440136091326e-08,
"loss": 1.0226,
"step": 4250
},
{
"epoch": 0.9572324227496641,
"grad_norm": 5.08390474319458,
"learning_rate": 4.553491029058221e-08,
"loss": 0.8554,
"step": 4275
},
{
"epoch": 0.9628302731751007,
"grad_norm": 11.732171058654785,
"learning_rate": 3.4461624358546056e-08,
"loss": 1.0033,
"step": 4300
},
{
"epoch": 0.9684281236005374,
"grad_norm": 8.629095077514648,
"learning_rate": 2.4924006922590338e-08,
"loss": 0.973,
"step": 4325
},
{
"epoch": 0.974025974025974,
"grad_norm": 6.797715663909912,
"learning_rate": 1.6925007636411362e-08,
"loss": 0.8825,
"step": 4350
},
{
"epoch": 0.9796238244514106,
"grad_norm": 10.467803001403809,
"learning_rate": 1.0467100312568923e-08,
"loss": 1.0412,
"step": 4375
},
{
"epoch": 0.9852216748768473,
"grad_norm": 10.413917541503906,
"learning_rate": 5.552282157424427e-09,
"loss": 0.9493,
"step": 4400
},
{
"epoch": 0.9908195253022839,
"grad_norm": 6.7092719078063965,
"learning_rate": 2.182073153471631e-09,
"loss": 0.986,
"step": 4425
},
{
"epoch": 0.9964173757277206,
"grad_norm": 6.736852645874023,
"learning_rate": 3.575155892604487e-10,
"loss": 0.9939,
"step": 4450
}
],
"logging_steps": 25,
"max_steps": 4466,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.4169358435549184e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}