{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 934,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010718113612004287,
"grad_norm": 1.3203599452972412,
"learning_rate": 1.0256410256410257e-06,
"loss": 1.3095,
"step": 5
},
{
"epoch": 0.021436227224008574,
"grad_norm": 1.005972981452942,
"learning_rate": 2.307692307692308e-06,
"loss": 1.3756,
"step": 10
},
{
"epoch": 0.03215434083601286,
"grad_norm": 0.8182684183120728,
"learning_rate": 3.58974358974359e-06,
"loss": 1.3238,
"step": 15
},
{
"epoch": 0.04287245444801715,
"grad_norm": 0.791577160358429,
"learning_rate": 4.871794871794872e-06,
"loss": 1.2938,
"step": 20
},
{
"epoch": 0.05359056806002144,
"grad_norm": 0.6379374861717224,
"learning_rate": 6.153846153846154e-06,
"loss": 1.2856,
"step": 25
},
{
"epoch": 0.06430868167202572,
"grad_norm": 0.6668587327003479,
"learning_rate": 7.435897435897436e-06,
"loss": 1.2841,
"step": 30
},
{
"epoch": 0.07502679528403002,
"grad_norm": 0.716699481010437,
"learning_rate": 8.717948717948719e-06,
"loss": 1.2718,
"step": 35
},
{
"epoch": 0.0857449088960343,
"grad_norm": 0.5486516952514648,
"learning_rate": 9.999999999999999e-06,
"loss": 1.273,
"step": 40
},
{
"epoch": 0.09646302250803858,
"grad_norm": 0.46251392364501953,
"learning_rate": 1.1282051282051283e-05,
"loss": 1.312,
"step": 45
},
{
"epoch": 0.10718113612004287,
"grad_norm": 0.4610588848590851,
"learning_rate": 1.2564102564102565e-05,
"loss": 1.2015,
"step": 50
},
{
"epoch": 0.11789924973204716,
"grad_norm": 0.4434472918510437,
"learning_rate": 1.3846153846153847e-05,
"loss": 1.2457,
"step": 55
},
{
"epoch": 0.12861736334405144,
"grad_norm": 0.43525955080986023,
"learning_rate": 1.5128205128205129e-05,
"loss": 1.2111,
"step": 60
},
{
"epoch": 0.13933547695605572,
"grad_norm": 0.45412635803222656,
"learning_rate": 1.641025641025641e-05,
"loss": 1.2055,
"step": 65
},
{
"epoch": 0.15005359056806003,
"grad_norm": 0.47974953055381775,
"learning_rate": 1.7692307692307694e-05,
"loss": 1.1836,
"step": 70
},
{
"epoch": 0.1607717041800643,
"grad_norm": 0.5585247278213501,
"learning_rate": 1.8974358974358975e-05,
"loss": 1.1456,
"step": 75
},
{
"epoch": 0.1714898177920686,
"grad_norm": 0.4740090072154999,
"learning_rate": 2.025641025641026e-05,
"loss": 1.1697,
"step": 80
},
{
"epoch": 0.18220793140407288,
"grad_norm": 0.49104949831962585,
"learning_rate": 2.153846153846154e-05,
"loss": 1.1723,
"step": 85
},
{
"epoch": 0.19292604501607716,
"grad_norm": 0.47726204991340637,
"learning_rate": 2.282051282051282e-05,
"loss": 1.1729,
"step": 90
},
{
"epoch": 0.20364415862808147,
"grad_norm": 0.5089394450187683,
"learning_rate": 2.4102564102564103e-05,
"loss": 1.1313,
"step": 95
},
{
"epoch": 0.21436227224008575,
"grad_norm": 0.4856749475002289,
"learning_rate": 2.5384615384615386e-05,
"loss": 1.1221,
"step": 100
},
{
"epoch": 0.22508038585209003,
"grad_norm": 0.4452688992023468,
"learning_rate": 2.6666666666666667e-05,
"loss": 1.1334,
"step": 105
},
{
"epoch": 0.2357984994640943,
"grad_norm": 0.5895005464553833,
"learning_rate": 2.794871794871795e-05,
"loss": 1.1453,
"step": 110
},
{
"epoch": 0.2465166130760986,
"grad_norm": 0.5326249003410339,
"learning_rate": 2.923076923076923e-05,
"loss": 1.0856,
"step": 115
},
{
"epoch": 0.2572347266881029,
"grad_norm": 0.588718056678772,
"learning_rate": 2.9999939813702703e-05,
"loss": 1.0957,
"step": 120
},
{
"epoch": 0.2679528403001072,
"grad_norm": 0.4917924106121063,
"learning_rate": 2.9999262723404875e-05,
"loss": 1.0265,
"step": 125
},
{
"epoch": 0.27867095391211144,
"grad_norm": 0.624920666217804,
"learning_rate": 2.9997833344010312e-05,
"loss": 1.0845,
"step": 130
},
{
"epoch": 0.28938906752411575,
"grad_norm": 0.627967119216919,
"learning_rate": 2.9995651747209637e-05,
"loss": 1.1037,
"step": 135
},
{
"epoch": 0.30010718113612006,
"grad_norm": 0.5074423551559448,
"learning_rate": 2.9992718042420993e-05,
"loss": 1.0788,
"step": 140
},
{
"epoch": 0.3108252947481243,
"grad_norm": 0.5567290186882019,
"learning_rate": 2.9989032376784556e-05,
"loss": 1.1003,
"step": 145
},
{
"epoch": 0.3215434083601286,
"grad_norm": 0.5891143083572388,
"learning_rate": 2.998459493515515e-05,
"loss": 1.0162,
"step": 150
},
{
"epoch": 0.3322615219721329,
"grad_norm": 0.6841644048690796,
"learning_rate": 2.9979405940092978e-05,
"loss": 1.0204,
"step": 155
},
{
"epoch": 0.3429796355841372,
"grad_norm": 0.6039224863052368,
"learning_rate": 2.997346565185246e-05,
"loss": 1.0219,
"step": 160
},
{
"epoch": 0.3536977491961415,
"grad_norm": 0.5960545539855957,
"learning_rate": 2.9966774368369175e-05,
"loss": 1.0065,
"step": 165
},
{
"epoch": 0.36441586280814575,
"grad_norm": 0.6452131867408752,
"learning_rate": 2.995933242524493e-05,
"loss": 0.9917,
"step": 170
},
{
"epoch": 0.37513397642015006,
"grad_norm": 0.5966811776161194,
"learning_rate": 2.995114019573091e-05,
"loss": 0.9868,
"step": 175
},
{
"epoch": 0.3858520900321543,
"grad_norm": 0.6129615306854248,
"learning_rate": 2.9942198090708976e-05,
"loss": 1.034,
"step": 180
},
{
"epoch": 0.3965702036441586,
"grad_norm": 0.6630488038063049,
"learning_rate": 2.993250655867105e-05,
"loss": 1.0303,
"step": 185
},
{
"epoch": 0.40728831725616294,
"grad_norm": 0.6165331602096558,
"learning_rate": 2.9922066085696613e-05,
"loss": 0.987,
"step": 190
},
{
"epoch": 0.4180064308681672,
"grad_norm": 0.7296874523162842,
"learning_rate": 2.9910877195428335e-05,
"loss": 0.9805,
"step": 195
},
{
"epoch": 0.4287245444801715,
"grad_norm": 0.6991822123527527,
"learning_rate": 2.989894044904581e-05,
"loss": 0.9377,
"step": 200
},
{
"epoch": 0.43944265809217575,
"grad_norm": 0.7727455496788025,
"learning_rate": 2.9886256445237405e-05,
"loss": 0.9726,
"step": 205
},
{
"epoch": 0.45016077170418006,
"grad_norm": 0.7376512289047241,
"learning_rate": 2.987282582017024e-05,
"loss": 0.946,
"step": 210
},
{
"epoch": 0.4608788853161844,
"grad_norm": 0.819678544998169,
"learning_rate": 2.9858649247458287e-05,
"loss": 0.9861,
"step": 215
},
{
"epoch": 0.4715969989281886,
"grad_norm": 0.7300285696983337,
"learning_rate": 2.984372743812855e-05,
"loss": 1.0026,
"step": 220
},
{
"epoch": 0.48231511254019294,
"grad_norm": 0.7369091510772705,
"learning_rate": 2.9828061140585463e-05,
"loss": 0.9064,
"step": 225
},
{
"epoch": 0.4930332261521972,
"grad_norm": 0.6813873052597046,
"learning_rate": 2.9811651140573287e-05,
"loss": 0.8859,
"step": 230
},
{
"epoch": 0.5037513397642015,
"grad_norm": 0.7347345352172852,
"learning_rate": 2.979449826113676e-05,
"loss": 0.8983,
"step": 235
},
{
"epoch": 0.5144694533762058,
"grad_norm": 0.8283259272575378,
"learning_rate": 2.9776603362579775e-05,
"loss": 0.8876,
"step": 240
},
{
"epoch": 0.5251875669882101,
"grad_norm": 0.7388147711753845,
"learning_rate": 2.9757967342422246e-05,
"loss": 0.899,
"step": 245
},
{
"epoch": 0.5359056806002144,
"grad_norm": 0.7657457590103149,
"learning_rate": 2.9738591135355108e-05,
"loss": 0.9412,
"step": 250
},
{
"epoch": 0.5466237942122186,
"grad_norm": 0.8804264664649963,
"learning_rate": 2.971847571319341e-05,
"loss": 0.8838,
"step": 255
},
{
"epoch": 0.5573419078242229,
"grad_norm": 0.771865963935852,
"learning_rate": 2.969762208482759e-05,
"loss": 0.8697,
"step": 260
},
{
"epoch": 0.5680600214362272,
"grad_norm": 0.7006550431251526,
"learning_rate": 2.967603129617288e-05,
"loss": 0.8801,
"step": 265
},
{
"epoch": 0.5787781350482315,
"grad_norm": 0.7909011244773865,
"learning_rate": 2.9653704430116827e-05,
"loss": 0.9199,
"step": 270
},
{
"epoch": 0.5894962486602358,
"grad_norm": 0.9315905570983887,
"learning_rate": 2.9630642606464998e-05,
"loss": 0.8249,
"step": 275
},
{
"epoch": 0.6002143622722401,
"grad_norm": 0.7528437376022339,
"learning_rate": 2.9606846981884812e-05,
"loss": 0.8656,
"step": 280
},
{
"epoch": 0.6109324758842444,
"grad_norm": 0.7914732098579407,
"learning_rate": 2.958231874984753e-05,
"loss": 0.8618,
"step": 285
},
{
"epoch": 0.6216505894962486,
"grad_norm": 0.7694675922393799,
"learning_rate": 2.955705914056838e-05,
"loss": 0.8546,
"step": 290
},
{
"epoch": 0.632368703108253,
"grad_norm": 0.8651600480079651,
"learning_rate": 2.9531069420944876e-05,
"loss": 0.8654,
"step": 295
},
{
"epoch": 0.6430868167202572,
"grad_norm": 0.7878230214118958,
"learning_rate": 2.9504350894493272e-05,
"loss": 0.8624,
"step": 300
},
{
"epoch": 0.6538049303322615,
"grad_norm": 0.9521771669387817,
"learning_rate": 2.9476904901283175e-05,
"loss": 0.8175,
"step": 305
},
{
"epoch": 0.6645230439442658,
"grad_norm": 0.8132469058036804,
"learning_rate": 2.944873281787034e-05,
"loss": 0.8087,
"step": 310
},
{
"epoch": 0.6752411575562701,
"grad_norm": 0.8626110553741455,
"learning_rate": 2.9419836057227634e-05,
"loss": 0.8033,
"step": 315
},
{
"epoch": 0.6859592711682744,
"grad_norm": 0.858932614326477,
"learning_rate": 2.9390216068674154e-05,
"loss": 0.7535,
"step": 320
},
{
"epoch": 0.6966773847802786,
"grad_norm": 1.0335079431533813,
"learning_rate": 2.9359874337802542e-05,
"loss": 0.8272,
"step": 325
},
{
"epoch": 0.707395498392283,
"grad_norm": 0.819476842880249,
"learning_rate": 2.9328812386404503e-05,
"loss": 0.8184,
"step": 330
},
{
"epoch": 0.7181136120042872,
"grad_norm": 0.9225292205810547,
"learning_rate": 2.9297031772394412e-05,
"loss": 0.8009,
"step": 335
},
{
"epoch": 0.7288317256162915,
"grad_norm": 0.9197941422462463,
"learning_rate": 2.9264534089731262e-05,
"loss": 0.7841,
"step": 340
},
{
"epoch": 0.7395498392282959,
"grad_norm": 0.8611735701560974,
"learning_rate": 2.9231320968338644e-05,
"loss": 0.7975,
"step": 345
},
{
"epoch": 0.7502679528403001,
"grad_norm": 0.8244771361351013,
"learning_rate": 2.9197394074023042e-05,
"loss": 0.7895,
"step": 350
},
{
"epoch": 0.7609860664523044,
"grad_norm": 0.8496798872947693,
"learning_rate": 2.9162755108390278e-05,
"loss": 0.7931,
"step": 355
},
{
"epoch": 0.7717041800643086,
"grad_norm": 0.9810552000999451,
"learning_rate": 2.9127405808760152e-05,
"loss": 0.7507,
"step": 360
},
{
"epoch": 0.782422293676313,
"grad_norm": 0.9915773272514343,
"learning_rate": 2.9091347948079318e-05,
"loss": 0.8251,
"step": 365
},
{
"epoch": 0.7931404072883173,
"grad_norm": 1.0386714935302734,
"learning_rate": 2.9054583334832366e-05,
"loss": 0.8063,
"step": 370
},
{
"epoch": 0.8038585209003215,
"grad_norm": 0.9649940729141235,
"learning_rate": 2.9017113812951105e-05,
"loss": 0.7797,
"step": 375
},
{
"epoch": 0.8145766345123259,
"grad_norm": 1.0805562734603882,
"learning_rate": 2.897894126172209e-05,
"loss": 0.7873,
"step": 380
},
{
"epoch": 0.8252947481243301,
"grad_norm": 0.9175759553909302,
"learning_rate": 2.8940067595692367e-05,
"loss": 0.7306,
"step": 385
},
{
"epoch": 0.8360128617363344,
"grad_norm": 1.0049532651901245,
"learning_rate": 2.890049476457344e-05,
"loss": 0.7357,
"step": 390
},
{
"epoch": 0.8467309753483387,
"grad_norm": 0.8982154726982117,
"learning_rate": 2.8860224753143486e-05,
"loss": 0.7478,
"step": 395
},
{
"epoch": 0.857449088960343,
"grad_norm": 0.898063600063324,
"learning_rate": 2.881925958114782e-05,
"loss": 0.7576,
"step": 400
},
{
"epoch": 0.8681672025723473,
"grad_norm": 0.9208119511604309,
"learning_rate": 2.877760130319757e-05,
"loss": 0.7413,
"step": 405
},
{
"epoch": 0.8788853161843515,
"grad_norm": 0.9576625823974609,
"learning_rate": 2.8735252008666663e-05,
"loss": 0.6863,
"step": 410
},
{
"epoch": 0.8896034297963559,
"grad_norm": 0.9474856853485107,
"learning_rate": 2.8692213821586988e-05,
"loss": 0.7466,
"step": 415
},
{
"epoch": 0.9003215434083601,
"grad_norm": 1.0231027603149414,
"learning_rate": 2.8648488900541915e-05,
"loss": 0.7399,
"step": 420
},
{
"epoch": 0.9110396570203644,
"grad_norm": 0.9832124710083008,
"learning_rate": 2.8604079438557985e-05,
"loss": 0.7037,
"step": 425
},
{
"epoch": 0.9217577706323687,
"grad_norm": 1.0980058908462524,
"learning_rate": 2.8558987662994948e-05,
"loss": 0.7189,
"step": 430
},
{
"epoch": 0.932475884244373,
"grad_norm": 1.1283589601516724,
"learning_rate": 2.851321583543404e-05,
"loss": 0.6951,
"step": 435
},
{
"epoch": 0.9431939978563773,
"grad_norm": 0.9087287187576294,
"learning_rate": 2.8466766251564554e-05,
"loss": 0.6932,
"step": 440
},
{
"epoch": 0.9539121114683816,
"grad_norm": 1.0392457246780396,
"learning_rate": 2.8419641241068687e-05,
"loss": 0.7065,
"step": 445
},
{
"epoch": 0.9646302250803859,
"grad_norm": 0.9226632118225098,
"learning_rate": 2.837184316750473e-05,
"loss": 0.7066,
"step": 450
},
{
"epoch": 0.9753483386923901,
"grad_norm": 0.9685462713241577,
"learning_rate": 2.832337442818848e-05,
"loss": 0.6805,
"step": 455
},
{
"epoch": 0.9860664523043944,
"grad_norm": 0.9703472852706909,
"learning_rate": 2.8274237454073034e-05,
"loss": 0.668,
"step": 460
},
{
"epoch": 0.9967845659163987,
"grad_norm": 0.902520477771759,
"learning_rate": 2.822443470962685e-05,
"loss": 0.6379,
"step": 465
},
{
"epoch": 1.0064308681672025,
"grad_norm": 1.2527391910552979,
"learning_rate": 2.8173968692710142e-05,
"loss": 0.6063,
"step": 470
},
{
"epoch": 1.0171489817792068,
"grad_norm": 1.0625535249710083,
"learning_rate": 2.8122841934449607e-05,
"loss": 0.5916,
"step": 475
},
{
"epoch": 1.0278670953912112,
"grad_norm": 1.1103384494781494,
"learning_rate": 2.807105699911147e-05,
"loss": 0.6323,
"step": 480
},
{
"epoch": 1.0385852090032155,
"grad_norm": 1.0214422941207886,
"learning_rate": 2.8018616483972874e-05,
"loss": 0.573,
"step": 485
},
{
"epoch": 1.0493033226152197,
"grad_norm": 0.9643238186836243,
"learning_rate": 2.7965523019191606e-05,
"loss": 0.5675,
"step": 490
},
{
"epoch": 1.060021436227224,
"grad_norm": 0.9685465097427368,
"learning_rate": 2.7911779267674207e-05,
"loss": 0.5708,
"step": 495
},
{
"epoch": 1.0707395498392283,
"grad_norm": 1.0887079238891602,
"learning_rate": 2.7857387924942387e-05,
"loss": 0.5954,
"step": 500
},
{
"epoch": 1.0814576634512325,
"grad_norm": 0.9755250811576843,
"learning_rate": 2.7802351718997825e-05,
"loss": 0.5384,
"step": 505
},
{
"epoch": 1.092175777063237,
"grad_norm": 0.9574642181396484,
"learning_rate": 2.7746673410185387e-05,
"loss": 0.5627,
"step": 510
},
{
"epoch": 1.1028938906752412,
"grad_norm": 1.0391875505447388,
"learning_rate": 2.7690355791054632e-05,
"loss": 0.5621,
"step": 515
},
{
"epoch": 1.1136120042872455,
"grad_norm": 0.9786142706871033,
"learning_rate": 2.7633401686219793e-05,
"loss": 0.5991,
"step": 520
},
{
"epoch": 1.1243301178992497,
"grad_norm": 1.0193188190460205,
"learning_rate": 2.7575813952218075e-05,
"loss": 0.5723,
"step": 525
},
{
"epoch": 1.135048231511254,
"grad_norm": 1.0080389976501465,
"learning_rate": 2.75175954773664e-05,
"loss": 0.5206,
"step": 530
},
{
"epoch": 1.1457663451232583,
"grad_norm": 0.9439852833747864,
"learning_rate": 2.7458749181616545e-05,
"loss": 0.571,
"step": 535
},
{
"epoch": 1.1564844587352625,
"grad_norm": 0.9455674290657043,
"learning_rate": 2.7399278016408695e-05,
"loss": 0.547,
"step": 540
},
{
"epoch": 1.167202572347267,
"grad_norm": 1.138099193572998,
"learning_rate": 2.7339184964523404e-05,
"loss": 0.5942,
"step": 545
},
{
"epoch": 1.1779206859592712,
"grad_norm": 1.1031590700149536,
"learning_rate": 2.7278473039931992e-05,
"loss": 0.5378,
"step": 550
},
{
"epoch": 1.1886387995712755,
"grad_norm": 1.1105057001113892,
"learning_rate": 2.7217145287645393e-05,
"loss": 0.5186,
"step": 555
},
{
"epoch": 1.1993569131832797,
"grad_norm": 1.2202413082122803,
"learning_rate": 2.715520478356141e-05,
"loss": 0.5721,
"step": 560
},
{
"epoch": 1.210075026795284,
"grad_norm": 0.9721954464912415,
"learning_rate": 2.7092654634310468e-05,
"loss": 0.5433,
"step": 565
},
{
"epoch": 1.2207931404072883,
"grad_norm": 1.2260032892227173,
"learning_rate": 2.7029497977099784e-05,
"loss": 0.543,
"step": 570
},
{
"epoch": 1.2315112540192925,
"grad_norm": 0.9644677639007568,
"learning_rate": 2.6965737979556025e-05,
"loss": 0.552,
"step": 575
},
{
"epoch": 1.242229367631297,
"grad_norm": 0.9937833547592163,
"learning_rate": 2.6901377839566443e-05,
"loss": 0.535,
"step": 580
},
{
"epoch": 1.2529474812433012,
"grad_norm": 1.031482219696045,
"learning_rate": 2.683642078511846e-05,
"loss": 0.5322,
"step": 585
},
{
"epoch": 1.2636655948553055,
"grad_norm": 0.9676262140274048,
"learning_rate": 2.6770870074137808e-05,
"loss": 0.5006,
"step": 590
},
{
"epoch": 1.2743837084673098,
"grad_norm": 1.3593294620513916,
"learning_rate": 2.670472899432509e-05,
"loss": 0.5064,
"step": 595
},
{
"epoch": 1.285101822079314,
"grad_norm": 1.0694024562835693,
"learning_rate": 2.66380008629909e-05,
"loss": 0.5848,
"step": 600
},
{
"epoch": 1.2958199356913183,
"grad_norm": 1.0443016290664673,
"learning_rate": 2.657068902688945e-05,
"loss": 0.538,
"step": 605
},
{
"epoch": 1.3065380493033225,
"grad_norm": 1.023464560508728,
"learning_rate": 2.65027968620507e-05,
"loss": 0.5587,
"step": 610
},
{
"epoch": 1.317256162915327,
"grad_norm": 0.9948901534080505,
"learning_rate": 2.643432777361105e-05,
"loss": 0.534,
"step": 615
},
{
"epoch": 1.3279742765273312,
"grad_norm": 1.0392403602600098,
"learning_rate": 2.636528519564253e-05,
"loss": 0.5365,
"step": 620
},
{
"epoch": 1.3386923901393355,
"grad_norm": 1.0413520336151123,
"learning_rate": 2.6295672590980586e-05,
"loss": 0.4708,
"step": 625
},
{
"epoch": 1.3494105037513398,
"grad_norm": 1.1629996299743652,
"learning_rate": 2.6225493451050397e-05,
"loss": 0.5554,
"step": 630
},
{
"epoch": 1.360128617363344,
"grad_norm": 1.023750901222229,
"learning_rate": 2.6154751295691756e-05,
"loss": 0.4827,
"step": 635
},
{
"epoch": 1.3708467309753483,
"grad_norm": 1.13310706615448,
"learning_rate": 2.608344967298253e-05,
"loss": 0.4657,
"step": 640
},
{
"epoch": 1.3815648445873525,
"grad_norm": 1.083983063697815,
"learning_rate": 2.6011592159060717e-05,
"loss": 0.5213,
"step": 645
},
{
"epoch": 1.392282958199357,
"grad_norm": 1.048169493675232,
"learning_rate": 2.593918235794508e-05,
"loss": 0.4819,
"step": 650
},
{
"epoch": 1.4030010718113612,
"grad_norm": 0.9783421158790588,
"learning_rate": 2.5866223901354372e-05,
"loss": 0.5378,
"step": 655
},
{
"epoch": 1.4137191854233655,
"grad_norm": 1.2764405012130737,
"learning_rate": 2.579272044852523e-05,
"loss": 0.4782,
"step": 660
},
{
"epoch": 1.4244372990353698,
"grad_norm": 1.0567833185195923,
"learning_rate": 2.5718675686028582e-05,
"loss": 0.5,
"step": 665
},
{
"epoch": 1.435155412647374,
"grad_norm": 1.150490164756775,
"learning_rate": 2.56440933275848e-05,
"loss": 0.4415,
"step": 670
},
{
"epoch": 1.4458735262593785,
"grad_norm": 1.0718622207641602,
"learning_rate": 2.5568977113877426e-05,
"loss": 0.5022,
"step": 675
},
{
"epoch": 1.4565916398713825,
"grad_norm": 1.0396467447280884,
"learning_rate": 2.549333081236553e-05,
"loss": 0.4582,
"step": 680
},
{
"epoch": 1.467309753483387,
"grad_norm": 1.0076903104782104,
"learning_rate": 2.5417158217094788e-05,
"loss": 0.4865,
"step": 685
},
{
"epoch": 1.4780278670953912,
"grad_norm": 1.1110858917236328,
"learning_rate": 2.5340463148507178e-05,
"loss": 0.4707,
"step": 690
},
{
"epoch": 1.4887459807073955,
"grad_norm": 1.0835309028625488,
"learning_rate": 2.5263249453249356e-05,
"loss": 0.4569,
"step": 695
},
{
"epoch": 1.4994640943193998,
"grad_norm": 1.1380281448364258,
"learning_rate": 2.5185521003979746e-05,
"loss": 0.4699,
"step": 700
},
{
"epoch": 1.510182207931404,
"grad_norm": 1.2212399244308472,
"learning_rate": 2.5107281699174287e-05,
"loss": 0.4579,
"step": 705
},
{
"epoch": 1.5209003215434085,
"grad_norm": 1.080427646636963,
"learning_rate": 2.5028535462930924e-05,
"loss": 0.4509,
"step": 710
},
{
"epoch": 1.5316184351554125,
"grad_norm": 1.0146664381027222,
"learning_rate": 2.494928624477277e-05,
"loss": 0.5209,
"step": 715
},
{
"epoch": 1.542336548767417,
"grad_norm": 1.023618221282959,
"learning_rate": 2.4869538019450045e-05,
"loss": 0.459,
"step": 720
},
{
"epoch": 1.5530546623794212,
"grad_norm": 1.0760775804519653,
"learning_rate": 2.4789294786740705e-05,
"loss": 0.4522,
"step": 725
},
{
"epoch": 1.5637727759914255,
"grad_norm": 1.0976240634918213,
"learning_rate": 2.4708560571249845e-05,
"loss": 0.4853,
"step": 730
},
{
"epoch": 1.5744908896034298,
"grad_norm": 1.2561285495758057,
"learning_rate": 2.4627339422207805e-05,
"loss": 0.4584,
"step": 735
},
{
"epoch": 1.585209003215434,
"grad_norm": 1.1148028373718262,
"learning_rate": 2.454563541326716e-05,
"loss": 0.4492,
"step": 740
},
{
"epoch": 1.5959271168274385,
"grad_norm": 1.085665225982666,
"learning_rate": 2.4463452642298324e-05,
"loss": 0.4545,
"step": 745
},
{
"epoch": 1.6066452304394425,
"grad_norm": 0.9975435137748718,
"learning_rate": 2.438079523118406e-05,
"loss": 0.4392,
"step": 750
},
{
"epoch": 1.617363344051447,
"grad_norm": 1.1030360460281372,
"learning_rate": 2.4297667325612756e-05,
"loss": 0.4255,
"step": 755
},
{
"epoch": 1.6280814576634512,
"grad_norm": 1.02976655960083,
"learning_rate": 2.421407309487046e-05,
"loss": 0.4452,
"step": 760
},
{
"epoch": 1.6387995712754555,
"grad_norm": 1.0580244064331055,
"learning_rate": 2.4130016731631815e-05,
"loss": 0.4275,
"step": 765
},
{
"epoch": 1.6495176848874598,
"grad_norm": 1.1036502122879028,
"learning_rate": 2.4045502451749736e-05,
"loss": 0.4672,
"step": 770
},
{
"epoch": 1.660235798499464,
"grad_norm": 1.1784788370132446,
"learning_rate": 2.3960534494043993e-05,
"loss": 0.4036,
"step": 775
},
{
"epoch": 1.6709539121114685,
"grad_norm": 1.1079624891281128,
"learning_rate": 2.387511712008859e-05,
"loss": 0.4294,
"step": 780
},
{
"epoch": 1.6816720257234725,
"grad_norm": 1.0606523752212524,
"learning_rate": 2.3789254613998043e-05,
"loss": 0.449,
"step": 785
},
{
"epoch": 1.692390139335477,
"grad_norm": 0.9858099818229675,
"learning_rate": 2.3702951282212492e-05,
"loss": 0.3832,
"step": 790
},
{
"epoch": 1.7031082529474812,
"grad_norm": 1.1735355854034424,
"learning_rate": 2.3616211453281726e-05,
"loss": 0.445,
"step": 795
},
{
"epoch": 1.7138263665594855,
"grad_norm": 1.083713412284851,
"learning_rate": 2.3529039477648087e-05,
"loss": 0.4306,
"step": 800
},
{
"epoch": 1.72454448017149,
"grad_norm": 1.1281569004058838,
"learning_rate": 2.344143972742826e-05,
"loss": 0.3807,
"step": 805
},
{
"epoch": 1.735262593783494,
"grad_norm": 1.0552798509597778,
"learning_rate": 2.3353416596193985e-05,
"loss": 0.4013,
"step": 810
},
{
"epoch": 1.7459807073954985,
"grad_norm": 1.0200108289718628,
"learning_rate": 2.326497449875173e-05,
"loss": 0.369,
"step": 815
},
{
"epoch": 1.7566988210075025,
"grad_norm": 1.0658568143844604,
"learning_rate": 2.3176117870921218e-05,
"loss": 0.3814,
"step": 820
},
{
"epoch": 1.767416934619507,
"grad_norm": 1.164661169052124,
"learning_rate": 2.3086851169312992e-05,
"loss": 0.4044,
"step": 825
},
{
"epoch": 1.7781350482315113,
"grad_norm": 1.178032636642456,
"learning_rate": 2.299717887110487e-05,
"loss": 0.3985,
"step": 830
},
{
"epoch": 1.7888531618435155,
"grad_norm": 1.1956833600997925,
"learning_rate": 2.290710547381739e-05,
"loss": 0.3905,
"step": 835
},
{
"epoch": 1.79957127545552,
"grad_norm": 1.205641269683838,
"learning_rate": 2.2816635495088244e-05,
"loss": 0.3915,
"step": 840
},
{
"epoch": 1.810289389067524,
"grad_norm": 1.208314299583435,
"learning_rate": 2.272577347244571e-05,
"loss": 0.403,
"step": 845
},
{
"epoch": 1.8210075026795285,
"grad_norm": 1.1529418230056763,
"learning_rate": 2.2634523963081033e-05,
"loss": 0.3932,
"step": 850
},
{
"epoch": 1.8317256162915327,
"grad_norm": 1.1101915836334229,
"learning_rate": 2.25428915436199e-05,
"loss": 0.3763,
"step": 855
},
{
"epoch": 1.842443729903537,
"grad_norm": 1.071894884109497,
"learning_rate": 2.245088080989287e-05,
"loss": 0.3824,
"step": 860
},
{
"epoch": 1.8531618435155413,
"grad_norm": 1.1817514896392822,
"learning_rate": 2.2358496376704898e-05,
"loss": 0.4261,
"step": 865
},
{
"epoch": 1.8638799571275455,
"grad_norm": 1.1303304433822632,
"learning_rate": 2.2265742877603838e-05,
"loss": 0.3604,
"step": 870
},
{
"epoch": 1.87459807073955,
"grad_norm": 1.0623246431350708,
"learning_rate": 2.2172624964648094e-05,
"loss": 0.4049,
"step": 875
},
{
"epoch": 1.885316184351554,
"grad_norm": 1.171410322189331,
"learning_rate": 2.2079147308173258e-05,
"loss": 0.3633,
"step": 880
},
{
"epoch": 1.8960342979635585,
"grad_norm": 1.20453941822052,
"learning_rate": 2.1985314596557887e-05,
"loss": 0.4022,
"step": 885
},
{
"epoch": 1.9067524115755627,
"grad_norm": 1.171126365661621,
"learning_rate": 2.1891131535988364e-05,
"loss": 0.3967,
"step": 890
},
{
"epoch": 1.917470525187567,
"grad_norm": 1.1599109172821045,
"learning_rate": 2.1796602850222832e-05,
"loss": 0.349,
"step": 895
},
{
"epoch": 1.9281886387995713,
"grad_norm": 1.056682825088501,
"learning_rate": 2.170173328035431e-05,
"loss": 0.3705,
"step": 900
},
{
"epoch": 1.9389067524115755,
"grad_norm": 1.1990083456039429,
"learning_rate": 2.1606527584572854e-05,
"loss": 0.367,
"step": 905
},
{
"epoch": 1.94962486602358,
"grad_norm": 1.1245200634002686,
"learning_rate": 2.1510990537926983e-05,
"loss": 0.3734,
"step": 910
},
{
"epoch": 1.960342979635584,
"grad_norm": 1.2177960872650146,
"learning_rate": 2.1415126932084104e-05,
"loss": 0.4062,
"step": 915
},
{
"epoch": 1.9710610932475885,
"grad_norm": 1.1364030838012695,
"learning_rate": 2.1318941575090233e-05,
"loss": 0.3492,
"step": 920
},
{
"epoch": 1.9817792068595927,
"grad_norm": 1.1492586135864258,
"learning_rate": 2.1222439291128855e-05,
"loss": 0.3597,
"step": 925
},
{
"epoch": 1.992497320471597,
"grad_norm": 1.0529663562774658,
"learning_rate": 2.112562492027893e-05,
"loss": 0.3996,
"step": 930
}
],
"logging_steps": 5,
"max_steps": 2335,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.394156316393472e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}