{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.99958071278826,
"eval_steps": 1192,
"global_step": 2384,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0041928721174004195,
"grad_norm": 0.8826889991760254,
"learning_rate": 3.765690376569038e-06,
"loss": 1.5208,
"step": 10
},
{
"epoch": 0.008385744234800839,
"grad_norm": 1.026820182800293,
"learning_rate": 7.94979079497908e-06,
"loss": 1.4602,
"step": 20
},
{
"epoch": 0.012578616352201259,
"grad_norm": 0.8231751918792725,
"learning_rate": 1.2133891213389122e-05,
"loss": 1.3823,
"step": 30
},
{
"epoch": 0.016771488469601678,
"grad_norm": 1.1056824922561646,
"learning_rate": 1.6317991631799166e-05,
"loss": 1.3479,
"step": 40
},
{
"epoch": 0.020964360587002098,
"grad_norm": 1.2568145990371704,
"learning_rate": 2.0502092050209206e-05,
"loss": 1.251,
"step": 50
},
{
"epoch": 0.025157232704402517,
"grad_norm": 1.2502493858337402,
"learning_rate": 2.468619246861925e-05,
"loss": 1.1857,
"step": 60
},
{
"epoch": 0.029350104821802937,
"grad_norm": 1.0695595741271973,
"learning_rate": 2.887029288702929e-05,
"loss": 1.1229,
"step": 70
},
{
"epoch": 0.033542976939203356,
"grad_norm": 1.4829120635986328,
"learning_rate": 3.305439330543933e-05,
"loss": 1.0773,
"step": 80
},
{
"epoch": 0.03773584905660377,
"grad_norm": 1.1467750072479248,
"learning_rate": 3.723849372384937e-05,
"loss": 1.0372,
"step": 90
},
{
"epoch": 0.041928721174004195,
"grad_norm": 1.1534297466278076,
"learning_rate": 4.1422594142259416e-05,
"loss": 1.0358,
"step": 100
},
{
"epoch": 0.04612159329140461,
"grad_norm": 1.0568394660949707,
"learning_rate": 4.560669456066946e-05,
"loss": 1.0164,
"step": 110
},
{
"epoch": 0.050314465408805034,
"grad_norm": 0.9403956532478333,
"learning_rate": 4.97907949790795e-05,
"loss": 0.9783,
"step": 120
},
{
"epoch": 0.05450733752620545,
"grad_norm": 1.179694414138794,
"learning_rate": 5.397489539748955e-05,
"loss": 0.9927,
"step": 130
},
{
"epoch": 0.05870020964360587,
"grad_norm": 1.0261974334716797,
"learning_rate": 5.8158995815899583e-05,
"loss": 0.9921,
"step": 140
},
{
"epoch": 0.06289308176100629,
"grad_norm": 1.114426612854004,
"learning_rate": 6.234309623430963e-05,
"loss": 0.9313,
"step": 150
},
{
"epoch": 0.06708595387840671,
"grad_norm": 1.1404651403427124,
"learning_rate": 6.652719665271967e-05,
"loss": 0.9672,
"step": 160
},
{
"epoch": 0.07127882599580712,
"grad_norm": 1.1163074970245361,
"learning_rate": 7.071129707112971e-05,
"loss": 0.9444,
"step": 170
},
{
"epoch": 0.07547169811320754,
"grad_norm": 1.0015060901641846,
"learning_rate": 7.489539748953976e-05,
"loss": 0.9437,
"step": 180
},
{
"epoch": 0.07966457023060797,
"grad_norm": 0.9487001299858093,
"learning_rate": 7.90794979079498e-05,
"loss": 0.928,
"step": 190
},
{
"epoch": 0.08385744234800839,
"grad_norm": 0.7763631343841553,
"learning_rate": 8.326359832635983e-05,
"loss": 0.8887,
"step": 200
},
{
"epoch": 0.0880503144654088,
"grad_norm": 0.9685043096542358,
"learning_rate": 8.744769874476988e-05,
"loss": 0.9235,
"step": 210
},
{
"epoch": 0.09224318658280922,
"grad_norm": 0.8481013774871826,
"learning_rate": 9.163179916317992e-05,
"loss": 0.8863,
"step": 220
},
{
"epoch": 0.09643605870020965,
"grad_norm": 0.8485997319221497,
"learning_rate": 9.581589958158997e-05,
"loss": 0.8861,
"step": 230
},
{
"epoch": 0.10062893081761007,
"grad_norm": 0.765440046787262,
"learning_rate": 0.0001,
"loss": 0.8762,
"step": 240
},
{
"epoch": 0.10482180293501048,
"grad_norm": 0.7229307293891907,
"learning_rate": 9.999879815104596e-05,
"loss": 0.835,
"step": 250
},
{
"epoch": 0.1090146750524109,
"grad_norm": 0.7660157680511475,
"learning_rate": 9.999519266196147e-05,
"loss": 0.9046,
"step": 260
},
{
"epoch": 0.11320754716981132,
"grad_norm": 0.9106878638267517,
"learning_rate": 9.998918370607666e-05,
"loss": 0.9144,
"step": 270
},
{
"epoch": 0.11740041928721175,
"grad_norm": 0.7474270462989807,
"learning_rate": 9.998077157226584e-05,
"loss": 0.8111,
"step": 280
},
{
"epoch": 0.12159329140461216,
"grad_norm": 0.8624346852302551,
"learning_rate": 9.996995666493356e-05,
"loss": 0.8864,
"step": 290
},
{
"epoch": 0.12578616352201258,
"grad_norm": 0.7222551107406616,
"learning_rate": 9.995673950399522e-05,
"loss": 0.8726,
"step": 300
},
{
"epoch": 0.129979035639413,
"grad_norm": 0.7533407807350159,
"learning_rate": 9.994112072485208e-05,
"loss": 0.8337,
"step": 310
},
{
"epoch": 0.13417190775681342,
"grad_norm": 0.8422977328300476,
"learning_rate": 9.992310107836067e-05,
"loss": 0.852,
"step": 320
},
{
"epoch": 0.13836477987421383,
"grad_norm": 0.6526598334312439,
"learning_rate": 9.990268143079671e-05,
"loss": 0.9103,
"step": 330
},
{
"epoch": 0.14255765199161424,
"grad_norm": 0.8105525374412537,
"learning_rate": 9.987986276381349e-05,
"loss": 0.8347,
"step": 340
},
{
"epoch": 0.14675052410901468,
"grad_norm": 0.7625735402107239,
"learning_rate": 9.985464617439465e-05,
"loss": 0.8581,
"step": 350
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.7675321102142334,
"learning_rate": 9.982703287480148e-05,
"loss": 0.8481,
"step": 360
},
{
"epoch": 0.15513626834381553,
"grad_norm": 0.7721195220947266,
"learning_rate": 9.979702419251455e-05,
"loss": 0.829,
"step": 370
},
{
"epoch": 0.15932914046121593,
"grad_norm": 0.7836877703666687,
"learning_rate": 9.976462157017001e-05,
"loss": 0.8182,
"step": 380
},
{
"epoch": 0.16352201257861634,
"grad_norm": 0.6462391018867493,
"learning_rate": 9.972982656549017e-05,
"loss": 0.8736,
"step": 390
},
{
"epoch": 0.16771488469601678,
"grad_norm": 0.6665590405464172,
"learning_rate": 9.969264085120864e-05,
"loss": 0.8308,
"step": 400
},
{
"epoch": 0.1719077568134172,
"grad_norm": 0.690707802772522,
"learning_rate": 9.965306621498989e-05,
"loss": 0.7982,
"step": 410
},
{
"epoch": 0.1761006289308176,
"grad_norm": 0.7840791344642639,
"learning_rate": 9.96111045593433e-05,
"loss": 0.8346,
"step": 420
},
{
"epoch": 0.18029350104821804,
"grad_norm": 0.6609325408935547,
"learning_rate": 9.95667579015318e-05,
"loss": 0.7658,
"step": 430
},
{
"epoch": 0.18448637316561844,
"grad_norm": 0.678220808506012,
"learning_rate": 9.952002837347472e-05,
"loss": 0.7606,
"step": 440
},
{
"epoch": 0.18867924528301888,
"grad_norm": 0.7335385084152222,
"learning_rate": 9.947091822164544e-05,
"loss": 0.7907,
"step": 450
},
{
"epoch": 0.1928721174004193,
"grad_norm": 0.8243012428283691,
"learning_rate": 9.941942980696335e-05,
"loss": 0.8451,
"step": 460
},
{
"epoch": 0.1970649895178197,
"grad_norm": 0.6208651065826416,
"learning_rate": 9.936556560468037e-05,
"loss": 0.7922,
"step": 470
},
{
"epoch": 0.20125786163522014,
"grad_norm": 0.6883862614631653,
"learning_rate": 9.930932820426187e-05,
"loss": 0.8272,
"step": 480
},
{
"epoch": 0.20545073375262055,
"grad_norm": 0.8161155581474304,
"learning_rate": 9.92507203092623e-05,
"loss": 0.8332,
"step": 490
},
{
"epoch": 0.20964360587002095,
"grad_norm": 0.601380467414856,
"learning_rate": 9.918974473719513e-05,
"loss": 0.8031,
"step": 500
},
{
"epoch": 0.2138364779874214,
"grad_norm": 0.6501343846321106,
"learning_rate": 9.91264044193975e-05,
"loss": 0.7841,
"step": 510
},
{
"epoch": 0.2180293501048218,
"grad_norm": 0.6840798258781433,
"learning_rate": 9.906070240088918e-05,
"loss": 0.7912,
"step": 520
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.6910505294799805,
"learning_rate": 9.899264184022625e-05,
"loss": 0.8498,
"step": 530
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.8739312887191772,
"learning_rate": 9.892222600934926e-05,
"loss": 0.8031,
"step": 540
},
{
"epoch": 0.23060796645702306,
"grad_norm": 0.7407451868057251,
"learning_rate": 9.884945829342593e-05,
"loss": 0.7669,
"step": 550
},
{
"epoch": 0.2348008385744235,
"grad_norm": 0.6191729307174683,
"learning_rate": 9.877434219068837e-05,
"loss": 0.7946,
"step": 560
},
{
"epoch": 0.2389937106918239,
"grad_norm": 0.6644866466522217,
"learning_rate": 9.869688131226499e-05,
"loss": 0.8018,
"step": 570
},
{
"epoch": 0.2431865828092243,
"grad_norm": 0.637300968170166,
"learning_rate": 9.86170793820068e-05,
"loss": 0.7256,
"step": 580
},
{
"epoch": 0.24737945492662475,
"grad_norm": 0.7838623523712158,
"learning_rate": 9.853494023630842e-05,
"loss": 0.8195,
"step": 590
},
{
"epoch": 0.25157232704402516,
"grad_norm": 0.6689124703407288,
"learning_rate": 9.845046782392377e-05,
"loss": 0.7889,
"step": 600
},
{
"epoch": 0.2557651991614256,
"grad_norm": 0.8161881566047668,
"learning_rate": 9.836366620577604e-05,
"loss": 0.7875,
"step": 610
},
{
"epoch": 0.259958071278826,
"grad_norm": 0.609159529209137,
"learning_rate": 9.827453955476258e-05,
"loss": 0.7807,
"step": 620
},
{
"epoch": 0.2641509433962264,
"grad_norm": 0.6595575213432312,
"learning_rate": 9.81830921555543e-05,
"loss": 0.7779,
"step": 630
},
{
"epoch": 0.26834381551362685,
"grad_norm": 0.7574846148490906,
"learning_rate": 9.808932840438962e-05,
"loss": 0.8019,
"step": 640
},
{
"epoch": 0.27253668763102723,
"grad_norm": 0.7723109722137451,
"learning_rate": 9.799325280886322e-05,
"loss": 0.8144,
"step": 650
},
{
"epoch": 0.27672955974842767,
"grad_norm": 0.7428059577941895,
"learning_rate": 9.789486998770923e-05,
"loss": 0.7779,
"step": 660
},
{
"epoch": 0.2809224318658281,
"grad_norm": 0.7655267715454102,
"learning_rate": 9.77941846705793e-05,
"loss": 0.7783,
"step": 670
},
{
"epoch": 0.2851153039832285,
"grad_norm": 0.6804608106613159,
"learning_rate": 9.769120169781514e-05,
"loss": 0.7832,
"step": 680
},
{
"epoch": 0.2893081761006289,
"grad_norm": 0.6434263586997986,
"learning_rate": 9.758592602021588e-05,
"loss": 0.7881,
"step": 690
},
{
"epoch": 0.29350104821802936,
"grad_norm": 0.6806625127792358,
"learning_rate": 9.747836269880003e-05,
"loss": 0.7876,
"step": 700
},
{
"epoch": 0.2976939203354298,
"grad_norm": 0.6142706274986267,
"learning_rate": 9.736851690456224e-05,
"loss": 0.7649,
"step": 710
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.7315255999565125,
"learning_rate": 9.725639391822457e-05,
"loss": 0.7582,
"step": 720
},
{
"epoch": 0.3060796645702306,
"grad_norm": 0.6575433015823364,
"learning_rate": 9.714199912998282e-05,
"loss": 0.7722,
"step": 730
},
{
"epoch": 0.31027253668763105,
"grad_norm": 0.7043889760971069,
"learning_rate": 9.702533803924724e-05,
"loss": 0.7551,
"step": 740
},
{
"epoch": 0.31446540880503143,
"grad_norm": 0.7640373110771179,
"learning_rate": 9.690641625437821e-05,
"loss": 0.7545,
"step": 750
},
{
"epoch": 0.31865828092243187,
"grad_norm": 0.7035335898399353,
"learning_rate": 9.678523949241666e-05,
"loss": 0.7851,
"step": 760
},
{
"epoch": 0.3228511530398323,
"grad_norm": 0.6915122866630554,
"learning_rate": 9.666181357880917e-05,
"loss": 0.7652,
"step": 770
},
{
"epoch": 0.3270440251572327,
"grad_norm": 0.7723565697669983,
"learning_rate": 9.653614444712794e-05,
"loss": 0.739,
"step": 780
},
{
"epoch": 0.3312368972746331,
"grad_norm": 0.6718587875366211,
"learning_rate": 9.640823813878555e-05,
"loss": 0.7359,
"step": 790
},
{
"epoch": 0.33542976939203356,
"grad_norm": 0.7723305821418762,
"learning_rate": 9.627810080274453e-05,
"loss": 0.7381,
"step": 800
},
{
"epoch": 0.33962264150943394,
"grad_norm": 0.9143453240394592,
"learning_rate": 9.61457386952217e-05,
"loss": 0.7671,
"step": 810
},
{
"epoch": 0.3438155136268344,
"grad_norm": 0.6352068185806274,
"learning_rate": 9.60111581793875e-05,
"loss": 0.7356,
"step": 820
},
{
"epoch": 0.3480083857442348,
"grad_norm": 0.6827622652053833,
"learning_rate": 9.587436572506003e-05,
"loss": 0.7444,
"step": 830
},
{
"epoch": 0.3522012578616352,
"grad_norm": 0.656425952911377,
"learning_rate": 9.5735367908394e-05,
"loss": 0.7792,
"step": 840
},
{
"epoch": 0.35639412997903563,
"grad_norm": 0.7113560438156128,
"learning_rate": 9.559417141156461e-05,
"loss": 0.7607,
"step": 850
},
{
"epoch": 0.36058700209643607,
"grad_norm": 0.6049885749816895,
"learning_rate": 9.545078302244638e-05,
"loss": 0.7577,
"step": 860
},
{
"epoch": 0.36477987421383645,
"grad_norm": 0.7477822303771973,
"learning_rate": 9.53052096342867e-05,
"loss": 0.7511,
"step": 870
},
{
"epoch": 0.3689727463312369,
"grad_norm": 0.7556253671646118,
"learning_rate": 9.515745824537457e-05,
"loss": 0.7493,
"step": 880
},
{
"epoch": 0.3731656184486373,
"grad_norm": 0.7980862855911255,
"learning_rate": 9.500753595870406e-05,
"loss": 0.7477,
"step": 890
},
{
"epoch": 0.37735849056603776,
"grad_norm": 0.7579500675201416,
"learning_rate": 9.485544998163291e-05,
"loss": 0.7457,
"step": 900
},
{
"epoch": 0.38155136268343814,
"grad_norm": 0.7790155410766602,
"learning_rate": 9.470120762553603e-05,
"loss": 0.7363,
"step": 910
},
{
"epoch": 0.3857442348008386,
"grad_norm": 0.6668843030929565,
"learning_rate": 9.454481630545397e-05,
"loss": 0.7364,
"step": 920
},
{
"epoch": 0.389937106918239,
"grad_norm": 0.8395707011222839,
"learning_rate": 9.438628353973653e-05,
"loss": 0.7462,
"step": 930
},
{
"epoch": 0.3941299790356394,
"grad_norm": 0.6021170616149902,
"learning_rate": 9.422561694968124e-05,
"loss": 0.6654,
"step": 940
},
{
"epoch": 0.39832285115303984,
"grad_norm": 0.7387002110481262,
"learning_rate": 9.406282425916703e-05,
"loss": 0.7489,
"step": 950
},
{
"epoch": 0.4025157232704403,
"grad_norm": 0.7128977179527283,
"learning_rate": 9.389791329428292e-05,
"loss": 0.685,
"step": 960
},
{
"epoch": 0.40670859538784065,
"grad_norm": 0.7485045194625854,
"learning_rate": 9.37308919829517e-05,
"loss": 0.6905,
"step": 970
},
{
"epoch": 0.4109014675052411,
"grad_norm": 0.7276797294616699,
"learning_rate": 9.356176835454893e-05,
"loss": 0.6698,
"step": 980
},
{
"epoch": 0.41509433962264153,
"grad_norm": 0.9191114902496338,
"learning_rate": 9.339055053951685e-05,
"loss": 0.6743,
"step": 990
},
{
"epoch": 0.4192872117400419,
"grad_norm": 0.6531959772109985,
"learning_rate": 9.321724676897349e-05,
"loss": 0.7194,
"step": 1000
},
{
"epoch": 0.42348008385744235,
"grad_norm": 0.9562734961509705,
"learning_rate": 9.304186537431709e-05,
"loss": 0.7025,
"step": 1010
},
{
"epoch": 0.4276729559748428,
"grad_norm": 0.9310542941093445,
"learning_rate": 9.286441478682552e-05,
"loss": 0.6774,
"step": 1020
},
{
"epoch": 0.43186582809224316,
"grad_norm": 0.8425101637840271,
"learning_rate": 9.268490353725083e-05,
"loss": 0.7326,
"step": 1030
},
{
"epoch": 0.4360587002096436,
"grad_norm": 0.6824538111686707,
"learning_rate": 9.250334025540938e-05,
"loss": 0.7289,
"step": 1040
},
{
"epoch": 0.44025157232704404,
"grad_norm": 0.6529750227928162,
"learning_rate": 9.231973366976675e-05,
"loss": 0.698,
"step": 1050
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.8103228807449341,
"learning_rate": 9.213409260701827e-05,
"loss": 0.7105,
"step": 1060
},
{
"epoch": 0.44863731656184486,
"grad_norm": 0.7299141883850098,
"learning_rate": 9.194642599166461e-05,
"loss": 0.7065,
"step": 1070
},
{
"epoch": 0.4528301886792453,
"grad_norm": 0.8794196248054504,
"learning_rate": 9.17567428455828e-05,
"loss": 0.6818,
"step": 1080
},
{
"epoch": 0.4570230607966457,
"grad_norm": 0.8989048004150391,
"learning_rate": 9.156505228759246e-05,
"loss": 0.6774,
"step": 1090
},
{
"epoch": 0.4612159329140461,
"grad_norm": 0.8377278447151184,
"learning_rate": 9.137136353301747e-05,
"loss": 0.6755,
"step": 1100
},
{
"epoch": 0.46540880503144655,
"grad_norm": 0.8824465870857239,
"learning_rate": 9.11756858932429e-05,
"loss": 0.7017,
"step": 1110
},
{
"epoch": 0.469601677148847,
"grad_norm": 0.6842039823532104,
"learning_rate": 9.097802877526742e-05,
"loss": 0.6658,
"step": 1120
},
{
"epoch": 0.47379454926624737,
"grad_norm": 0.660696268081665,
"learning_rate": 9.077840168125104e-05,
"loss": 0.6998,
"step": 1130
},
{
"epoch": 0.4779874213836478,
"grad_norm": 0.7316898107528687,
"learning_rate": 9.057681420805832e-05,
"loss": 0.7341,
"step": 1140
},
{
"epoch": 0.48218029350104824,
"grad_norm": 1.0073384046554565,
"learning_rate": 9.037327604679706e-05,
"loss": 0.7027,
"step": 1150
},
{
"epoch": 0.4863731656184486,
"grad_norm": 0.9173741936683655,
"learning_rate": 9.016779698235227e-05,
"loss": 0.6876,
"step": 1160
},
{
"epoch": 0.49056603773584906,
"grad_norm": 0.9299442172050476,
"learning_rate": 8.99603868929159e-05,
"loss": 0.7094,
"step": 1170
},
{
"epoch": 0.4947589098532495,
"grad_norm": 0.7131206393241882,
"learning_rate": 8.97510557495119e-05,
"loss": 0.6916,
"step": 1180
},
{
"epoch": 0.4989517819706499,
"grad_norm": 0.7590337991714478,
"learning_rate": 8.953981361551693e-05,
"loss": 0.6879,
"step": 1190
},
{
"epoch": 0.49979035639413,
"eval_loss": 0.8200252056121826,
"eval_runtime": 776.9138,
"eval_samples_per_second": 2.729,
"eval_steps_per_second": 2.729,
"step": 1192
},
{
"epoch": 0.5031446540880503,
"grad_norm": 0.8566190600395203,
"learning_rate": 8.93266706461765e-05,
"loss": 0.707,
"step": 1200
},
{
"epoch": 0.5073375262054507,
"grad_norm": 0.8124281167984009,
"learning_rate": 8.911163708811677e-05,
"loss": 0.6732,
"step": 1210
},
{
"epoch": 0.5115303983228512,
"grad_norm": 0.7451833486557007,
"learning_rate": 8.889472327885203e-05,
"loss": 0.6737,
"step": 1220
},
{
"epoch": 0.5157232704402516,
"grad_norm": 0.7067970633506775,
"learning_rate": 8.867593964628769e-05,
"loss": 0.7146,
"step": 1230
},
{
"epoch": 0.519916142557652,
"grad_norm": 0.8552123308181763,
"learning_rate": 8.84552967082189e-05,
"loss": 0.7213,
"step": 1240
},
{
"epoch": 0.5241090146750524,
"grad_norm": 0.8083493709564209,
"learning_rate": 8.82328050718251e-05,
"loss": 0.649,
"step": 1250
},
{
"epoch": 0.5283018867924528,
"grad_norm": 0.7538031339645386,
"learning_rate": 8.800847543315984e-05,
"loss": 0.6527,
"step": 1260
},
{
"epoch": 0.5324947589098532,
"grad_norm": 0.8465218544006348,
"learning_rate": 8.778231857663684e-05,
"loss": 0.6348,
"step": 1270
},
{
"epoch": 0.5366876310272537,
"grad_norm": 0.7604790329933167,
"learning_rate": 8.755434537451132e-05,
"loss": 0.7313,
"step": 1280
},
{
"epoch": 0.5408805031446541,
"grad_norm": 0.9563406705856323,
"learning_rate": 8.732456678635749e-05,
"loss": 0.658,
"step": 1290
},
{
"epoch": 0.5450733752620545,
"grad_norm": 0.7325488924980164,
"learning_rate": 8.709299385854157e-05,
"loss": 0.674,
"step": 1300
},
{
"epoch": 0.549266247379455,
"grad_norm": 0.9022886753082275,
"learning_rate": 8.685963772369081e-05,
"loss": 0.6738,
"step": 1310
},
{
"epoch": 0.5534591194968553,
"grad_norm": 0.7627152800559998,
"learning_rate": 8.662450960015824e-05,
"loss": 0.643,
"step": 1320
},
{
"epoch": 0.5576519916142557,
"grad_norm": 0.890799880027771,
"learning_rate": 8.638762079148348e-05,
"loss": 0.6955,
"step": 1330
},
{
"epoch": 0.5618448637316562,
"grad_norm": 0.690117597579956,
"learning_rate": 8.614898268584919e-05,
"loss": 0.6522,
"step": 1340
},
{
"epoch": 0.5660377358490566,
"grad_norm": 0.7684505581855774,
"learning_rate": 8.590860675553365e-05,
"loss": 0.6695,
"step": 1350
},
{
"epoch": 0.570230607966457,
"grad_norm": 0.9167590141296387,
"learning_rate": 8.566650455635932e-05,
"loss": 0.6406,
"step": 1360
},
{
"epoch": 0.5744234800838575,
"grad_norm": 0.7661821246147156,
"learning_rate": 8.542268772713717e-05,
"loss": 0.6322,
"step": 1370
},
{
"epoch": 0.5786163522012578,
"grad_norm": 0.7940593957901001,
"learning_rate": 8.517716798910726e-05,
"loss": 0.6629,
"step": 1380
},
{
"epoch": 0.5828092243186582,
"grad_norm": 0.8788663744926453,
"learning_rate": 8.492995714537518e-05,
"loss": 0.6251,
"step": 1390
},
{
"epoch": 0.5870020964360587,
"grad_norm": 0.7903651595115662,
"learning_rate": 8.468106708034471e-05,
"loss": 0.6658,
"step": 1400
},
{
"epoch": 0.5911949685534591,
"grad_norm": 0.7461090683937073,
"learning_rate": 8.443050975914642e-05,
"loss": 0.6341,
"step": 1410
},
{
"epoch": 0.5953878406708596,
"grad_norm": 0.8510380983352661,
"learning_rate": 8.417829722706248e-05,
"loss": 0.6494,
"step": 1420
},
{
"epoch": 0.59958071278826,
"grad_norm": 0.7093218564987183,
"learning_rate": 8.392444160894762e-05,
"loss": 0.6901,
"step": 1430
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.9880977272987366,
"learning_rate": 8.366895510864618e-05,
"loss": 0.6496,
"step": 1440
},
{
"epoch": 0.6079664570230608,
"grad_norm": 0.653540849685669,
"learning_rate": 8.34118500084055e-05,
"loss": 0.652,
"step": 1450
},
{
"epoch": 0.6121593291404612,
"grad_norm": 0.8846402168273926,
"learning_rate": 8.315313866828543e-05,
"loss": 0.6394,
"step": 1460
},
{
"epoch": 0.6163522012578616,
"grad_norm": 0.8670586347579956,
"learning_rate": 8.289283352556407e-05,
"loss": 0.6214,
"step": 1470
},
{
"epoch": 0.6205450733752621,
"grad_norm": 0.6209778189659119,
"learning_rate": 8.263094709413998e-05,
"loss": 0.6638,
"step": 1480
},
{
"epoch": 0.6247379454926625,
"grad_norm": 0.7160047888755798,
"learning_rate": 8.236749196393054e-05,
"loss": 0.682,
"step": 1490
},
{
"epoch": 0.6289308176100629,
"grad_norm": 1.0311180353164673,
"learning_rate": 8.21024808002666e-05,
"loss": 0.6014,
"step": 1500
},
{
"epoch": 0.6331236897274634,
"grad_norm": 0.7684214115142822,
"learning_rate": 8.183592634328378e-05,
"loss": 0.6451,
"step": 1510
},
{
"epoch": 0.6373165618448637,
"grad_norm": 0.8719981908798218,
"learning_rate": 8.156784140730992e-05,
"loss": 0.6297,
"step": 1520
},
{
"epoch": 0.6415094339622641,
"grad_norm": 0.7028118968009949,
"learning_rate": 8.129823888024896e-05,
"loss": 0.6569,
"step": 1530
},
{
"epoch": 0.6457023060796646,
"grad_norm": 0.7719480395317078,
"learning_rate": 8.102713172296157e-05,
"loss": 0.6129,
"step": 1540
},
{
"epoch": 0.649895178197065,
"grad_norm": 1.020864725112915,
"learning_rate": 8.075453296864184e-05,
"loss": 0.6318,
"step": 1550
},
{
"epoch": 0.6540880503144654,
"grad_norm": 0.7063642144203186,
"learning_rate": 8.048045572219089e-05,
"loss": 0.6467,
"step": 1560
},
{
"epoch": 0.6582809224318659,
"grad_norm": 0.8628724813461304,
"learning_rate": 8.02049131595868e-05,
"loss": 0.6109,
"step": 1570
},
{
"epoch": 0.6624737945492662,
"grad_norm": 0.8442419767379761,
"learning_rate": 7.992791852725119e-05,
"loss": 0.6504,
"step": 1580
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.9704012870788574,
"learning_rate": 7.964948514141244e-05,
"loss": 0.6082,
"step": 1590
},
{
"epoch": 0.6708595387840671,
"grad_norm": 0.8251159191131592,
"learning_rate": 7.93696263874655e-05,
"loss": 0.6221,
"step": 1600
},
{
"epoch": 0.6750524109014675,
"grad_norm": 0.8831840753555298,
"learning_rate": 7.908835571932838e-05,
"loss": 0.6436,
"step": 1610
},
{
"epoch": 0.6792452830188679,
"grad_norm": 0.950869619846344,
"learning_rate": 7.880568665879542e-05,
"loss": 0.5878,
"step": 1620
},
{
"epoch": 0.6834381551362684,
"grad_norm": 0.9539180994033813,
"learning_rate": 7.85216327948872e-05,
"loss": 0.6157,
"step": 1630
},
{
"epoch": 0.6876310272536688,
"grad_norm": 0.9718248844146729,
"learning_rate": 7.823620778319729e-05,
"loss": 0.6395,
"step": 1640
},
{
"epoch": 0.6918238993710691,
"grad_norm": 0.8369801640510559,
"learning_rate": 7.794942534523577e-05,
"loss": 0.6078,
"step": 1650
},
{
"epoch": 0.6960167714884696,
"grad_norm": 0.8650975227355957,
"learning_rate": 7.766129926776957e-05,
"loss": 0.6166,
"step": 1660
},
{
"epoch": 0.70020964360587,
"grad_norm": 0.73323655128479,
"learning_rate": 7.737184340215968e-05,
"loss": 0.6032,
"step": 1670
},
{
"epoch": 0.7044025157232704,
"grad_norm": 0.9203535914421082,
"learning_rate": 7.708107166369524e-05,
"loss": 0.6296,
"step": 1680
},
{
"epoch": 0.7085953878406709,
"grad_norm": 1.1156162023544312,
"learning_rate": 7.678899803092469e-05,
"loss": 0.6146,
"step": 1690
},
{
"epoch": 0.7127882599580713,
"grad_norm": 0.9607049226760864,
"learning_rate": 7.64956365449836e-05,
"loss": 0.6308,
"step": 1700
},
{
"epoch": 0.7169811320754716,
"grad_norm": 1.0079562664031982,
"learning_rate": 7.620100130891977e-05,
"loss": 0.6017,
"step": 1710
},
{
"epoch": 0.7211740041928721,
"grad_norm": 0.8753647208213806,
"learning_rate": 7.590510648701524e-05,
"loss": 0.6048,
"step": 1720
},
{
"epoch": 0.7253668763102725,
"grad_norm": 0.8041886687278748,
"learning_rate": 7.560796630410528e-05,
"loss": 0.606,
"step": 1730
},
{
"epoch": 0.7295597484276729,
"grad_norm": 0.9427897930145264,
"learning_rate": 7.530959504489459e-05,
"loss": 0.5782,
"step": 1740
},
{
"epoch": 0.7337526205450734,
"grad_norm": 0.8512046337127686,
"learning_rate": 7.501000705327062e-05,
"loss": 0.6115,
"step": 1750
},
{
"epoch": 0.7379454926624738,
"grad_norm": 0.9579586982727051,
"learning_rate": 7.470921673161396e-05,
"loss": 0.5935,
"step": 1760
},
{
"epoch": 0.7421383647798742,
"grad_norm": 1.0027107000350952,
"learning_rate": 7.440723854010594e-05,
"loss": 0.5798,
"step": 1770
},
{
"epoch": 0.7463312368972747,
"grad_norm": 0.9444760680198669,
"learning_rate": 7.410408699603349e-05,
"loss": 0.6266,
"step": 1780
},
{
"epoch": 0.750524109014675,
"grad_norm": 1.0437049865722656,
"learning_rate": 7.379977667309128e-05,
"loss": 0.5819,
"step": 1790
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.7806771993637085,
"learning_rate": 7.349432220068102e-05,
"loss": 0.5992,
"step": 1800
},
{
"epoch": 0.7589098532494759,
"grad_norm": 0.8425828814506531,
"learning_rate": 7.318773826320826e-05,
"loss": 0.6224,
"step": 1810
},
{
"epoch": 0.7631027253668763,
"grad_norm": 1.1011372804641724,
"learning_rate": 7.288003959937636e-05,
"loss": 0.5747,
"step": 1820
},
{
"epoch": 0.7672955974842768,
"grad_norm": 0.8370475769042969,
"learning_rate": 7.257124100147802e-05,
"loss": 0.6063,
"step": 1830
},
{
"epoch": 0.7714884696016772,
"grad_norm": 1.0179104804992676,
"learning_rate": 7.226135731468413e-05,
"loss": 0.6222,
"step": 1840
},
{
"epoch": 0.7756813417190775,
"grad_norm": 0.8768573999404907,
"learning_rate": 7.195040343633007e-05,
"loss": 0.6037,
"step": 1850
},
{
"epoch": 0.779874213836478,
"grad_norm": 1.3235031366348267,
"learning_rate": 7.163839431519956e-05,
"loss": 0.6487,
"step": 1860
},
{
"epoch": 0.7840670859538784,
"grad_norm": 1.0841494798660278,
"learning_rate": 7.132534495080608e-05,
"loss": 0.5897,
"step": 1870
},
{
"epoch": 0.7882599580712788,
"grad_norm": 0.9377679824829102,
"learning_rate": 7.101127039267163e-05,
"loss": 0.5672,
"step": 1880
},
{
"epoch": 0.7924528301886793,
"grad_norm": 0.9596067667007446,
"learning_rate": 7.069618573960343e-05,
"loss": 0.6099,
"step": 1890
},
{
"epoch": 0.7966457023060797,
"grad_norm": 0.9195724725723267,
"learning_rate": 7.038010613896785e-05,
"loss": 0.5778,
"step": 1900
},
{
"epoch": 0.80083857442348,
"grad_norm": 1.0495296716690063,
"learning_rate": 7.006304678596243e-05,
"loss": 0.5717,
"step": 1910
},
{
"epoch": 0.8050314465408805,
"grad_norm": 1.0088776350021362,
"learning_rate": 6.97450229228852e-05,
"loss": 0.5557,
"step": 1920
},
{
"epoch": 0.8092243186582809,
"grad_norm": 1.157039761543274,
"learning_rate": 6.94260498384021e-05,
"loss": 0.5781,
"step": 1930
},
{
"epoch": 0.8134171907756813,
"grad_norm": 0.9531391263008118,
"learning_rate": 6.91061428668118e-05,
"loss": 0.5966,
"step": 1940
},
{
"epoch": 0.8176100628930818,
"grad_norm": 1.028555154800415,
"learning_rate": 6.878531738730872e-05,
"loss": 0.5335,
"step": 1950
},
{
"epoch": 0.8218029350104822,
"grad_norm": 1.0623886585235596,
"learning_rate": 6.84635888232435e-05,
"loss": 0.5852,
"step": 1960
},
{
"epoch": 0.8259958071278826,
"grad_norm": 1.0144100189208984,
"learning_rate": 6.814097264138166e-05,
"loss": 0.5894,
"step": 1970
},
{
"epoch": 0.8301886792452831,
"grad_norm": 1.2390482425689697,
"learning_rate": 6.781748435116007e-05,
"loss": 0.572,
"step": 1980
},
{
"epoch": 0.8343815513626834,
"grad_norm": 0.9619780778884888,
"learning_rate": 6.749313950394122e-05,
"loss": 0.5621,
"step": 1990
},
{
"epoch": 0.8385744234800838,
"grad_norm": 0.9017946720123291,
"learning_rate": 6.716795369226573e-05,
"loss": 0.552,
"step": 2000
},
{
"epoch": 0.8427672955974843,
"grad_norm": 1.0533087253570557,
"learning_rate": 6.684194254910274e-05,
"loss": 0.5888,
"step": 2010
},
{
"epoch": 0.8469601677148847,
"grad_norm": 0.9775870442390442,
"learning_rate": 6.651512174709828e-05,
"loss": 0.5539,
"step": 2020
},
{
"epoch": 0.8511530398322851,
"grad_norm": 1.0731542110443115,
"learning_rate": 6.618750699782191e-05,
"loss": 0.5678,
"step": 2030
},
{
"epoch": 0.8553459119496856,
"grad_norm": 0.9971261024475098,
"learning_rate": 6.585911405101137e-05,
"loss": 0.5321,
"step": 2040
},
{
"epoch": 0.859538784067086,
"grad_norm": 0.9683151245117188,
"learning_rate": 6.552995869381548e-05,
"loss": 0.5729,
"step": 2050
},
{
"epoch": 0.8637316561844863,
"grad_norm": 1.0008348226547241,
"learning_rate": 6.520005675003509e-05,
"loss": 0.5559,
"step": 2060
},
{
"epoch": 0.8679245283018868,
"grad_norm": 1.1209384202957153,
"learning_rate": 6.486942407936243e-05,
"loss": 0.5594,
"step": 2070
},
{
"epoch": 0.8721174004192872,
"grad_norm": 0.8659446835517883,
"learning_rate": 6.45380765766187e-05,
"loss": 0.5739,
"step": 2080
},
{
"epoch": 0.8763102725366876,
"grad_norm": 1.1587382555007935,
"learning_rate": 6.420603017098987e-05,
"loss": 0.5738,
"step": 2090
},
{
"epoch": 0.8805031446540881,
"grad_norm": 1.013728141784668,
"learning_rate": 6.387330082526096e-05,
"loss": 0.5398,
"step": 2100
},
{
"epoch": 0.8846960167714885,
"grad_norm": 1.0449576377868652,
"learning_rate": 6.353990453504862e-05,
"loss": 0.5153,
"step": 2110
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.9392942786216736,
"learning_rate": 6.320585732803214e-05,
"loss": 0.538,
"step": 2120
},
{
"epoch": 0.8930817610062893,
"grad_norm": 1.244337558746338,
"learning_rate": 6.287117526318298e-05,
"loss": 0.5446,
"step": 2130
},
{
"epoch": 0.8972746331236897,
"grad_norm": 0.9983499050140381,
"learning_rate": 6.253587442999274e-05,
"loss": 0.5921,
"step": 2140
},
{
"epoch": 0.9014675052410901,
"grad_norm": 1.1698367595672607,
"learning_rate": 6.21999709476996e-05,
"loss": 0.5339,
"step": 2150
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.8834764361381531,
"learning_rate": 6.186348096451359e-05,
"loss": 0.5879,
"step": 2160
},
{
"epoch": 0.909853249475891,
"grad_norm": 1.0187932252883911,
"learning_rate": 6.152642065684002e-05,
"loss": 0.5555,
"step": 2170
},
{
"epoch": 0.9140461215932913,
"grad_norm": 0.9964016675949097,
"learning_rate": 6.118880622850202e-05,
"loss": 0.5047,
"step": 2180
},
{
"epoch": 0.9182389937106918,
"grad_norm": 1.1290080547332764,
"learning_rate": 6.085065390996154e-05,
"loss": 0.5395,
"step": 2190
},
{
"epoch": 0.9224318658280922,
"grad_norm": 1.0761950016021729,
"learning_rate": 6.0511979957538955e-05,
"loss": 0.5187,
"step": 2200
},
{
"epoch": 0.9266247379454927,
"grad_norm": 0.9644138813018799,
"learning_rate": 6.01728006526317e-05,
"loss": 0.5256,
"step": 2210
},
{
"epoch": 0.9308176100628931,
"grad_norm": 1.098429560661316,
"learning_rate": 5.983313230093148e-05,
"loss": 0.5586,
"step": 2220
},
{
"epoch": 0.9350104821802935,
"grad_norm": 0.9541507959365845,
"learning_rate": 5.949299123164043e-05,
"loss": 0.5526,
"step": 2230
},
{
"epoch": 0.939203354297694,
"grad_norm": 1.135026454925537,
"learning_rate": 5.915239379668607e-05,
"loss": 0.5369,
"step": 2240
},
{
"epoch": 0.9433962264150944,
"grad_norm": 0.9107454419136047,
"learning_rate": 5.8811356369935253e-05,
"loss": 0.5366,
"step": 2250
},
{
"epoch": 0.9475890985324947,
"grad_norm": 1.116883635520935,
"learning_rate": 5.8469895346406965e-05,
"loss": 0.51,
"step": 2260
},
{
"epoch": 0.9517819706498952,
"grad_norm": 1.0966845750808716,
"learning_rate": 5.8128027141484166e-05,
"loss": 0.5002,
"step": 2270
},
{
"epoch": 0.9559748427672956,
"grad_norm": 1.2240691184997559,
"learning_rate": 5.778576819012461e-05,
"loss": 0.5393,
"step": 2280
},
{
"epoch": 0.960167714884696,
"grad_norm": 1.0875015258789062,
"learning_rate": 5.7443134946070843e-05,
"loss": 0.5476,
"step": 2290
},
{
"epoch": 0.9643605870020965,
"grad_norm": 1.105652093887329,
"learning_rate": 5.710014388105908e-05,
"loss": 0.5329,
"step": 2300
},
{
"epoch": 0.9685534591194969,
"grad_norm": 1.2524014711380005,
"learning_rate": 5.6756811484027425e-05,
"loss": 0.5331,
"step": 2310
},
{
"epoch": 0.9727463312368972,
"grad_norm": 1.0238686800003052,
"learning_rate": 5.641315426032321e-05,
"loss": 0.5248,
"step": 2320
},
{
"epoch": 0.9769392033542977,
"grad_norm": 1.4908053874969482,
"learning_rate": 5.606918873090938e-05,
"loss": 0.5008,
"step": 2330
},
{
"epoch": 0.9811320754716981,
"grad_norm": 1.1398487091064453,
"learning_rate": 5.5724931431570426e-05,
"loss": 0.5191,
"step": 2340
},
{
"epoch": 0.9853249475890985,
"grad_norm": 0.9709526896476746,
"learning_rate": 5.538039891211736e-05,
"loss": 0.5255,
"step": 2350
},
{
"epoch": 0.989517819706499,
"grad_norm": 1.181663155555725,
"learning_rate": 5.5035607735592086e-05,
"loss": 0.511,
"step": 2360
},
{
"epoch": 0.9937106918238994,
"grad_norm": 0.9637453556060791,
"learning_rate": 5.4690574477471236e-05,
"loss": 0.5315,
"step": 2370
},
{
"epoch": 0.9979035639412998,
"grad_norm": 1.2031704187393188,
"learning_rate": 5.4345315724869184e-05,
"loss": 0.4944,
"step": 2380
},
{
"epoch": 0.99958071278826,
"eval_loss": 0.8822941780090332,
"eval_runtime": 776.4347,
"eval_samples_per_second": 2.73,
"eval_steps_per_second": 2.73,
"step": 2384
}
],
"logging_steps": 10,
"max_steps": 4770,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1192,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.276378975203885e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}