{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5837623762376238,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007920792079207921,
"grad_norm": 3.6492598056793213,
"learning_rate": 0.00019928684627575278,
"loss": 3.5835,
"step": 10
},
{
"epoch": 0.015841584158415842,
"grad_norm": 0.15779662132263184,
"learning_rate": 0.0001984944532488114,
"loss": 0.3867,
"step": 20
},
{
"epoch": 0.023762376237623763,
"grad_norm": 0.0943402647972107,
"learning_rate": 0.00019770206022187006,
"loss": 0.3435,
"step": 30
},
{
"epoch": 0.031683168316831684,
"grad_norm": 0.07766730338335037,
"learning_rate": 0.0001969096671949287,
"loss": 0.3724,
"step": 40
},
{
"epoch": 0.039603960396039604,
"grad_norm": 0.044251784682273865,
"learning_rate": 0.00019611727416798732,
"loss": 0.2735,
"step": 50
},
{
"epoch": 0.047524752475247525,
"grad_norm": 0.04691075533628464,
"learning_rate": 0.00019532488114104598,
"loss": 0.3015,
"step": 60
},
{
"epoch": 0.055445544554455446,
"grad_norm": 0.04267169535160065,
"learning_rate": 0.0001945324881141046,
"loss": 0.2531,
"step": 70
},
{
"epoch": 0.06336633663366337,
"grad_norm": 0.03702476993203163,
"learning_rate": 0.00019374009508716324,
"loss": 0.2919,
"step": 80
},
{
"epoch": 0.07128712871287128,
"grad_norm": 0.04517766833305359,
"learning_rate": 0.00019294770206022187,
"loss": 0.2666,
"step": 90
},
{
"epoch": 0.07920792079207921,
"grad_norm": 0.03789166733622551,
"learning_rate": 0.00019215530903328053,
"loss": 0.2768,
"step": 100
},
{
"epoch": 0.08712871287128712,
"grad_norm": 0.0474526472389698,
"learning_rate": 0.00019136291600633915,
"loss": 0.3106,
"step": 110
},
{
"epoch": 0.09504950495049505,
"grad_norm": 0.040468480437994,
"learning_rate": 0.00019057052297939778,
"loss": 0.2724,
"step": 120
},
{
"epoch": 0.10297029702970296,
"grad_norm": 0.036530930548906326,
"learning_rate": 0.00018977812995245644,
"loss": 0.2419,
"step": 130
},
{
"epoch": 0.11089108910891089,
"grad_norm": 0.026719143614172935,
"learning_rate": 0.00018898573692551507,
"loss": 0.251,
"step": 140
},
{
"epoch": 0.1188118811881188,
"grad_norm": 0.037144389003515244,
"learning_rate": 0.0001881933438985737,
"loss": 0.2759,
"step": 150
},
{
"epoch": 0.12673267326732673,
"grad_norm": 0.04505979269742966,
"learning_rate": 0.00018740095087163233,
"loss": 0.2502,
"step": 160
},
{
"epoch": 0.13465346534653466,
"grad_norm": 0.030406810343265533,
"learning_rate": 0.00018660855784469099,
"loss": 0.2571,
"step": 170
},
{
"epoch": 0.14257425742574256,
"grad_norm": 0.04877385124564171,
"learning_rate": 0.00018581616481774962,
"loss": 0.2649,
"step": 180
},
{
"epoch": 0.1504950495049505,
"grad_norm": 0.04424108564853668,
"learning_rate": 0.00018502377179080824,
"loss": 0.2891,
"step": 190
},
{
"epoch": 0.15841584158415842,
"grad_norm": 0.036704324185848236,
"learning_rate": 0.0001842313787638669,
"loss": 0.3115,
"step": 200
},
{
"epoch": 0.16633663366336635,
"grad_norm": 0.03781713545322418,
"learning_rate": 0.00018343898573692553,
"loss": 0.2741,
"step": 210
},
{
"epoch": 0.17425742574257425,
"grad_norm": 0.034683957695961,
"learning_rate": 0.00018264659270998416,
"loss": 0.2595,
"step": 220
},
{
"epoch": 0.18217821782178217,
"grad_norm": 0.03879937529563904,
"learning_rate": 0.0001818541996830428,
"loss": 0.2565,
"step": 230
},
{
"epoch": 0.1900990099009901,
"grad_norm": 0.030427804216742516,
"learning_rate": 0.00018106180665610145,
"loss": 0.2716,
"step": 240
},
{
"epoch": 0.19801980198019803,
"grad_norm": 0.05185231566429138,
"learning_rate": 0.00018026941362916008,
"loss": 0.2864,
"step": 250
},
{
"epoch": 0.20594059405940593,
"grad_norm": 0.04268000274896622,
"learning_rate": 0.0001794770206022187,
"loss": 0.3219,
"step": 260
},
{
"epoch": 0.21386138613861386,
"grad_norm": 0.033408161252737045,
"learning_rate": 0.00017868462757527736,
"loss": 0.2582,
"step": 270
},
{
"epoch": 0.22178217821782178,
"grad_norm": 0.03039587289094925,
"learning_rate": 0.000177892234548336,
"loss": 0.2687,
"step": 280
},
{
"epoch": 0.2297029702970297,
"grad_norm": 0.037947878241539,
"learning_rate": 0.00017709984152139462,
"loss": 0.2966,
"step": 290
},
{
"epoch": 0.2376237623762376,
"grad_norm": 0.03255503624677658,
"learning_rate": 0.00017630744849445328,
"loss": 0.2648,
"step": 300
},
{
"epoch": 0.24554455445544554,
"grad_norm": 0.029127761721611023,
"learning_rate": 0.0001755150554675119,
"loss": 0.2581,
"step": 310
},
{
"epoch": 0.25346534653465347,
"grad_norm": 0.03634035587310791,
"learning_rate": 0.00017472266244057054,
"loss": 0.2677,
"step": 320
},
{
"epoch": 0.2613861386138614,
"grad_norm": 0.030241692438721657,
"learning_rate": 0.00017393026941362917,
"loss": 0.2799,
"step": 330
},
{
"epoch": 0.2693069306930693,
"grad_norm": 0.034741729497909546,
"learning_rate": 0.00017313787638668782,
"loss": 0.2826,
"step": 340
},
{
"epoch": 0.27722772277227725,
"grad_norm": 0.03765638545155525,
"learning_rate": 0.00017234548335974645,
"loss": 0.2895,
"step": 350
},
{
"epoch": 0.2851485148514851,
"grad_norm": 0.026430374011397362,
"learning_rate": 0.00017155309033280508,
"loss": 0.2595,
"step": 360
},
{
"epoch": 0.29306930693069305,
"grad_norm": 0.03160027414560318,
"learning_rate": 0.00017076069730586374,
"loss": 0.271,
"step": 370
},
{
"epoch": 0.300990099009901,
"grad_norm": 0.03543638437986374,
"learning_rate": 0.00016996830427892237,
"loss": 0.2659,
"step": 380
},
{
"epoch": 0.3089108910891089,
"grad_norm": 0.03766461834311485,
"learning_rate": 0.000169175911251981,
"loss": 0.2729,
"step": 390
},
{
"epoch": 0.31683168316831684,
"grad_norm": 0.03187921643257141,
"learning_rate": 0.00016838351822503963,
"loss": 0.2656,
"step": 400
},
{
"epoch": 0.32475247524752476,
"grad_norm": 0.039832450449466705,
"learning_rate": 0.00016759112519809828,
"loss": 0.2582,
"step": 410
},
{
"epoch": 0.3326732673267327,
"grad_norm": 0.037864796817302704,
"learning_rate": 0.0001667987321711569,
"loss": 0.2789,
"step": 420
},
{
"epoch": 0.3405940594059406,
"grad_norm": 0.0412084236741066,
"learning_rate": 0.00016600633914421551,
"loss": 0.2697,
"step": 430
},
{
"epoch": 0.3485148514851485,
"grad_norm": 0.03728514164686203,
"learning_rate": 0.00016521394611727417,
"loss": 0.2574,
"step": 440
},
{
"epoch": 0.3564356435643564,
"grad_norm": 0.042556311935186386,
"learning_rate": 0.0001644215530903328,
"loss": 0.3146,
"step": 450
},
{
"epoch": 0.36435643564356435,
"grad_norm": 0.03390824422240257,
"learning_rate": 0.00016362916006339143,
"loss": 0.267,
"step": 460
},
{
"epoch": 0.3722772277227723,
"grad_norm": 0.04026597738265991,
"learning_rate": 0.0001628367670364501,
"loss": 0.3195,
"step": 470
},
{
"epoch": 0.3801980198019802,
"grad_norm": 0.04190334305167198,
"learning_rate": 0.00016204437400950872,
"loss": 0.2559,
"step": 480
},
{
"epoch": 0.38811881188118813,
"grad_norm": 0.029818540439009666,
"learning_rate": 0.00016125198098256735,
"loss": 0.2916,
"step": 490
},
{
"epoch": 0.39603960396039606,
"grad_norm": 0.02525998093187809,
"learning_rate": 0.000160459587955626,
"loss": 0.2422,
"step": 500
},
{
"epoch": 0.403960396039604,
"grad_norm": 0.03909703716635704,
"learning_rate": 0.00015966719492868463,
"loss": 0.2621,
"step": 510
},
{
"epoch": 0.41188118811881186,
"grad_norm": 0.034918032586574554,
"learning_rate": 0.00015887480190174326,
"loss": 0.2557,
"step": 520
},
{
"epoch": 0.4198019801980198,
"grad_norm": 0.0317477211356163,
"learning_rate": 0.0001580824088748019,
"loss": 0.2712,
"step": 530
},
{
"epoch": 0.4277227722772277,
"grad_norm": 0.036918554455041885,
"learning_rate": 0.00015729001584786055,
"loss": 0.2379,
"step": 540
},
{
"epoch": 0.43564356435643564,
"grad_norm": 0.029061611741781235,
"learning_rate": 0.00015649762282091918,
"loss": 0.2673,
"step": 550
},
{
"epoch": 0.44356435643564357,
"grad_norm": 0.03352155163884163,
"learning_rate": 0.0001557052297939778,
"loss": 0.329,
"step": 560
},
{
"epoch": 0.4514851485148515,
"grad_norm": 0.04538818076252937,
"learning_rate": 0.00015491283676703646,
"loss": 0.2628,
"step": 570
},
{
"epoch": 0.4594059405940594,
"grad_norm": 0.03284861892461777,
"learning_rate": 0.0001541204437400951,
"loss": 0.3,
"step": 580
},
{
"epoch": 0.46732673267326735,
"grad_norm": 0.03831164538860321,
"learning_rate": 0.00015332805071315372,
"loss": 0.3217,
"step": 590
},
{
"epoch": 0.4752475247524752,
"grad_norm": 0.038299866020679474,
"learning_rate": 0.00015253565768621235,
"loss": 0.2921,
"step": 600
},
{
"epoch": 0.48316831683168315,
"grad_norm": 0.039746932685375214,
"learning_rate": 0.000151743264659271,
"loss": 0.2629,
"step": 610
},
{
"epoch": 0.4910891089108911,
"grad_norm": 0.029749492183327675,
"learning_rate": 0.00015095087163232964,
"loss": 0.2698,
"step": 620
},
{
"epoch": 0.499009900990099,
"grad_norm": 0.032644934952259064,
"learning_rate": 0.00015015847860538827,
"loss": 0.249,
"step": 630
},
{
"epoch": 0.5069306930693069,
"grad_norm": 0.03892878070473671,
"learning_rate": 0.00014936608557844692,
"loss": 0.256,
"step": 640
},
{
"epoch": 0.5148514851485149,
"grad_norm": 0.0364990159869194,
"learning_rate": 0.00014857369255150555,
"loss": 0.257,
"step": 650
},
{
"epoch": 0.5227722772277228,
"grad_norm": 0.03153720125555992,
"learning_rate": 0.00014778129952456418,
"loss": 0.2549,
"step": 660
},
{
"epoch": 0.5306930693069307,
"grad_norm": 0.03544802591204643,
"learning_rate": 0.0001469889064976228,
"loss": 0.2918,
"step": 670
},
{
"epoch": 0.5386138613861386,
"grad_norm": 0.03145139664411545,
"learning_rate": 0.00014619651347068147,
"loss": 0.2597,
"step": 680
},
{
"epoch": 0.5465346534653466,
"grad_norm": 0.03319784253835678,
"learning_rate": 0.0001454041204437401,
"loss": 0.2877,
"step": 690
},
{
"epoch": 0.5544554455445545,
"grad_norm": 0.026824399828910828,
"learning_rate": 0.00014461172741679873,
"loss": 0.2516,
"step": 700
},
{
"epoch": 0.5623762376237624,
"grad_norm": 0.02784889005124569,
"learning_rate": 0.00014381933438985738,
"loss": 0.2495,
"step": 710
},
{
"epoch": 0.5702970297029702,
"grad_norm": 0.035639721900224686,
"learning_rate": 0.000143026941362916,
"loss": 0.2875,
"step": 720
},
{
"epoch": 0.5782178217821782,
"grad_norm": 0.02496819756925106,
"learning_rate": 0.00014223454833597464,
"loss": 0.2536,
"step": 730
},
{
"epoch": 0.5861386138613861,
"grad_norm": 0.03329946845769882,
"learning_rate": 0.0001414421553090333,
"loss": 0.2606,
"step": 740
},
{
"epoch": 0.594059405940594,
"grad_norm": 0.03752067685127258,
"learning_rate": 0.00014064976228209193,
"loss": 0.2412,
"step": 750
},
{
"epoch": 0.601980198019802,
"grad_norm": 0.029111869633197784,
"learning_rate": 0.00013985736925515056,
"loss": 0.2624,
"step": 760
},
{
"epoch": 0.6099009900990099,
"grad_norm": 0.04291738569736481,
"learning_rate": 0.0001390649762282092,
"loss": 0.2546,
"step": 770
},
{
"epoch": 0.6178217821782178,
"grad_norm": 0.028964821249246597,
"learning_rate": 0.00013827258320126784,
"loss": 0.2662,
"step": 780
},
{
"epoch": 0.6257425742574257,
"grad_norm": 0.03424842283129692,
"learning_rate": 0.00013748019017432647,
"loss": 0.2434,
"step": 790
},
{
"epoch": 0.6336633663366337,
"grad_norm": 0.02667427435517311,
"learning_rate": 0.0001366877971473851,
"loss": 0.2329,
"step": 800
},
{
"epoch": 0.6415841584158416,
"grad_norm": 0.02951924316585064,
"learning_rate": 0.00013589540412044376,
"loss": 0.2643,
"step": 810
},
{
"epoch": 0.6495049504950495,
"grad_norm": 0.023334722965955734,
"learning_rate": 0.0001351030110935024,
"loss": 0.2453,
"step": 820
},
{
"epoch": 0.6574257425742575,
"grad_norm": 0.03401855751872063,
"learning_rate": 0.00013431061806656102,
"loss": 0.2707,
"step": 830
},
{
"epoch": 0.6653465346534654,
"grad_norm": 0.033355824649333954,
"learning_rate": 0.00013351822503961965,
"loss": 0.2874,
"step": 840
},
{
"epoch": 0.6732673267326733,
"grad_norm": 0.027616815641522408,
"learning_rate": 0.0001327258320126783,
"loss": 0.2527,
"step": 850
},
{
"epoch": 0.6811881188118812,
"grad_norm": 0.03240259364247322,
"learning_rate": 0.00013193343898573693,
"loss": 0.2907,
"step": 860
},
{
"epoch": 0.689108910891089,
"grad_norm": 0.02760745771229267,
"learning_rate": 0.00013114104595879556,
"loss": 0.2452,
"step": 870
},
{
"epoch": 0.697029702970297,
"grad_norm": 0.034452375024557114,
"learning_rate": 0.00013034865293185422,
"loss": 0.2401,
"step": 880
},
{
"epoch": 0.7049504950495049,
"grad_norm": 0.04038127139210701,
"learning_rate": 0.00012955625990491285,
"loss": 0.2586,
"step": 890
},
{
"epoch": 0.7128712871287128,
"grad_norm": 0.03635507449507713,
"learning_rate": 0.00012876386687797148,
"loss": 0.2752,
"step": 900
},
{
"epoch": 0.7207920792079208,
"grad_norm": 0.030857287347316742,
"learning_rate": 0.0001279714738510301,
"loss": 0.2694,
"step": 910
},
{
"epoch": 0.7287128712871287,
"grad_norm": 0.035111233592033386,
"learning_rate": 0.00012717908082408876,
"loss": 0.2569,
"step": 920
},
{
"epoch": 0.7366336633663366,
"grad_norm": 0.025900471955537796,
"learning_rate": 0.0001263866877971474,
"loss": 0.2561,
"step": 930
},
{
"epoch": 0.7445544554455445,
"grad_norm": 0.035240888595581055,
"learning_rate": 0.00012559429477020602,
"loss": 0.2869,
"step": 940
},
{
"epoch": 0.7524752475247525,
"grad_norm": 0.03573041409254074,
"learning_rate": 0.00012480190174326468,
"loss": 0.2934,
"step": 950
},
{
"epoch": 0.7603960396039604,
"grad_norm": 0.03271041065454483,
"learning_rate": 0.0001240095087163233,
"loss": 0.24,
"step": 960
},
{
"epoch": 0.7683168316831683,
"grad_norm": 0.03942486643791199,
"learning_rate": 0.00012321711568938194,
"loss": 0.2798,
"step": 970
},
{
"epoch": 0.7762376237623763,
"grad_norm": 0.029904989525675774,
"learning_rate": 0.0001224247226624406,
"loss": 0.2456,
"step": 980
},
{
"epoch": 0.7841584158415842,
"grad_norm": 0.036968715488910675,
"learning_rate": 0.00012163232963549923,
"loss": 0.2683,
"step": 990
},
{
"epoch": 0.7920792079207921,
"grad_norm": 0.02765348181128502,
"learning_rate": 0.00012083993660855785,
"loss": 0.255,
"step": 1000
},
{
"epoch": 0.8,
"grad_norm": 0.02997688390314579,
"learning_rate": 0.0001200475435816165,
"loss": 0.2705,
"step": 1010
},
{
"epoch": 0.807920792079208,
"grad_norm": 0.03708113357424736,
"learning_rate": 0.00011925515055467513,
"loss": 0.251,
"step": 1020
},
{
"epoch": 0.8158415841584158,
"grad_norm": 0.03513709455728531,
"learning_rate": 0.00011846275752773377,
"loss": 0.2886,
"step": 1030
},
{
"epoch": 0.8237623762376237,
"grad_norm": 0.044748757034540176,
"learning_rate": 0.00011767036450079241,
"loss": 0.2695,
"step": 1040
},
{
"epoch": 0.8316831683168316,
"grad_norm": 0.03215530887246132,
"learning_rate": 0.00011687797147385104,
"loss": 0.2674,
"step": 1050
},
{
"epoch": 0.8396039603960396,
"grad_norm": 0.0335354283452034,
"learning_rate": 0.00011608557844690969,
"loss": 0.261,
"step": 1060
},
{
"epoch": 0.8475247524752475,
"grad_norm": 0.03107100911438465,
"learning_rate": 0.00011529318541996832,
"loss": 0.2453,
"step": 1070
},
{
"epoch": 0.8554455445544554,
"grad_norm": 0.02802272140979767,
"learning_rate": 0.00011450079239302696,
"loss": 0.2557,
"step": 1080
},
{
"epoch": 0.8633663366336634,
"grad_norm": 0.03208250179886818,
"learning_rate": 0.00011370839936608559,
"loss": 0.2871,
"step": 1090
},
{
"epoch": 0.8712871287128713,
"grad_norm": 0.03218165040016174,
"learning_rate": 0.00011291600633914423,
"loss": 0.2274,
"step": 1100
},
{
"epoch": 0.8792079207920792,
"grad_norm": 0.03343340381979942,
"learning_rate": 0.00011212361331220287,
"loss": 0.284,
"step": 1110
},
{
"epoch": 0.8871287128712871,
"grad_norm": 0.0337323360145092,
"learning_rate": 0.0001113312202852615,
"loss": 0.2442,
"step": 1120
},
{
"epoch": 0.8950495049504951,
"grad_norm": 0.03257569298148155,
"learning_rate": 0.00011053882725832012,
"loss": 0.2639,
"step": 1130
},
{
"epoch": 0.902970297029703,
"grad_norm": 0.035853032022714615,
"learning_rate": 0.00010974643423137876,
"loss": 0.2767,
"step": 1140
},
{
"epoch": 0.9108910891089109,
"grad_norm": 0.03382259979844093,
"learning_rate": 0.00010895404120443739,
"loss": 0.2789,
"step": 1150
},
{
"epoch": 0.9188118811881189,
"grad_norm": 0.033024683594703674,
"learning_rate": 0.00010816164817749603,
"loss": 0.2724,
"step": 1160
},
{
"epoch": 0.9267326732673268,
"grad_norm": 0.029246920719742775,
"learning_rate": 0.00010736925515055466,
"loss": 0.224,
"step": 1170
},
{
"epoch": 0.9346534653465347,
"grad_norm": 0.03842251002788544,
"learning_rate": 0.00010657686212361331,
"loss": 0.2733,
"step": 1180
},
{
"epoch": 0.9425742574257425,
"grad_norm": 0.027735207229852676,
"learning_rate": 0.00010578446909667195,
"loss": 0.2592,
"step": 1190
},
{
"epoch": 0.9504950495049505,
"grad_norm": 0.03715645521879196,
"learning_rate": 0.00010499207606973058,
"loss": 0.2495,
"step": 1200
},
{
"epoch": 0.9584158415841584,
"grad_norm": 0.04229114204645157,
"learning_rate": 0.00010419968304278922,
"loss": 0.2926,
"step": 1210
},
{
"epoch": 0.9663366336633663,
"grad_norm": 0.0291130393743515,
"learning_rate": 0.00010340729001584785,
"loss": 0.2273,
"step": 1220
},
{
"epoch": 0.9742574257425742,
"grad_norm": 0.03738139569759369,
"learning_rate": 0.0001026148969889065,
"loss": 0.2657,
"step": 1230
},
{
"epoch": 0.9821782178217822,
"grad_norm": 0.02908489853143692,
"learning_rate": 0.00010182250396196514,
"loss": 0.2354,
"step": 1240
},
{
"epoch": 0.9900990099009901,
"grad_norm": 0.03095426596701145,
"learning_rate": 0.00010103011093502377,
"loss": 0.2518,
"step": 1250
},
{
"epoch": 0.998019801980198,
"grad_norm": 0.029647527262568474,
"learning_rate": 0.00010023771790808241,
"loss": 0.2573,
"step": 1260
},
{
"epoch": 1.0055445544554455,
"grad_norm": 0.02596193552017212,
"learning_rate": 9.944532488114105e-05,
"loss": 0.2485,
"step": 1270
},
{
"epoch": 1.0134653465346535,
"grad_norm": 0.0323275551199913,
"learning_rate": 9.86529318541997e-05,
"loss": 0.2596,
"step": 1280
},
{
"epoch": 1.0213861386138614,
"grad_norm": 0.03321196511387825,
"learning_rate": 9.786053882725833e-05,
"loss": 0.2619,
"step": 1290
},
{
"epoch": 1.0293069306930693,
"grad_norm": 0.03337240219116211,
"learning_rate": 9.706814580031696e-05,
"loss": 0.2638,
"step": 1300
},
{
"epoch": 1.0372277227722773,
"grad_norm": 0.03522242233157158,
"learning_rate": 9.62757527733756e-05,
"loss": 0.2398,
"step": 1310
},
{
"epoch": 1.0451485148514852,
"grad_norm": 0.03158261254429817,
"learning_rate": 9.548335974643423e-05,
"loss": 0.2626,
"step": 1320
},
{
"epoch": 1.0530693069306931,
"grad_norm": 0.025289108976721764,
"learning_rate": 9.469096671949287e-05,
"loss": 0.2706,
"step": 1330
},
{
"epoch": 1.060990099009901,
"grad_norm": 0.030833808705210686,
"learning_rate": 9.38985736925515e-05,
"loss": 0.2246,
"step": 1340
},
{
"epoch": 1.068910891089109,
"grad_norm": 0.03905413672327995,
"learning_rate": 9.310618066561014e-05,
"loss": 0.2578,
"step": 1350
},
{
"epoch": 1.076831683168317,
"grad_norm": 0.03793856501579285,
"learning_rate": 9.231378763866879e-05,
"loss": 0.266,
"step": 1360
},
{
"epoch": 1.0847524752475248,
"grad_norm": 0.039849117398262024,
"learning_rate": 9.152139461172742e-05,
"loss": 0.2591,
"step": 1370
},
{
"epoch": 1.0926732673267328,
"grad_norm": 0.03838738054037094,
"learning_rate": 9.072900158478606e-05,
"loss": 0.2553,
"step": 1380
},
{
"epoch": 1.1005940594059407,
"grad_norm": 0.032800108194351196,
"learning_rate": 8.993660855784469e-05,
"loss": 0.2328,
"step": 1390
},
{
"epoch": 1.1085148514851486,
"grad_norm": 0.03500501811504364,
"learning_rate": 8.914421553090333e-05,
"loss": 0.251,
"step": 1400
},
{
"epoch": 1.1164356435643565,
"grad_norm": 0.03320365026593208,
"learning_rate": 8.835182250396196e-05,
"loss": 0.274,
"step": 1410
},
{
"epoch": 1.1243564356435645,
"grad_norm": 0.033811185508966446,
"learning_rate": 8.75594294770206e-05,
"loss": 0.2525,
"step": 1420
},
{
"epoch": 1.1322772277227724,
"grad_norm": 0.03873720020055771,
"learning_rate": 8.676703645007925e-05,
"loss": 0.2767,
"step": 1430
},
{
"epoch": 1.1401980198019803,
"grad_norm": 0.030054964125156403,
"learning_rate": 8.597464342313788e-05,
"loss": 0.257,
"step": 1440
},
{
"epoch": 1.148118811881188,
"grad_norm": 0.03347312659025192,
"learning_rate": 8.518225039619652e-05,
"loss": 0.2583,
"step": 1450
},
{
"epoch": 1.156039603960396,
"grad_norm": 0.023901082575321198,
"learning_rate": 8.438985736925515e-05,
"loss": 0.2286,
"step": 1460
},
{
"epoch": 1.1639603960396039,
"grad_norm": 0.030662311241030693,
"learning_rate": 8.359746434231379e-05,
"loss": 0.272,
"step": 1470
},
{
"epoch": 1.1718811881188118,
"grad_norm": 0.035689935088157654,
"learning_rate": 8.280507131537243e-05,
"loss": 0.2593,
"step": 1480
},
{
"epoch": 1.1798019801980197,
"grad_norm": 0.04119367152452469,
"learning_rate": 8.201267828843106e-05,
"loss": 0.2778,
"step": 1490
},
{
"epoch": 1.1877227722772277,
"grad_norm": 0.033894915133714676,
"learning_rate": 8.122028526148971e-05,
"loss": 0.2727,
"step": 1500
},
{
"epoch": 1.1956435643564356,
"grad_norm": 0.031137267127633095,
"learning_rate": 8.042789223454834e-05,
"loss": 0.2734,
"step": 1510
},
{
"epoch": 1.2035643564356435,
"grad_norm": 0.038331665098667145,
"learning_rate": 7.963549920760698e-05,
"loss": 0.2925,
"step": 1520
},
{
"epoch": 1.2114851485148515,
"grad_norm": 0.0408506877720356,
"learning_rate": 7.884310618066561e-05,
"loss": 0.2921,
"step": 1530
},
{
"epoch": 1.2194059405940594,
"grad_norm": 0.04389080032706261,
"learning_rate": 7.805071315372425e-05,
"loss": 0.2538,
"step": 1540
},
{
"epoch": 1.2273267326732673,
"grad_norm": 0.03198599815368652,
"learning_rate": 7.72583201267829e-05,
"loss": 0.2759,
"step": 1550
},
{
"epoch": 1.2352475247524752,
"grad_norm": 0.03177543357014656,
"learning_rate": 7.646592709984152e-05,
"loss": 0.2514,
"step": 1560
},
{
"epoch": 1.2431683168316832,
"grad_norm": 0.02572786435484886,
"learning_rate": 7.567353407290017e-05,
"loss": 0.2293,
"step": 1570
},
{
"epoch": 1.251089108910891,
"grad_norm": 0.03768354281783104,
"learning_rate": 7.48811410459588e-05,
"loss": 0.2521,
"step": 1580
},
{
"epoch": 1.259009900990099,
"grad_norm": 0.03194349259138107,
"learning_rate": 7.408874801901744e-05,
"loss": 0.2912,
"step": 1590
},
{
"epoch": 1.266930693069307,
"grad_norm": 0.02686484344303608,
"learning_rate": 7.329635499207608e-05,
"loss": 0.261,
"step": 1600
},
{
"epoch": 1.2748514851485149,
"grad_norm": 0.03775911033153534,
"learning_rate": 7.250396196513471e-05,
"loss": 0.2744,
"step": 1610
},
{
"epoch": 1.2827722772277228,
"grad_norm": 0.03530183061957359,
"learning_rate": 7.171156893819336e-05,
"loss": 0.2622,
"step": 1620
},
{
"epoch": 1.2906930693069307,
"grad_norm": 0.03804347291588783,
"learning_rate": 7.091917591125199e-05,
"loss": 0.2877,
"step": 1630
},
{
"epoch": 1.2986138613861387,
"grad_norm": 0.03272108733654022,
"learning_rate": 7.012678288431063e-05,
"loss": 0.2576,
"step": 1640
},
{
"epoch": 1.3065346534653466,
"grad_norm": 0.03325280174612999,
"learning_rate": 6.933438985736926e-05,
"loss": 0.2714,
"step": 1650
},
{
"epoch": 1.3144554455445545,
"grad_norm": 0.026619790121912956,
"learning_rate": 6.854199683042789e-05,
"loss": 0.2279,
"step": 1660
},
{
"epoch": 1.3223762376237624,
"grad_norm": 0.039562638849020004,
"learning_rate": 6.774960380348653e-05,
"loss": 0.2867,
"step": 1670
},
{
"epoch": 1.3302970297029704,
"grad_norm": 0.03468257933855057,
"learning_rate": 6.695721077654516e-05,
"loss": 0.2634,
"step": 1680
},
{
"epoch": 1.3382178217821783,
"grad_norm": 0.025986766442656517,
"learning_rate": 6.61648177496038e-05,
"loss": 0.224,
"step": 1690
},
{
"epoch": 1.346138613861386,
"grad_norm": 0.03160301595926285,
"learning_rate": 6.537242472266245e-05,
"loss": 0.2952,
"step": 1700
},
{
"epoch": 1.354059405940594,
"grad_norm": 0.03343851491808891,
"learning_rate": 6.458003169572108e-05,
"loss": 0.2447,
"step": 1710
},
{
"epoch": 1.3619801980198019,
"grad_norm": 0.04069166257977486,
"learning_rate": 6.378763866877972e-05,
"loss": 0.2356,
"step": 1720
},
{
"epoch": 1.3699009900990098,
"grad_norm": 0.031248316168785095,
"learning_rate": 6.299524564183835e-05,
"loss": 0.2275,
"step": 1730
},
{
"epoch": 1.3778217821782177,
"grad_norm": 0.04714437574148178,
"learning_rate": 6.220285261489699e-05,
"loss": 0.2949,
"step": 1740
},
{
"epoch": 1.3857425742574256,
"grad_norm": 0.03378450125455856,
"learning_rate": 6.141045958795562e-05,
"loss": 0.2812,
"step": 1750
},
{
"epoch": 1.3936633663366336,
"grad_norm": 0.03825616091489792,
"learning_rate": 6.061806656101426e-05,
"loss": 0.2662,
"step": 1760
},
{
"epoch": 1.4015841584158415,
"grad_norm": 0.03776633366942406,
"learning_rate": 5.98256735340729e-05,
"loss": 0.2986,
"step": 1770
},
{
"epoch": 1.4095049504950494,
"grad_norm": 0.030764272436499596,
"learning_rate": 5.9033280507131536e-05,
"loss": 0.2462,
"step": 1780
},
{
"epoch": 1.4174257425742574,
"grad_norm": 0.03300099819898605,
"learning_rate": 5.824088748019018e-05,
"loss": 0.2423,
"step": 1790
},
{
"epoch": 1.4253465346534653,
"grad_norm": 0.03446972742676735,
"learning_rate": 5.7448494453248815e-05,
"loss": 0.2428,
"step": 1800
},
{
"epoch": 1.4332673267326732,
"grad_norm": 0.032901886850595474,
"learning_rate": 5.665610142630745e-05,
"loss": 0.2436,
"step": 1810
},
{
"epoch": 1.4411881188118811,
"grad_norm": 0.04104743152856827,
"learning_rate": 5.586370839936609e-05,
"loss": 0.2625,
"step": 1820
},
{
"epoch": 1.449108910891089,
"grad_norm": 0.033070165663957596,
"learning_rate": 5.5071315372424724e-05,
"loss": 0.2588,
"step": 1830
},
{
"epoch": 1.457029702970297,
"grad_norm": 0.03634243831038475,
"learning_rate": 5.427892234548336e-05,
"loss": 0.26,
"step": 1840
},
{
"epoch": 1.464950495049505,
"grad_norm": 0.033768270164728165,
"learning_rate": 5.3486529318542e-05,
"loss": 0.2467,
"step": 1850
},
{
"epoch": 1.4728712871287128,
"grad_norm": 0.036840010434389114,
"learning_rate": 5.269413629160064e-05,
"loss": 0.2563,
"step": 1860
},
{
"epoch": 1.4807920792079208,
"grad_norm": 0.03726380318403244,
"learning_rate": 5.1901743264659275e-05,
"loss": 0.298,
"step": 1870
},
{
"epoch": 1.4887128712871287,
"grad_norm": 0.032144658267498016,
"learning_rate": 5.110935023771791e-05,
"loss": 0.275,
"step": 1880
},
{
"epoch": 1.4966336633663366,
"grad_norm": 0.027517810463905334,
"learning_rate": 5.031695721077655e-05,
"loss": 0.2675,
"step": 1890
},
{
"epoch": 1.5045544554455446,
"grad_norm": 0.0347113199532032,
"learning_rate": 4.9524564183835184e-05,
"loss": 0.2453,
"step": 1900
},
{
"epoch": 1.5124752475247525,
"grad_norm": 0.03786664828658104,
"learning_rate": 4.873217115689383e-05,
"loss": 0.2783,
"step": 1910
},
{
"epoch": 1.5203960396039604,
"grad_norm": 0.03823668137192726,
"learning_rate": 4.793977812995246e-05,
"loss": 0.2248,
"step": 1920
},
{
"epoch": 1.5283168316831683,
"grad_norm": 0.04581904783844948,
"learning_rate": 4.714738510301109e-05,
"loss": 0.2662,
"step": 1930
},
{
"epoch": 1.5362376237623763,
"grad_norm": 0.03468246012926102,
"learning_rate": 4.635499207606973e-05,
"loss": 0.244,
"step": 1940
},
{
"epoch": 1.5441584158415842,
"grad_norm": 0.033897291868925095,
"learning_rate": 4.5562599049128365e-05,
"loss": 0.2751,
"step": 1950
},
{
"epoch": 1.5520792079207921,
"grad_norm": 0.03185650333762169,
"learning_rate": 4.477020602218701e-05,
"loss": 0.296,
"step": 1960
},
{
"epoch": 1.56,
"grad_norm": 0.032408442348241806,
"learning_rate": 4.3977812995245645e-05,
"loss": 0.2943,
"step": 1970
},
{
"epoch": 1.567920792079208,
"grad_norm": 0.02897750772535801,
"learning_rate": 4.318541996830428e-05,
"loss": 0.2473,
"step": 1980
},
{
"epoch": 1.575841584158416,
"grad_norm": 0.03331100195646286,
"learning_rate": 4.239302694136292e-05,
"loss": 0.2875,
"step": 1990
},
{
"epoch": 1.5837623762376238,
"grad_norm": 0.02858470194041729,
"learning_rate": 4.160063391442155e-05,
"loss": 0.247,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2524,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.0863914532562534e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}