{
"best_global_step": 383,
"best_metric": 0.3038630783557892,
"best_model_checkpoint": "./lora_qwen7b_cpp_abdiff_v1/checkpoint-383",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1149,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02611818478615736,
"grad_norm": 0.272135853767395,
"learning_rate": 2.347826086956522e-06,
"loss": 0.747,
"step": 10
},
{
"epoch": 0.05223636957231472,
"grad_norm": 0.2994903028011322,
"learning_rate": 4.956521739130435e-06,
"loss": 0.729,
"step": 20
},
{
"epoch": 0.07835455435847209,
"grad_norm": 0.3347424566745758,
"learning_rate": 7.5652173913043475e-06,
"loss": 0.7694,
"step": 30
},
{
"epoch": 0.10447273914462944,
"grad_norm": 0.24008171260356903,
"learning_rate": 1.017391304347826e-05,
"loss": 0.6931,
"step": 40
},
{
"epoch": 0.1305909239307868,
"grad_norm": 0.31324484944343567,
"learning_rate": 1.2782608695652173e-05,
"loss": 0.6601,
"step": 50
},
{
"epoch": 0.15670910871694418,
"grad_norm": 0.3652394115924835,
"learning_rate": 1.5391304347826088e-05,
"loss": 0.4995,
"step": 60
},
{
"epoch": 0.18282729350310153,
"grad_norm": 0.18758858740329742,
"learning_rate": 1.8e-05,
"loss": 0.3874,
"step": 70
},
{
"epoch": 0.20894547828925888,
"grad_norm": 0.11186491698026657,
"learning_rate": 2.0608695652173913e-05,
"loss": 0.3579,
"step": 80
},
{
"epoch": 0.23506366307541626,
"grad_norm": 0.0821973979473114,
"learning_rate": 2.3217391304347826e-05,
"loss": 0.3375,
"step": 90
},
{
"epoch": 0.2611818478615736,
"grad_norm": 0.10780682414770126,
"learning_rate": 2.582608695652174e-05,
"loss": 0.3286,
"step": 100
},
{
"epoch": 0.28730003264773096,
"grad_norm": 0.09552709758281708,
"learning_rate": 2.8434782608695652e-05,
"loss": 0.3135,
"step": 110
},
{
"epoch": 0.31341821743388837,
"grad_norm": 0.1099899634718895,
"learning_rate": 2.988394584139265e-05,
"loss": 0.2828,
"step": 120
},
{
"epoch": 0.3395364022200457,
"grad_norm": 0.08481885492801666,
"learning_rate": 2.9593810444874276e-05,
"loss": 0.2884,
"step": 130
},
{
"epoch": 0.36565458700620307,
"grad_norm": 0.0872187465429306,
"learning_rate": 2.93036750483559e-05,
"loss": 0.2856,
"step": 140
},
{
"epoch": 0.3917727717923604,
"grad_norm": 0.09415256232023239,
"learning_rate": 2.9013539651837528e-05,
"loss": 0.2958,
"step": 150
},
{
"epoch": 0.41789095657851777,
"grad_norm": 0.09529490023851395,
"learning_rate": 2.872340425531915e-05,
"loss": 0.2832,
"step": 160
},
{
"epoch": 0.4440091413646752,
"grad_norm": 0.12695518136024475,
"learning_rate": 2.8433268858800773e-05,
"loss": 0.2852,
"step": 170
},
{
"epoch": 0.4701273261508325,
"grad_norm": 0.09822621941566467,
"learning_rate": 2.81431334622824e-05,
"loss": 0.2647,
"step": 180
},
{
"epoch": 0.4962455109369899,
"grad_norm": 0.11181768029928207,
"learning_rate": 2.785299806576402e-05,
"loss": 0.2718,
"step": 190
},
{
"epoch": 0.5223636957231472,
"grad_norm": 0.0999976173043251,
"learning_rate": 2.7562862669245647e-05,
"loss": 0.287,
"step": 200
},
{
"epoch": 0.5484818805093046,
"grad_norm": 0.11232498288154602,
"learning_rate": 2.7272727272727273e-05,
"loss": 0.2657,
"step": 210
},
{
"epoch": 0.5746000652954619,
"grad_norm": 0.09556713700294495,
"learning_rate": 2.69825918762089e-05,
"loss": 0.2728,
"step": 220
},
{
"epoch": 0.6007182500816193,
"grad_norm": 0.09838665276765823,
"learning_rate": 2.669245647969052e-05,
"loss": 0.2679,
"step": 230
},
{
"epoch": 0.6268364348677767,
"grad_norm": 0.12542720139026642,
"learning_rate": 2.6402321083172148e-05,
"loss": 0.2717,
"step": 240
},
{
"epoch": 0.6529546196539341,
"grad_norm": 0.12866875529289246,
"learning_rate": 2.6112185686653773e-05,
"loss": 0.2722,
"step": 250
},
{
"epoch": 0.6790728044400914,
"grad_norm": 0.12063395977020264,
"learning_rate": 2.5822050290135396e-05,
"loss": 0.2665,
"step": 260
},
{
"epoch": 0.7051909892262488,
"grad_norm": 0.12060956656932831,
"learning_rate": 2.5531914893617022e-05,
"loss": 0.2498,
"step": 270
},
{
"epoch": 0.7313091740124061,
"grad_norm": 0.1269434541463852,
"learning_rate": 2.5241779497098648e-05,
"loss": 0.2685,
"step": 280
},
{
"epoch": 0.7574273587985635,
"grad_norm": 0.13224200904369354,
"learning_rate": 2.495164410058027e-05,
"loss": 0.2562,
"step": 290
},
{
"epoch": 0.7835455435847208,
"grad_norm": 0.12492657452821732,
"learning_rate": 2.4661508704061896e-05,
"loss": 0.2436,
"step": 300
},
{
"epoch": 0.8096637283708782,
"grad_norm": 0.19281449913978577,
"learning_rate": 2.4371373307543522e-05,
"loss": 0.2593,
"step": 310
},
{
"epoch": 0.8357819131570355,
"grad_norm": 0.13644647598266602,
"learning_rate": 2.408123791102515e-05,
"loss": 0.2678,
"step": 320
},
{
"epoch": 0.861900097943193,
"grad_norm": 0.13078241050243378,
"learning_rate": 2.379110251450677e-05,
"loss": 0.2586,
"step": 330
},
{
"epoch": 0.8880182827293504,
"grad_norm": 0.16267353296279907,
"learning_rate": 2.3500967117988397e-05,
"loss": 0.2441,
"step": 340
},
{
"epoch": 0.9141364675155077,
"grad_norm": 0.14218953251838684,
"learning_rate": 2.321083172147002e-05,
"loss": 0.2561,
"step": 350
},
{
"epoch": 0.940254652301665,
"grad_norm": 0.14463242888450623,
"learning_rate": 2.2920696324951642e-05,
"loss": 0.2437,
"step": 360
},
{
"epoch": 0.9663728370878224,
"grad_norm": 0.15065905451774597,
"learning_rate": 2.2630560928433268e-05,
"loss": 0.2448,
"step": 370
},
{
"epoch": 0.9924910218739798,
"grad_norm": 0.1440647393465042,
"learning_rate": 2.2340425531914894e-05,
"loss": 0.2407,
"step": 380
},
{
"epoch": 1.0,
"eval_loss": 0.3038630783557892,
"eval_runtime": 46.9955,
"eval_samples_per_second": 19.534,
"eval_steps_per_second": 9.767,
"step": 383
},
{
"epoch": 1.01828272935031,
"grad_norm": 0.18325522541999817,
"learning_rate": 2.2050290135396516e-05,
"loss": 0.2475,
"step": 390
},
{
"epoch": 1.0444009141364674,
"grad_norm": 0.19586290419101715,
"learning_rate": 2.1760154738878142e-05,
"loss": 0.2269,
"step": 400
},
{
"epoch": 1.070519098922625,
"grad_norm": 0.1957836151123047,
"learning_rate": 2.1470019342359768e-05,
"loss": 0.2383,
"step": 410
},
{
"epoch": 1.0966372837087823,
"grad_norm": 0.20044924318790436,
"learning_rate": 2.1179883945841394e-05,
"loss": 0.2277,
"step": 420
},
{
"epoch": 1.1227554684949397,
"grad_norm": 0.17480885982513428,
"learning_rate": 2.0889748549323017e-05,
"loss": 0.2364,
"step": 430
},
{
"epoch": 1.148873653281097,
"grad_norm": 0.22346200048923492,
"learning_rate": 2.0599613152804643e-05,
"loss": 0.2337,
"step": 440
},
{
"epoch": 1.1749918380672544,
"grad_norm": 0.18232356011867523,
"learning_rate": 2.030947775628627e-05,
"loss": 0.2391,
"step": 450
},
{
"epoch": 1.2011100228534117,
"grad_norm": 0.21770867705345154,
"learning_rate": 2.001934235976789e-05,
"loss": 0.2219,
"step": 460
},
{
"epoch": 1.227228207639569,
"grad_norm": 0.23520149290561676,
"learning_rate": 1.9729206963249517e-05,
"loss": 0.2204,
"step": 470
},
{
"epoch": 1.2533463924257264,
"grad_norm": 0.2378666251897812,
"learning_rate": 1.9439071566731143e-05,
"loss": 0.2185,
"step": 480
},
{
"epoch": 1.2794645772118838,
"grad_norm": 0.23565010726451874,
"learning_rate": 1.914893617021277e-05,
"loss": 0.2013,
"step": 490
},
{
"epoch": 1.3055827619980411,
"grad_norm": 0.2177935689687729,
"learning_rate": 1.885880077369439e-05,
"loss": 0.2321,
"step": 500
},
{
"epoch": 1.3317009467841985,
"grad_norm": 0.2423669397830963,
"learning_rate": 1.8568665377176018e-05,
"loss": 0.2155,
"step": 510
},
{
"epoch": 1.3578191315703558,
"grad_norm": 0.2234184592962265,
"learning_rate": 1.8278529980657643e-05,
"loss": 0.207,
"step": 520
},
{
"epoch": 1.3839373163565132,
"grad_norm": 0.27478086948394775,
"learning_rate": 1.7988394584139263e-05,
"loss": 0.202,
"step": 530
},
{
"epoch": 1.4100555011426705,
"grad_norm": 0.23157715797424316,
"learning_rate": 1.769825918762089e-05,
"loss": 0.2132,
"step": 540
},
{
"epoch": 1.4361736859288279,
"grad_norm": 0.2137601226568222,
"learning_rate": 1.7408123791102515e-05,
"loss": 0.2206,
"step": 550
},
{
"epoch": 1.4622918707149852,
"grad_norm": 0.25221434235572815,
"learning_rate": 1.7117988394584137e-05,
"loss": 0.1936,
"step": 560
},
{
"epoch": 1.4884100555011428,
"grad_norm": 0.265802800655365,
"learning_rate": 1.6827852998065763e-05,
"loss": 0.1927,
"step": 570
},
{
"epoch": 1.5145282402873002,
"grad_norm": 0.29695677757263184,
"learning_rate": 1.653771760154739e-05,
"loss": 0.1901,
"step": 580
},
{
"epoch": 1.5406464250734575,
"grad_norm": 0.28603777289390564,
"learning_rate": 1.6247582205029015e-05,
"loss": 0.1817,
"step": 590
},
{
"epoch": 1.5667646098596149,
"grad_norm": 0.27603891491889954,
"learning_rate": 1.5957446808510637e-05,
"loss": 0.1796,
"step": 600
},
{
"epoch": 1.5928827946457722,
"grad_norm": 0.23388804495334625,
"learning_rate": 1.5667311411992263e-05,
"loss": 0.2099,
"step": 610
},
{
"epoch": 1.6190009794319296,
"grad_norm": 0.3027153015136719,
"learning_rate": 1.537717601547389e-05,
"loss": 0.1874,
"step": 620
},
{
"epoch": 1.645119164218087,
"grad_norm": 0.23569999635219574,
"learning_rate": 1.5087040618955514e-05,
"loss": 0.2092,
"step": 630
},
{
"epoch": 1.6712373490042443,
"grad_norm": 0.28446313738822937,
"learning_rate": 1.4796905222437138e-05,
"loss": 0.1928,
"step": 640
},
{
"epoch": 1.6973555337904016,
"grad_norm": 0.33438780903816223,
"learning_rate": 1.4506769825918764e-05,
"loss": 0.2076,
"step": 650
},
{
"epoch": 1.723473718576559,
"grad_norm": 0.24610307812690735,
"learning_rate": 1.4216634429400386e-05,
"loss": 0.1834,
"step": 660
},
{
"epoch": 1.7495919033627163,
"grad_norm": 0.3221604526042938,
"learning_rate": 1.392649903288201e-05,
"loss": 0.1886,
"step": 670
},
{
"epoch": 1.7757100881488737,
"grad_norm": 0.30149781703948975,
"learning_rate": 1.3636363636363637e-05,
"loss": 0.1903,
"step": 680
},
{
"epoch": 1.801828272935031,
"grad_norm": 0.26918601989746094,
"learning_rate": 1.334622823984526e-05,
"loss": 0.1847,
"step": 690
},
{
"epoch": 1.8279464577211884,
"grad_norm": 0.36477166414260864,
"learning_rate": 1.3056092843326887e-05,
"loss": 0.1873,
"step": 700
},
{
"epoch": 1.8540646425073457,
"grad_norm": 0.2950615882873535,
"learning_rate": 1.2765957446808511e-05,
"loss": 0.1884,
"step": 710
},
{
"epoch": 1.880182827293503,
"grad_norm": 0.29690101742744446,
"learning_rate": 1.2475822050290135e-05,
"loss": 0.1876,
"step": 720
},
{
"epoch": 1.9063010120796604,
"grad_norm": 0.3099982738494873,
"learning_rate": 1.2185686653771761e-05,
"loss": 0.1778,
"step": 730
},
{
"epoch": 1.9324191968658178,
"grad_norm": 0.32279762625694275,
"learning_rate": 1.1895551257253385e-05,
"loss": 0.1854,
"step": 740
},
{
"epoch": 1.958537381651975,
"grad_norm": 0.2992270588874817,
"learning_rate": 1.160541586073501e-05,
"loss": 0.1747,
"step": 750
},
{
"epoch": 1.9846555664381325,
"grad_norm": 0.27671152353286743,
"learning_rate": 1.1315280464216634e-05,
"loss": 0.1624,
"step": 760
},
{
"epoch": 2.0,
"eval_loss": 0.31829941272735596,
"eval_runtime": 46.8149,
"eval_samples_per_second": 19.609,
"eval_steps_per_second": 9.805,
"step": 766
},
{
"epoch": 2.0104472739144628,
"grad_norm": 0.302716463804245,
"learning_rate": 1.1025145067698258e-05,
"loss": 0.1923,
"step": 770
},
{
"epoch": 2.03656545870062,
"grad_norm": 0.2851350009441376,
"learning_rate": 1.0735009671179884e-05,
"loss": 0.1759,
"step": 780
},
{
"epoch": 2.0626836434867775,
"grad_norm": 0.3431122303009033,
"learning_rate": 1.0444874274661508e-05,
"loss": 0.1682,
"step": 790
},
{
"epoch": 2.088801828272935,
"grad_norm": 0.33571285009384155,
"learning_rate": 1.0154738878143134e-05,
"loss": 0.1568,
"step": 800
},
{
"epoch": 2.1149200130590926,
"grad_norm": 0.38898637890815735,
"learning_rate": 9.864603481624759e-06,
"loss": 0.1543,
"step": 810
},
{
"epoch": 2.14103819784525,
"grad_norm": 0.30346909165382385,
"learning_rate": 9.574468085106385e-06,
"loss": 0.1592,
"step": 820
},
{
"epoch": 2.1671563826314073,
"grad_norm": 0.33506202697753906,
"learning_rate": 9.284332688588009e-06,
"loss": 0.1853,
"step": 830
},
{
"epoch": 2.1932745674175647,
"grad_norm": 0.30120134353637695,
"learning_rate": 8.994197292069631e-06,
"loss": 0.1621,
"step": 840
},
{
"epoch": 2.219392752203722,
"grad_norm": 0.35409367084503174,
"learning_rate": 8.704061895551257e-06,
"loss": 0.1733,
"step": 850
},
{
"epoch": 2.2455109369898794,
"grad_norm": 0.4092079699039459,
"learning_rate": 8.413926499032882e-06,
"loss": 0.1616,
"step": 860
},
{
"epoch": 2.2716291217760367,
"grad_norm": 0.3036758005619049,
"learning_rate": 8.123791102514507e-06,
"loss": 0.1703,
"step": 870
},
{
"epoch": 2.297747306562194,
"grad_norm": 0.40276363492012024,
"learning_rate": 7.833655705996132e-06,
"loss": 0.1591,
"step": 880
},
{
"epoch": 2.3238654913483514,
"grad_norm": 0.3477386236190796,
"learning_rate": 7.543520309477757e-06,
"loss": 0.1848,
"step": 890
},
{
"epoch": 2.3499836761345088,
"grad_norm": 0.3512537479400635,
"learning_rate": 7.253384912959382e-06,
"loss": 0.1668,
"step": 900
},
{
"epoch": 2.376101860920666,
"grad_norm": 0.3957723081111908,
"learning_rate": 6.963249516441005e-06,
"loss": 0.1568,
"step": 910
},
{
"epoch": 2.4022200457068235,
"grad_norm": 0.36369815468788147,
"learning_rate": 6.67311411992263e-06,
"loss": 0.164,
"step": 920
},
{
"epoch": 2.428338230492981,
"grad_norm": 0.37673720717430115,
"learning_rate": 6.3829787234042555e-06,
"loss": 0.1635,
"step": 930
},
{
"epoch": 2.454456415279138,
"grad_norm": 0.42920976877212524,
"learning_rate": 6.092843326885881e-06,
"loss": 0.1608,
"step": 940
},
{
"epoch": 2.4805746000652955,
"grad_norm": 0.42978808283805847,
"learning_rate": 5.802707930367505e-06,
"loss": 0.1638,
"step": 950
},
{
"epoch": 2.506692784851453,
"grad_norm": 0.4102988839149475,
"learning_rate": 5.512572533849129e-06,
"loss": 0.1654,
"step": 960
},
{
"epoch": 2.53281096963761,
"grad_norm": 0.33200573921203613,
"learning_rate": 5.222437137330754e-06,
"loss": 0.1587,
"step": 970
},
{
"epoch": 2.5589291544237676,
"grad_norm": 0.34544673562049866,
"learning_rate": 4.932301740812379e-06,
"loss": 0.1422,
"step": 980
},
{
"epoch": 2.585047339209925,
"grad_norm": 0.3770700693130493,
"learning_rate": 4.642166344294004e-06,
"loss": 0.1533,
"step": 990
},
{
"epoch": 2.6111655239960823,
"grad_norm": 0.4285246431827545,
"learning_rate": 4.352030947775629e-06,
"loss": 0.1667,
"step": 1000
},
{
"epoch": 2.6372837087822396,
"grad_norm": 0.3781305253505707,
"learning_rate": 4.061895551257254e-06,
"loss": 0.1545,
"step": 1010
},
{
"epoch": 2.663401893568397,
"grad_norm": 0.41318264603614807,
"learning_rate": 3.7717601547388784e-06,
"loss": 0.1679,
"step": 1020
},
{
"epoch": 2.6895200783545543,
"grad_norm": 0.40817004442214966,
"learning_rate": 3.4816247582205027e-06,
"loss": 0.1573,
"step": 1030
},
{
"epoch": 2.7156382631407117,
"grad_norm": 0.41028645634651184,
"learning_rate": 3.1914893617021277e-06,
"loss": 0.1474,
"step": 1040
},
{
"epoch": 2.741756447926869,
"grad_norm": 0.3509976267814636,
"learning_rate": 2.9013539651837524e-06,
"loss": 0.1578,
"step": 1050
},
{
"epoch": 2.7678746327130264,
"grad_norm": 0.4071323573589325,
"learning_rate": 2.611218568665377e-06,
"loss": 0.1496,
"step": 1060
},
{
"epoch": 2.7939928174991837,
"grad_norm": 0.2635524570941925,
"learning_rate": 2.321083172147002e-06,
"loss": 0.1696,
"step": 1070
},
{
"epoch": 2.820111002285341,
"grad_norm": 0.3947674632072449,
"learning_rate": 2.030947775628627e-06,
"loss": 0.1664,
"step": 1080
},
{
"epoch": 2.8462291870714984,
"grad_norm": 0.47258490324020386,
"learning_rate": 1.7408123791102513e-06,
"loss": 0.1617,
"step": 1090
},
{
"epoch": 2.8723473718576558,
"grad_norm": 0.3031919300556183,
"learning_rate": 1.4506769825918762e-06,
"loss": 0.1454,
"step": 1100
},
{
"epoch": 2.898465556643813,
"grad_norm": 0.3442898988723755,
"learning_rate": 1.160541586073501e-06,
"loss": 0.1584,
"step": 1110
},
{
"epoch": 2.9245837414299705,
"grad_norm": 0.46089524030685425,
"learning_rate": 8.704061895551257e-07,
"loss": 0.1607,
"step": 1120
},
{
"epoch": 2.950701926216128,
"grad_norm": 0.35179299116134644,
"learning_rate": 5.802707930367505e-07,
"loss": 0.1668,
"step": 1130
},
{
"epoch": 2.9768201110022856,
"grad_norm": 0.40915292501449585,
"learning_rate": 2.901353965183753e-07,
"loss": 0.1522,
"step": 1140
},
{
"epoch": 3.0,
"eval_loss": 0.3278330862522125,
"eval_runtime": 46.5303,
"eval_samples_per_second": 19.729,
"eval_steps_per_second": 9.865,
"step": 1149
}
],
"logging_steps": 10,
"max_steps": 1149,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1050463737748521e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}