{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 921,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03257328990228013,
"grad_norm": 4.319990634918213,
"learning_rate": 7.741935483870967e-07,
"loss": 1.3255,
"step": 10
},
{
"epoch": 0.06514657980456026,
"grad_norm": 3.764126777648926,
"learning_rate": 1.6344086021505377e-06,
"loss": 1.2843,
"step": 20
},
{
"epoch": 0.09771986970684039,
"grad_norm": 4.150228023529053,
"learning_rate": 2.4946236559139784e-06,
"loss": 1.2342,
"step": 30
},
{
"epoch": 0.13029315960912052,
"grad_norm": 1.6511605978012085,
"learning_rate": 3.3548387096774194e-06,
"loss": 0.9679,
"step": 40
},
{
"epoch": 0.16286644951140064,
"grad_norm": 0.7357141971588135,
"learning_rate": 4.21505376344086e-06,
"loss": 0.7937,
"step": 50
},
{
"epoch": 0.19543973941368079,
"grad_norm": 0.598514974117279,
"learning_rate": 5.075268817204301e-06,
"loss": 0.6905,
"step": 60
},
{
"epoch": 0.2280130293159609,
"grad_norm": 0.6065505743026733,
"learning_rate": 5.935483870967741e-06,
"loss": 0.6328,
"step": 70
},
{
"epoch": 0.26058631921824105,
"grad_norm": 0.5301859378814697,
"learning_rate": 6.7956989247311825e-06,
"loss": 0.5722,
"step": 80
},
{
"epoch": 0.2931596091205212,
"grad_norm": 0.5616416335105896,
"learning_rate": 7.655913978494624e-06,
"loss": 0.5132,
"step": 90
},
{
"epoch": 0.3257328990228013,
"grad_norm": 0.7361940145492554,
"learning_rate": 7.998963539365396e-06,
"loss": 0.4687,
"step": 100
},
{
"epoch": 0.3257328990228013,
"eval_loss": 0.4614461660385132,
"eval_runtime": 33.8621,
"eval_samples_per_second": 7.619,
"eval_steps_per_second": 3.81,
"step": 100
},
{
"epoch": 0.3583061889250814,
"grad_norm": 0.6860578656196594,
"learning_rate": 7.992631558305708e-06,
"loss": 0.4247,
"step": 110
},
{
"epoch": 0.39087947882736157,
"grad_norm": 0.6234003901481628,
"learning_rate": 7.980552510965744e-06,
"loss": 0.38,
"step": 120
},
{
"epoch": 0.4234527687296417,
"grad_norm": 0.7827787399291992,
"learning_rate": 7.962743784145322e-06,
"loss": 0.3475,
"step": 130
},
{
"epoch": 0.4560260586319218,
"grad_norm": 1.0882517099380493,
"learning_rate": 7.939231012048832e-06,
"loss": 0.2904,
"step": 140
},
{
"epoch": 0.48859934853420195,
"grad_norm": 0.9678266048431396,
"learning_rate": 7.910048039386883e-06,
"loss": 0.2646,
"step": 150
},
{
"epoch": 0.5211726384364821,
"grad_norm": 1.0340553522109985,
"learning_rate": 7.875236872659625e-06,
"loss": 0.2408,
"step": 160
},
{
"epoch": 0.5537459283387622,
"grad_norm": 0.8255159258842468,
"learning_rate": 7.834847619691822e-06,
"loss": 0.1985,
"step": 170
},
{
"epoch": 0.5863192182410424,
"grad_norm": 0.9649443626403809,
"learning_rate": 7.788938417506746e-06,
"loss": 0.1981,
"step": 180
},
{
"epoch": 0.6188925081433225,
"grad_norm": 0.7817046642303467,
"learning_rate": 7.737575348642683e-06,
"loss": 0.1947,
"step": 190
},
{
"epoch": 0.6514657980456026,
"grad_norm": 0.7640238404273987,
"learning_rate": 7.680832346032546e-06,
"loss": 0.1752,
"step": 200
},
{
"epoch": 0.6514657980456026,
"eval_loss": 0.1747174710035324,
"eval_runtime": 33.2887,
"eval_samples_per_second": 7.75,
"eval_steps_per_second": 3.875,
"step": 200
},
{
"epoch": 0.6840390879478827,
"grad_norm": 0.701442301273346,
"learning_rate": 7.618791086583449e-06,
"loss": 0.1768,
"step": 210
},
{
"epoch": 0.7166123778501629,
"grad_norm": 0.8486807942390442,
"learning_rate": 7.5515408736095e-06,
"loss": 0.1667,
"step": 220
},
{
"epoch": 0.749185667752443,
"grad_norm": 0.9864675998687744,
"learning_rate": 7.4791785082869865e-06,
"loss": 0.1505,
"step": 230
},
{
"epoch": 0.7817589576547231,
"grad_norm": 0.9136099815368652,
"learning_rate": 7.401808150317004e-06,
"loss": 0.1518,
"step": 240
},
{
"epoch": 0.8143322475570033,
"grad_norm": 0.6785700917243958,
"learning_rate": 7.3195411679961005e-06,
"loss": 0.1485,
"step": 250
},
{
"epoch": 0.8469055374592834,
"grad_norm": 0.9049308896064758,
"learning_rate": 7.232495977910721e-06,
"loss": 0.144,
"step": 260
},
{
"epoch": 0.8794788273615635,
"grad_norm": 0.8609828352928162,
"learning_rate": 7.140797874486235e-06,
"loss": 0.1471,
"step": 270
},
{
"epoch": 0.9120521172638436,
"grad_norm": 0.8438140749931335,
"learning_rate": 7.044578849635858e-06,
"loss": 0.1424,
"step": 280
},
{
"epoch": 0.9446254071661238,
"grad_norm": 0.7648624181747437,
"learning_rate": 6.943977402769108e-06,
"loss": 0.1385,
"step": 290
},
{
"epoch": 0.9771986970684039,
"grad_norm": 0.6212359666824341,
"learning_rate": 6.839138341433236e-06,
"loss": 0.1428,
"step": 300
},
{
"epoch": 0.9771986970684039,
"eval_loss": 0.1377791315317154,
"eval_runtime": 33.2415,
"eval_samples_per_second": 7.761,
"eval_steps_per_second": 3.881,
"step": 300
},
{
"epoch": 1.009771986970684,
"grad_norm": 0.5962893962860107,
"learning_rate": 6.730212572874616e-06,
"loss": 0.1364,
"step": 310
},
{
"epoch": 1.0423452768729642,
"grad_norm": 0.7699111700057983,
"learning_rate": 6.6173568868201244e-06,
"loss": 0.1332,
"step": 320
},
{
"epoch": 1.0749185667752443,
"grad_norm": 0.6283778548240662,
"learning_rate": 6.500733729791155e-06,
"loss": 0.1254,
"step": 330
},
{
"epoch": 1.1074918566775245,
"grad_norm": 0.623935341835022,
"learning_rate": 6.380510971275159e-06,
"loss": 0.13,
"step": 340
},
{
"epoch": 1.1400651465798046,
"grad_norm": 0.6022305488586426,
"learning_rate": 6.256861662091248e-06,
"loss": 0.138,
"step": 350
},
{
"epoch": 1.1726384364820848,
"grad_norm": 0.716827392578125,
"learning_rate": 6.1299637852977185e-06,
"loss": 0.1363,
"step": 360
},
{
"epoch": 1.205211726384365,
"grad_norm": 0.7210624814033508,
"learning_rate": 6e-06,
"loss": 0.1371,
"step": 370
},
{
"epoch": 1.237785016286645,
"grad_norm": 0.5957107543945312,
"learning_rate": 5.867157378427834e-06,
"loss": 0.1308,
"step": 380
},
{
"epoch": 1.2703583061889252,
"grad_norm": 0.6254829168319702,
"learning_rate": 5.731627136660109e-06,
"loss": 0.1298,
"step": 390
},
{
"epoch": 1.3029315960912053,
"grad_norm": 0.5790061950683594,
"learning_rate": 5.593604359384966e-06,
"loss": 0.1312,
"step": 400
},
{
"epoch": 1.3029315960912053,
"eval_loss": 0.13056354224681854,
"eval_runtime": 33.286,
"eval_samples_per_second": 7.751,
"eval_steps_per_second": 3.876,
"step": 400
},
{
"epoch": 1.3355048859934853,
"grad_norm": 0.6293231844902039,
"learning_rate": 5.453287719091365e-06,
"loss": 0.1342,
"step": 410
},
{
"epoch": 1.3680781758957654,
"grad_norm": 0.7954932451248169,
"learning_rate": 5.3108791900962995e-06,
"loss": 0.1312,
"step": 420
},
{
"epoch": 1.4006514657980456,
"grad_norm": 0.5331751108169556,
"learning_rate": 5.1665837578192995e-06,
"loss": 0.1319,
"step": 430
},
{
"epoch": 1.4332247557003257,
"grad_norm": 0.7147168517112732,
"learning_rate": 5.020609123722704e-06,
"loss": 0.1307,
"step": 440
},
{
"epoch": 1.4657980456026058,
"grad_norm": 0.6740135550498962,
"learning_rate": 4.873165406342406e-06,
"loss": 0.133,
"step": 450
},
{
"epoch": 1.498371335504886,
"grad_norm": 0.6303773522377014,
"learning_rate": 4.724464838839423e-06,
"loss": 0.1203,
"step": 460
},
{
"epoch": 1.5309446254071661,
"grad_norm": 0.6301987171173096,
"learning_rate": 4.574721463507632e-06,
"loss": 0.1174,
"step": 470
},
{
"epoch": 1.5635179153094463,
"grad_norm": 0.4884192943572998,
"learning_rate": 4.424150823677419e-06,
"loss": 0.1221,
"step": 480
},
{
"epoch": 1.5960912052117264,
"grad_norm": 0.8104236721992493,
"learning_rate": 4.272969653458684e-06,
"loss": 0.1248,
"step": 490
},
{
"epoch": 1.6286644951140063,
"grad_norm": 0.6095107197761536,
"learning_rate": 4.1213955657698496e-06,
"loss": 0.1221,
"step": 500
},
{
"epoch": 1.6286644951140063,
"eval_loss": 0.12636995315551758,
"eval_runtime": 33.9806,
"eval_samples_per_second": 7.593,
"eval_steps_per_second": 3.796,
"step": 500
},
{
"epoch": 1.6612377850162865,
"grad_norm": 0.5424883961677551,
"learning_rate": 3.969646739101868e-06,
"loss": 0.1196,
"step": 510
},
{
"epoch": 1.6938110749185666,
"grad_norm": 0.8408002257347107,
"learning_rate": 3.817941603468146e-06,
"loss": 0.1249,
"step": 520
},
{
"epoch": 1.7263843648208468,
"grad_norm": 0.6321083307266235,
"learning_rate": 3.666498525992426e-06,
"loss": 0.1258,
"step": 530
},
{
"epoch": 1.758957654723127,
"grad_norm": 0.705021321773529,
"learning_rate": 3.5155354965871845e-06,
"loss": 0.1259,
"step": 540
},
{
"epoch": 1.791530944625407,
"grad_norm": 0.5481059551239014,
"learning_rate": 3.3652698141749946e-06,
"loss": 0.1273,
"step": 550
},
{
"epoch": 1.8241042345276872,
"grad_norm": 0.5651680827140808,
"learning_rate": 3.215917773904527e-06,
"loss": 0.1218,
"step": 560
},
{
"epoch": 1.8566775244299674,
"grad_norm": 0.9432061314582825,
"learning_rate": 3.067694355811374e-06,
"loss": 0.1267,
"step": 570
},
{
"epoch": 1.8892508143322475,
"grad_norm": 0.7050531506538391,
"learning_rate": 2.9208129153719025e-06,
"loss": 0.1286,
"step": 580
},
{
"epoch": 1.9218241042345277,
"grad_norm": 0.6143736243247986,
"learning_rate": 2.7754848763955035e-06,
"loss": 0.1236,
"step": 590
},
{
"epoch": 1.9543973941368078,
"grad_norm": 0.5079554319381714,
"learning_rate": 2.631919426697325e-06,
"loss": 0.1281,
"step": 600
},
{
"epoch": 1.9543973941368078,
"eval_loss": 0.12330074608325958,
"eval_runtime": 34.0218,
"eval_samples_per_second": 7.583,
"eval_steps_per_second": 3.792,
"step": 600
},
{
"epoch": 1.986970684039088,
"grad_norm": 0.6454936861991882,
"learning_rate": 2.490323216989543e-06,
"loss": 0.127,
"step": 610
},
{
"epoch": 2.019543973941368,
"grad_norm": 0.5652497410774231,
"learning_rate": 2.35090006342457e-06,
"loss": 0.1288,
"step": 620
},
{
"epoch": 2.0521172638436482,
"grad_norm": 0.5710923671722412,
"learning_rate": 2.2138506542184e-06,
"loss": 0.1151,
"step": 630
},
{
"epoch": 2.0846905537459284,
"grad_norm": 0.5721701383590698,
"learning_rate": 2.079372260776338e-06,
"loss": 0.1187,
"step": 640
},
{
"epoch": 2.1172638436482085,
"grad_norm": 0.6961202025413513,
"learning_rate": 1.9476584537369703e-06,
"loss": 0.1236,
"step": 650
},
{
"epoch": 2.1498371335504887,
"grad_norm": 0.6558916568756104,
"learning_rate": 1.8188988243430696e-06,
"loss": 0.1118,
"step": 660
},
{
"epoch": 2.182410423452769,
"grad_norm": 0.6206583976745605,
"learning_rate": 1.6932787115405317e-06,
"loss": 0.1207,
"step": 670
},
{
"epoch": 2.214983713355049,
"grad_norm": 0.5952445864677429,
"learning_rate": 1.5709789351981212e-06,
"loss": 0.1208,
"step": 680
},
{
"epoch": 2.247557003257329,
"grad_norm": 0.6680878400802612,
"learning_rate": 1.4521755358320865e-06,
"loss": 0.1222,
"step": 690
},
{
"epoch": 2.2801302931596092,
"grad_norm": 0.5293084383010864,
"learning_rate": 1.3370395212102378e-06,
"loss": 0.1136,
"step": 700
},
{
"epoch": 2.2801302931596092,
"eval_loss": 0.1221059188246727,
"eval_runtime": 34.0931,
"eval_samples_per_second": 7.568,
"eval_steps_per_second": 3.784,
"step": 700
},
{
"epoch": 2.3127035830618894,
"grad_norm": 0.6006386280059814,
"learning_rate": 1.2257366202002693e-06,
"loss": 0.1241,
"step": 710
},
{
"epoch": 2.3452768729641695,
"grad_norm": 0.6543139815330505,
"learning_rate": 1.1184270442166187e-06,
"loss": 0.1265,
"step": 720
},
{
"epoch": 2.3778501628664497,
"grad_norm": 0.694369375705719,
"learning_rate": 1.015265256609258e-06,
"loss": 0.121,
"step": 730
},
{
"epoch": 2.41042345276873,
"grad_norm": 0.5214750170707703,
"learning_rate": 9.163997503263364e-07,
"loss": 0.1215,
"step": 740
},
{
"epoch": 2.44299674267101,
"grad_norm": 0.8416522145271301,
"learning_rate": 8.219728341707424e-07,
"loss": 0.1229,
"step": 750
},
{
"epoch": 2.47557003257329,
"grad_norm": 0.5506000518798828,
"learning_rate": 7.32120427958232e-07,
"loss": 0.1176,
"step": 760
},
{
"epoch": 2.5081433224755703,
"grad_norm": 0.6719224452972412,
"learning_rate": 6.469718668719827e-07,
"loss": 0.1201,
"step": 770
},
{
"epoch": 2.5407166123778504,
"grad_norm": 0.7880155444145203,
"learning_rate": 5.66649715295179e-07,
"loss": 0.1107,
"step": 780
},
{
"epoch": 2.5732899022801305,
"grad_norm": 0.7107143402099609,
"learning_rate": 4.91269590389623e-07,
"loss": 0.1237,
"step": 790
},
{
"epoch": 2.6058631921824107,
"grad_norm": 0.5582589507102966,
"learning_rate": 4.2093999567428893e-07,
"loss": 0.1077,
"step": 800
},
{
"epoch": 2.6058631921824107,
"eval_loss": 0.12127382308244705,
"eval_runtime": 34.3481,
"eval_samples_per_second": 7.511,
"eval_steps_per_second": 3.756,
"step": 800
},
{
"epoch": 2.6384364820846904,
"grad_norm": 0.626492977142334,
"learning_rate": 3.557621648433935e-07,
"loss": 0.114,
"step": 810
},
{
"epoch": 2.6710097719869705,
"grad_norm": 0.658108651638031,
"learning_rate": 2.958299160487789e-07,
"loss": 0.1213,
"step": 820
},
{
"epoch": 2.7035830618892507,
"grad_norm": 0.5767033696174622,
"learning_rate": 2.412295168563667e-07,
"loss": 0.1177,
"step": 830
},
{
"epoch": 2.736156351791531,
"grad_norm": 0.6014522910118103,
"learning_rate": 1.9203956007105292e-07,
"loss": 0.1205,
"step": 840
},
{
"epoch": 2.768729641693811,
"grad_norm": 0.5462273955345154,
"learning_rate": 1.4833085060880347e-07,
"loss": 0.1195,
"step": 850
},
{
"epoch": 2.801302931596091,
"grad_norm": 0.6881915926933289,
"learning_rate": 1.1016630357877276e-07,
"loss": 0.1217,
"step": 860
},
{
"epoch": 2.8338762214983713,
"grad_norm": 0.7328351140022278,
"learning_rate": 7.760085372215508e-08,
"loss": 0.1251,
"step": 870
},
{
"epoch": 2.8664495114006514,
"grad_norm": 0.6310160756111145,
"learning_rate": 5.0681376338121527e-08,
"loss": 0.1217,
"step": 880
},
{
"epoch": 2.8990228013029316,
"grad_norm": 0.6388233304023743,
"learning_rate": 2.944661981066243e-08,
"loss": 0.1276,
"step": 890
},
{
"epoch": 2.9315960912052117,
"grad_norm": 0.6657443046569824,
"learning_rate": 1.3927149833463659e-08,
"loss": 0.1149,
"step": 900
},
{
"epoch": 2.9315960912052117,
"eval_loss": 0.12107826769351959,
"eval_runtime": 34.4059,
"eval_samples_per_second": 7.499,
"eval_steps_per_second": 3.749,
"step": 900
},
{
"epoch": 2.964169381107492,
"grad_norm": 0.7934221029281616,
"learning_rate": 4.145305413089772e-09,
"loss": 0.1124,
"step": 910
},
{
"epoch": 2.996742671009772,
"grad_norm": 0.6388999819755554,
"learning_rate": 1.1516671381528454e-10,
"loss": 0.124,
"step": 920
}
],
"logging_steps": 10,
"max_steps": 921,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.388320321760666e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}