{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.993162393162393,
"eval_steps": 500,
"global_step": 1752,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022792022792022793,
"grad_norm": 0.9912007491850634,
"learning_rate": 5e-06,
"loss": 0.7548,
"step": 10
},
{
"epoch": 0.045584045584045586,
"grad_norm": 0.8120341080388481,
"learning_rate": 5e-06,
"loss": 0.6988,
"step": 20
},
{
"epoch": 0.06837606837606838,
"grad_norm": 0.9144364506308376,
"learning_rate": 5e-06,
"loss": 0.68,
"step": 30
},
{
"epoch": 0.09116809116809117,
"grad_norm": 0.8778720973960674,
"learning_rate": 5e-06,
"loss": 0.6798,
"step": 40
},
{
"epoch": 0.11396011396011396,
"grad_norm": 0.8083687880277666,
"learning_rate": 5e-06,
"loss": 0.6757,
"step": 50
},
{
"epoch": 0.13675213675213677,
"grad_norm": 0.6856245947077141,
"learning_rate": 5e-06,
"loss": 0.6555,
"step": 60
},
{
"epoch": 0.15954415954415954,
"grad_norm": 0.4549134078131556,
"learning_rate": 5e-06,
"loss": 0.6528,
"step": 70
},
{
"epoch": 0.18233618233618235,
"grad_norm": 0.38149974370876494,
"learning_rate": 5e-06,
"loss": 0.6557,
"step": 80
},
{
"epoch": 0.20512820512820512,
"grad_norm": 0.31780767590815034,
"learning_rate": 5e-06,
"loss": 0.6398,
"step": 90
},
{
"epoch": 0.22792022792022792,
"grad_norm": 0.2979038503551664,
"learning_rate": 5e-06,
"loss": 0.6414,
"step": 100
},
{
"epoch": 0.25071225071225073,
"grad_norm": 0.34299995442682374,
"learning_rate": 5e-06,
"loss": 0.6326,
"step": 110
},
{
"epoch": 0.27350427350427353,
"grad_norm": 0.31224439631858925,
"learning_rate": 5e-06,
"loss": 0.6411,
"step": 120
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.2872702884170918,
"learning_rate": 5e-06,
"loss": 0.6421,
"step": 130
},
{
"epoch": 0.3190883190883191,
"grad_norm": 0.31843610582508325,
"learning_rate": 5e-06,
"loss": 0.6441,
"step": 140
},
{
"epoch": 0.3418803418803419,
"grad_norm": 0.31677020367403513,
"learning_rate": 5e-06,
"loss": 0.6369,
"step": 150
},
{
"epoch": 0.3646723646723647,
"grad_norm": 0.30778519805576754,
"learning_rate": 5e-06,
"loss": 0.6423,
"step": 160
},
{
"epoch": 0.38746438746438744,
"grad_norm": 0.3520973006350889,
"learning_rate": 5e-06,
"loss": 0.6333,
"step": 170
},
{
"epoch": 0.41025641025641024,
"grad_norm": 0.3024095360698375,
"learning_rate": 5e-06,
"loss": 0.6395,
"step": 180
},
{
"epoch": 0.43304843304843305,
"grad_norm": 0.306683195208484,
"learning_rate": 5e-06,
"loss": 0.6314,
"step": 190
},
{
"epoch": 0.45584045584045585,
"grad_norm": 0.3028828247046378,
"learning_rate": 5e-06,
"loss": 0.6397,
"step": 200
},
{
"epoch": 0.47863247863247865,
"grad_norm": 0.3090017394413483,
"learning_rate": 5e-06,
"loss": 0.6356,
"step": 210
},
{
"epoch": 0.5014245014245015,
"grad_norm": 0.29949977441022513,
"learning_rate": 5e-06,
"loss": 0.6397,
"step": 220
},
{
"epoch": 0.5242165242165242,
"grad_norm": 0.32419318781752016,
"learning_rate": 5e-06,
"loss": 0.6351,
"step": 230
},
{
"epoch": 0.5470085470085471,
"grad_norm": 0.2983992630345295,
"learning_rate": 5e-06,
"loss": 0.6311,
"step": 240
},
{
"epoch": 0.5698005698005698,
"grad_norm": 0.3388064649445867,
"learning_rate": 5e-06,
"loss": 0.636,
"step": 250
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.30378675945812544,
"learning_rate": 5e-06,
"loss": 0.6342,
"step": 260
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.3118605836030505,
"learning_rate": 5e-06,
"loss": 0.6314,
"step": 270
},
{
"epoch": 0.6381766381766382,
"grad_norm": 0.3185910877256985,
"learning_rate": 5e-06,
"loss": 0.6427,
"step": 280
},
{
"epoch": 0.6609686609686609,
"grad_norm": 0.3238612685790961,
"learning_rate": 5e-06,
"loss": 0.6308,
"step": 290
},
{
"epoch": 0.6837606837606838,
"grad_norm": 0.2910572930554487,
"learning_rate": 5e-06,
"loss": 0.6288,
"step": 300
},
{
"epoch": 0.7065527065527065,
"grad_norm": 0.2961577217293602,
"learning_rate": 5e-06,
"loss": 0.6287,
"step": 310
},
{
"epoch": 0.7293447293447294,
"grad_norm": 0.347125995352574,
"learning_rate": 5e-06,
"loss": 0.6297,
"step": 320
},
{
"epoch": 0.7521367521367521,
"grad_norm": 0.3017718885104704,
"learning_rate": 5e-06,
"loss": 0.626,
"step": 330
},
{
"epoch": 0.7749287749287749,
"grad_norm": 0.32719242803351367,
"learning_rate": 5e-06,
"loss": 0.6319,
"step": 340
},
{
"epoch": 0.7977207977207977,
"grad_norm": 0.3355483959267454,
"learning_rate": 5e-06,
"loss": 0.6392,
"step": 350
},
{
"epoch": 0.8205128205128205,
"grad_norm": 0.31827824902748364,
"learning_rate": 5e-06,
"loss": 0.6309,
"step": 360
},
{
"epoch": 0.8433048433048433,
"grad_norm": 0.32626776608416697,
"learning_rate": 5e-06,
"loss": 0.6245,
"step": 370
},
{
"epoch": 0.8660968660968661,
"grad_norm": 0.31292644377425555,
"learning_rate": 5e-06,
"loss": 0.6367,
"step": 380
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.32907302888237766,
"learning_rate": 5e-06,
"loss": 0.6353,
"step": 390
},
{
"epoch": 0.9116809116809117,
"grad_norm": 0.29588853416504324,
"learning_rate": 5e-06,
"loss": 0.6387,
"step": 400
},
{
"epoch": 0.9344729344729344,
"grad_norm": 0.28565869479324607,
"learning_rate": 5e-06,
"loss": 0.6346,
"step": 410
},
{
"epoch": 0.9572649572649573,
"grad_norm": 0.30338812600636667,
"learning_rate": 5e-06,
"loss": 0.6216,
"step": 420
},
{
"epoch": 0.98005698005698,
"grad_norm": 0.3526849426643039,
"learning_rate": 5e-06,
"loss": 0.6343,
"step": 430
},
{
"epoch": 0.9982905982905983,
"eval_loss": 0.6251307129859924,
"eval_runtime": 223.0,
"eval_samples_per_second": 53.018,
"eval_steps_per_second": 0.417,
"step": 438
},
{
"epoch": 1.002849002849003,
"grad_norm": 0.3525041598672024,
"learning_rate": 5e-06,
"loss": 0.6297,
"step": 440
},
{
"epoch": 1.0256410256410255,
"grad_norm": 0.3088212010408478,
"learning_rate": 5e-06,
"loss": 0.6024,
"step": 450
},
{
"epoch": 1.0484330484330484,
"grad_norm": 0.3048735024737802,
"learning_rate": 5e-06,
"loss": 0.5978,
"step": 460
},
{
"epoch": 1.0712250712250713,
"grad_norm": 0.30395209418239993,
"learning_rate": 5e-06,
"loss": 0.5953,
"step": 470
},
{
"epoch": 1.0940170940170941,
"grad_norm": 0.3224872071873683,
"learning_rate": 5e-06,
"loss": 0.6052,
"step": 480
},
{
"epoch": 1.1168091168091168,
"grad_norm": 0.31440242200972873,
"learning_rate": 5e-06,
"loss": 0.6086,
"step": 490
},
{
"epoch": 1.1396011396011396,
"grad_norm": 0.29525108520793303,
"learning_rate": 5e-06,
"loss": 0.5988,
"step": 500
},
{
"epoch": 1.1623931623931625,
"grad_norm": 0.32620379296750857,
"learning_rate": 5e-06,
"loss": 0.5968,
"step": 510
},
{
"epoch": 1.1851851851851851,
"grad_norm": 0.30445507629744495,
"learning_rate": 5e-06,
"loss": 0.6017,
"step": 520
},
{
"epoch": 1.207977207977208,
"grad_norm": 0.2989183656534176,
"learning_rate": 5e-06,
"loss": 0.5973,
"step": 530
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.2895135488724733,
"learning_rate": 5e-06,
"loss": 0.6038,
"step": 540
},
{
"epoch": 1.2535612535612537,
"grad_norm": 0.33760774288876744,
"learning_rate": 5e-06,
"loss": 0.604,
"step": 550
},
{
"epoch": 1.2763532763532763,
"grad_norm": 0.3106514910309685,
"learning_rate": 5e-06,
"loss": 0.6023,
"step": 560
},
{
"epoch": 1.2991452991452992,
"grad_norm": 0.2931272749092201,
"learning_rate": 5e-06,
"loss": 0.599,
"step": 570
},
{
"epoch": 1.3219373219373218,
"grad_norm": 0.27937877406216316,
"learning_rate": 5e-06,
"loss": 0.5984,
"step": 580
},
{
"epoch": 1.3447293447293447,
"grad_norm": 0.2780566360895875,
"learning_rate": 5e-06,
"loss": 0.5947,
"step": 590
},
{
"epoch": 1.3675213675213675,
"grad_norm": 0.338396436099565,
"learning_rate": 5e-06,
"loss": 0.6031,
"step": 600
},
{
"epoch": 1.3903133903133904,
"grad_norm": 0.3388673486546411,
"learning_rate": 5e-06,
"loss": 0.6,
"step": 610
},
{
"epoch": 1.413105413105413,
"grad_norm": 0.2960462008725661,
"learning_rate": 5e-06,
"loss": 0.6048,
"step": 620
},
{
"epoch": 1.435897435897436,
"grad_norm": 0.31261947283205377,
"learning_rate": 5e-06,
"loss": 0.6031,
"step": 630
},
{
"epoch": 1.4586894586894588,
"grad_norm": 0.31540213681546253,
"learning_rate": 5e-06,
"loss": 0.5964,
"step": 640
},
{
"epoch": 1.4814814814814814,
"grad_norm": 0.3011418333363954,
"learning_rate": 5e-06,
"loss": 0.5953,
"step": 650
},
{
"epoch": 1.5042735042735043,
"grad_norm": 0.29770496669495466,
"learning_rate": 5e-06,
"loss": 0.5953,
"step": 660
},
{
"epoch": 1.5270655270655271,
"grad_norm": 0.3227357467535432,
"learning_rate": 5e-06,
"loss": 0.6032,
"step": 670
},
{
"epoch": 1.54985754985755,
"grad_norm": 0.2926461969068394,
"learning_rate": 5e-06,
"loss": 0.5924,
"step": 680
},
{
"epoch": 1.5726495726495726,
"grad_norm": 0.3151144341930611,
"learning_rate": 5e-06,
"loss": 0.5932,
"step": 690
},
{
"epoch": 1.5954415954415955,
"grad_norm": 0.28487938842896926,
"learning_rate": 5e-06,
"loss": 0.6113,
"step": 700
},
{
"epoch": 1.618233618233618,
"grad_norm": 0.30356180399923843,
"learning_rate": 5e-06,
"loss": 0.5975,
"step": 710
},
{
"epoch": 1.641025641025641,
"grad_norm": 0.2856653342858608,
"learning_rate": 5e-06,
"loss": 0.6005,
"step": 720
},
{
"epoch": 1.6638176638176638,
"grad_norm": 0.34957562019945143,
"learning_rate": 5e-06,
"loss": 0.5974,
"step": 730
},
{
"epoch": 1.6866096866096867,
"grad_norm": 0.3269288669640998,
"learning_rate": 5e-06,
"loss": 0.5939,
"step": 740
},
{
"epoch": 1.7094017094017095,
"grad_norm": 0.30774086259571193,
"learning_rate": 5e-06,
"loss": 0.5958,
"step": 750
},
{
"epoch": 1.7321937321937322,
"grad_norm": 0.296846543928719,
"learning_rate": 5e-06,
"loss": 0.5963,
"step": 760
},
{
"epoch": 1.7549857549857548,
"grad_norm": 0.2981885551582998,
"learning_rate": 5e-06,
"loss": 0.593,
"step": 770
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.31214489545002166,
"learning_rate": 5e-06,
"loss": 0.6025,
"step": 780
},
{
"epoch": 1.8005698005698005,
"grad_norm": 0.3192140300099497,
"learning_rate": 5e-06,
"loss": 0.5986,
"step": 790
},
{
"epoch": 1.8233618233618234,
"grad_norm": 0.32130945178436227,
"learning_rate": 5e-06,
"loss": 0.5879,
"step": 800
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.3148559521921556,
"learning_rate": 5e-06,
"loss": 0.5904,
"step": 810
},
{
"epoch": 1.868945868945869,
"grad_norm": 0.3112411609133845,
"learning_rate": 5e-06,
"loss": 0.5917,
"step": 820
},
{
"epoch": 1.8917378917378918,
"grad_norm": 0.3161978881525877,
"learning_rate": 5e-06,
"loss": 0.5952,
"step": 830
},
{
"epoch": 1.9145299145299144,
"grad_norm": 0.31432684111426584,
"learning_rate": 5e-06,
"loss": 0.5989,
"step": 840
},
{
"epoch": 1.9373219373219372,
"grad_norm": 0.2968776393628373,
"learning_rate": 5e-06,
"loss": 0.5991,
"step": 850
},
{
"epoch": 1.96011396011396,
"grad_norm": 0.30501940664641986,
"learning_rate": 5e-06,
"loss": 0.5978,
"step": 860
},
{
"epoch": 1.982905982905983,
"grad_norm": 0.34243750839103754,
"learning_rate": 5e-06,
"loss": 0.5965,
"step": 870
},
{
"epoch": 1.998860398860399,
"eval_loss": 0.6187237501144409,
"eval_runtime": 223.8992,
"eval_samples_per_second": 52.805,
"eval_steps_per_second": 0.415,
"step": 877
},
{
"epoch": 2.005698005698006,
"grad_norm": 0.3076552665931079,
"learning_rate": 5e-06,
"loss": 0.6035,
"step": 880
},
{
"epoch": 2.0284900284900287,
"grad_norm": 0.316707349656788,
"learning_rate": 5e-06,
"loss": 0.5544,
"step": 890
},
{
"epoch": 2.051282051282051,
"grad_norm": 0.314619421056384,
"learning_rate": 5e-06,
"loss": 0.5702,
"step": 900
},
{
"epoch": 2.074074074074074,
"grad_norm": 0.3676853811483963,
"learning_rate": 5e-06,
"loss": 0.5679,
"step": 910
},
{
"epoch": 2.096866096866097,
"grad_norm": 0.3405104791289966,
"learning_rate": 5e-06,
"loss": 0.5705,
"step": 920
},
{
"epoch": 2.1196581196581197,
"grad_norm": 0.30588234453097496,
"learning_rate": 5e-06,
"loss": 0.5696,
"step": 930
},
{
"epoch": 2.1424501424501425,
"grad_norm": 0.3159010620787154,
"learning_rate": 5e-06,
"loss": 0.565,
"step": 940
},
{
"epoch": 2.1652421652421654,
"grad_norm": 0.31276096665138237,
"learning_rate": 5e-06,
"loss": 0.5637,
"step": 950
},
{
"epoch": 2.1880341880341883,
"grad_norm": 0.32793772773197,
"learning_rate": 5e-06,
"loss": 0.569,
"step": 960
},
{
"epoch": 2.2108262108262107,
"grad_norm": 0.3039440121244138,
"learning_rate": 5e-06,
"loss": 0.5701,
"step": 970
},
{
"epoch": 2.2336182336182335,
"grad_norm": 0.29684753392141544,
"learning_rate": 5e-06,
"loss": 0.5615,
"step": 980
},
{
"epoch": 2.2564102564102564,
"grad_norm": 0.30214100814674827,
"learning_rate": 5e-06,
"loss": 0.5654,
"step": 990
},
{
"epoch": 2.2792022792022792,
"grad_norm": 0.3283314542723796,
"learning_rate": 5e-06,
"loss": 0.5687,
"step": 1000
},
{
"epoch": 2.301994301994302,
"grad_norm": 0.28655825406701363,
"learning_rate": 5e-06,
"loss": 0.566,
"step": 1010
},
{
"epoch": 2.324786324786325,
"grad_norm": 0.3172763262482182,
"learning_rate": 5e-06,
"loss": 0.5684,
"step": 1020
},
{
"epoch": 2.347578347578348,
"grad_norm": 0.29685961712777825,
"learning_rate": 5e-06,
"loss": 0.5721,
"step": 1030
},
{
"epoch": 2.3703703703703702,
"grad_norm": 0.3083523951805142,
"learning_rate": 5e-06,
"loss": 0.5664,
"step": 1040
},
{
"epoch": 2.393162393162393,
"grad_norm": 0.30997548246110007,
"learning_rate": 5e-06,
"loss": 0.5672,
"step": 1050
},
{
"epoch": 2.415954415954416,
"grad_norm": 0.30789457003363374,
"learning_rate": 5e-06,
"loss": 0.5774,
"step": 1060
},
{
"epoch": 2.438746438746439,
"grad_norm": 0.34506757910084734,
"learning_rate": 5e-06,
"loss": 0.5776,
"step": 1070
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.29907001031582975,
"learning_rate": 5e-06,
"loss": 0.5748,
"step": 1080
},
{
"epoch": 2.484330484330484,
"grad_norm": 0.3061615554843654,
"learning_rate": 5e-06,
"loss": 0.5731,
"step": 1090
},
{
"epoch": 2.5071225071225074,
"grad_norm": 0.3222138386350068,
"learning_rate": 5e-06,
"loss": 0.577,
"step": 1100
},
{
"epoch": 2.52991452991453,
"grad_norm": 0.3080789218888492,
"learning_rate": 5e-06,
"loss": 0.5715,
"step": 1110
},
{
"epoch": 2.5527065527065527,
"grad_norm": 0.29258951587176035,
"learning_rate": 5e-06,
"loss": 0.5685,
"step": 1120
},
{
"epoch": 2.5754985754985755,
"grad_norm": 0.3281842313884605,
"learning_rate": 5e-06,
"loss": 0.5686,
"step": 1130
},
{
"epoch": 2.5982905982905984,
"grad_norm": 0.29411671998782135,
"learning_rate": 5e-06,
"loss": 0.571,
"step": 1140
},
{
"epoch": 2.6210826210826212,
"grad_norm": 0.29959622578924333,
"learning_rate": 5e-06,
"loss": 0.5682,
"step": 1150
},
{
"epoch": 2.6438746438746437,
"grad_norm": 0.30740185285745697,
"learning_rate": 5e-06,
"loss": 0.571,
"step": 1160
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.2839640105696346,
"learning_rate": 5e-06,
"loss": 0.5718,
"step": 1170
},
{
"epoch": 2.6894586894586894,
"grad_norm": 0.2896360639343294,
"learning_rate": 5e-06,
"loss": 0.5694,
"step": 1180
},
{
"epoch": 2.7122507122507122,
"grad_norm": 0.3247548012145319,
"learning_rate": 5e-06,
"loss": 0.5692,
"step": 1190
},
{
"epoch": 2.735042735042735,
"grad_norm": 0.29034523418069846,
"learning_rate": 5e-06,
"loss": 0.5722,
"step": 1200
},
{
"epoch": 2.757834757834758,
"grad_norm": 0.2853110947522709,
"learning_rate": 5e-06,
"loss": 0.577,
"step": 1210
},
{
"epoch": 2.780626780626781,
"grad_norm": 0.2828550287829409,
"learning_rate": 5e-06,
"loss": 0.5641,
"step": 1220
},
{
"epoch": 2.8034188034188032,
"grad_norm": 0.2979901647854109,
"learning_rate": 5e-06,
"loss": 0.5652,
"step": 1230
},
{
"epoch": 2.826210826210826,
"grad_norm": 0.29825085966088555,
"learning_rate": 5e-06,
"loss": 0.578,
"step": 1240
},
{
"epoch": 2.849002849002849,
"grad_norm": 0.29169708831163993,
"learning_rate": 5e-06,
"loss": 0.5762,
"step": 1250
},
{
"epoch": 2.871794871794872,
"grad_norm": 0.2972957929068833,
"learning_rate": 5e-06,
"loss": 0.5696,
"step": 1260
},
{
"epoch": 2.8945868945868947,
"grad_norm": 0.31082071951469487,
"learning_rate": 5e-06,
"loss": 0.5762,
"step": 1270
},
{
"epoch": 2.9173789173789175,
"grad_norm": 0.2900888035317228,
"learning_rate": 5e-06,
"loss": 0.5623,
"step": 1280
},
{
"epoch": 2.9401709401709404,
"grad_norm": 0.3110544935430418,
"learning_rate": 5e-06,
"loss": 0.5648,
"step": 1290
},
{
"epoch": 2.962962962962963,
"grad_norm": 0.31392258075162666,
"learning_rate": 5e-06,
"loss": 0.5653,
"step": 1300
},
{
"epoch": 2.9857549857549857,
"grad_norm": 0.31856046416041095,
"learning_rate": 5e-06,
"loss": 0.5744,
"step": 1310
},
{
"epoch": 2.9994301994301993,
"eval_loss": 0.6185686588287354,
"eval_runtime": 222.7671,
"eval_samples_per_second": 53.073,
"eval_steps_per_second": 0.417,
"step": 1316
},
{
"epoch": 3.0085470085470085,
"grad_norm": 0.3351007793036639,
"learning_rate": 5e-06,
"loss": 0.5691,
"step": 1320
},
{
"epoch": 3.0313390313390314,
"grad_norm": 0.3019286650434241,
"learning_rate": 5e-06,
"loss": 0.5399,
"step": 1330
},
{
"epoch": 3.0541310541310542,
"grad_norm": 0.3439158785823229,
"learning_rate": 5e-06,
"loss": 0.5394,
"step": 1340
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.3374532391642498,
"learning_rate": 5e-06,
"loss": 0.5432,
"step": 1350
},
{
"epoch": 3.0997150997150995,
"grad_norm": 0.2989682915138886,
"learning_rate": 5e-06,
"loss": 0.5376,
"step": 1360
},
{
"epoch": 3.1225071225071224,
"grad_norm": 0.2963980039357649,
"learning_rate": 5e-06,
"loss": 0.5395,
"step": 1370
},
{
"epoch": 3.1452991452991452,
"grad_norm": 0.2937114794782103,
"learning_rate": 5e-06,
"loss": 0.5394,
"step": 1380
},
{
"epoch": 3.168091168091168,
"grad_norm": 0.29626505292683575,
"learning_rate": 5e-06,
"loss": 0.5389,
"step": 1390
},
{
"epoch": 3.190883190883191,
"grad_norm": 0.30686647691318353,
"learning_rate": 5e-06,
"loss": 0.5339,
"step": 1400
},
{
"epoch": 3.213675213675214,
"grad_norm": 0.28513483825993924,
"learning_rate": 5e-06,
"loss": 0.5374,
"step": 1410
},
{
"epoch": 3.2364672364672367,
"grad_norm": 0.32112984492909513,
"learning_rate": 5e-06,
"loss": 0.5466,
"step": 1420
},
{
"epoch": 3.259259259259259,
"grad_norm": 0.28822837064728235,
"learning_rate": 5e-06,
"loss": 0.5474,
"step": 1430
},
{
"epoch": 3.282051282051282,
"grad_norm": 0.3185576111825181,
"learning_rate": 5e-06,
"loss": 0.5366,
"step": 1440
},
{
"epoch": 3.304843304843305,
"grad_norm": 0.2792872891044498,
"learning_rate": 5e-06,
"loss": 0.5362,
"step": 1450
},
{
"epoch": 3.3276353276353277,
"grad_norm": 0.29995676319638775,
"learning_rate": 5e-06,
"loss": 0.5396,
"step": 1460
},
{
"epoch": 3.3504273504273505,
"grad_norm": 0.2920803607745571,
"learning_rate": 5e-06,
"loss": 0.5421,
"step": 1470
},
{
"epoch": 3.3732193732193734,
"grad_norm": 0.3432358333663625,
"learning_rate": 5e-06,
"loss": 0.5438,
"step": 1480
},
{
"epoch": 3.396011396011396,
"grad_norm": 0.2917134971418762,
"learning_rate": 5e-06,
"loss": 0.5382,
"step": 1490
},
{
"epoch": 3.4188034188034186,
"grad_norm": 0.30814260872698784,
"learning_rate": 5e-06,
"loss": 0.5395,
"step": 1500
},
{
"epoch": 3.4415954415954415,
"grad_norm": 0.28877135275478877,
"learning_rate": 5e-06,
"loss": 0.5446,
"step": 1510
},
{
"epoch": 3.4643874643874644,
"grad_norm": 0.29391852963248044,
"learning_rate": 5e-06,
"loss": 0.538,
"step": 1520
},
{
"epoch": 3.4871794871794872,
"grad_norm": 0.2934166080799258,
"learning_rate": 5e-06,
"loss": 0.5501,
"step": 1530
},
{
"epoch": 3.50997150997151,
"grad_norm": 0.31731681904845593,
"learning_rate": 5e-06,
"loss": 0.5469,
"step": 1540
},
{
"epoch": 3.532763532763533,
"grad_norm": 0.3057350977648524,
"learning_rate": 5e-06,
"loss": 0.5426,
"step": 1550
},
{
"epoch": 3.5555555555555554,
"grad_norm": 0.3141703306612959,
"learning_rate": 5e-06,
"loss": 0.5441,
"step": 1560
},
{
"epoch": 3.578347578347578,
"grad_norm": 0.30598298352967457,
"learning_rate": 5e-06,
"loss": 0.5475,
"step": 1570
},
{
"epoch": 3.601139601139601,
"grad_norm": 0.28570732113004244,
"learning_rate": 5e-06,
"loss": 0.5438,
"step": 1580
},
{
"epoch": 3.623931623931624,
"grad_norm": 0.29764587818834687,
"learning_rate": 5e-06,
"loss": 0.5405,
"step": 1590
},
{
"epoch": 3.646723646723647,
"grad_norm": 0.30126884574280993,
"learning_rate": 5e-06,
"loss": 0.5365,
"step": 1600
},
{
"epoch": 3.6695156695156697,
"grad_norm": 0.2855847909256962,
"learning_rate": 5e-06,
"loss": 0.5403,
"step": 1610
},
{
"epoch": 3.6923076923076925,
"grad_norm": 0.2926291093654215,
"learning_rate": 5e-06,
"loss": 0.5446,
"step": 1620
},
{
"epoch": 3.715099715099715,
"grad_norm": 0.30082477661083823,
"learning_rate": 5e-06,
"loss": 0.5425,
"step": 1630
},
{
"epoch": 3.737891737891738,
"grad_norm": 0.3371570347062514,
"learning_rate": 5e-06,
"loss": 0.5397,
"step": 1640
},
{
"epoch": 3.7606837606837606,
"grad_norm": 0.3105947434754125,
"learning_rate": 5e-06,
"loss": 0.5533,
"step": 1650
},
{
"epoch": 3.7834757834757835,
"grad_norm": 0.316609957936053,
"learning_rate": 5e-06,
"loss": 0.5382,
"step": 1660
},
{
"epoch": 3.8062678062678064,
"grad_norm": 0.29097493899738613,
"learning_rate": 5e-06,
"loss": 0.5495,
"step": 1670
},
{
"epoch": 3.8290598290598292,
"grad_norm": 0.29269233881195844,
"learning_rate": 5e-06,
"loss": 0.544,
"step": 1680
},
{
"epoch": 3.851851851851852,
"grad_norm": 0.26939440151127303,
"learning_rate": 5e-06,
"loss": 0.5449,
"step": 1690
},
{
"epoch": 3.8746438746438745,
"grad_norm": 0.28613738234540304,
"learning_rate": 5e-06,
"loss": 0.5435,
"step": 1700
},
{
"epoch": 3.8974358974358974,
"grad_norm": 0.28520024092833124,
"learning_rate": 5e-06,
"loss": 0.5482,
"step": 1710
},
{
"epoch": 3.92022792022792,
"grad_norm": 0.2957727419479367,
"learning_rate": 5e-06,
"loss": 0.5476,
"step": 1720
},
{
"epoch": 3.943019943019943,
"grad_norm": 0.2926051428617584,
"learning_rate": 5e-06,
"loss": 0.5534,
"step": 1730
},
{
"epoch": 3.965811965811966,
"grad_norm": 0.27352601542002747,
"learning_rate": 5e-06,
"loss": 0.5475,
"step": 1740
},
{
"epoch": 3.9886039886039883,
"grad_norm": 0.2921116881599489,
"learning_rate": 5e-06,
"loss": 0.549,
"step": 1750
},
{
"epoch": 3.993162393162393,
"eval_loss": 0.6236439347267151,
"eval_runtime": 223.2896,
"eval_samples_per_second": 52.949,
"eval_steps_per_second": 0.416,
"step": 1752
},
{
"epoch": 3.993162393162393,
"step": 1752,
"total_flos": 3673450900094976.0,
"train_loss": 0.5887784766688194,
"train_runtime": 47860.2816,
"train_samples_per_second": 18.773,
"train_steps_per_second": 0.037
}
],
"logging_steps": 10,
"max_steps": 1752,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3673450900094976.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}