{
"best_global_step": 90,
"best_metric": 0.11805140972137451,
"best_model_checkpoint": "saves_stability/lntuning/llama-3-8b-instruct/train_copa_1757340231/checkpoint-90",
"epoch": 10.0,
"eval_steps": 45,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05555555555555555,
"grad_norm": 12.028129577636719,
"learning_rate": 2.2222222222222225e-06,
"loss": 0.6551,
"num_input_tokens_seen": 1568,
"step": 5
},
{
"epoch": 0.1111111111111111,
"grad_norm": 13.552009582519531,
"learning_rate": 5e-06,
"loss": 0.6673,
"num_input_tokens_seen": 3104,
"step": 10
},
{
"epoch": 0.16666666666666666,
"grad_norm": 11.640186309814453,
"learning_rate": 7.777777777777777e-06,
"loss": 0.9098,
"num_input_tokens_seen": 4704,
"step": 15
},
{
"epoch": 0.2222222222222222,
"grad_norm": 9.485243797302246,
"learning_rate": 1.0555555555555555e-05,
"loss": 0.5073,
"num_input_tokens_seen": 6304,
"step": 20
},
{
"epoch": 0.2777777777777778,
"grad_norm": 8.202380180358887,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.4478,
"num_input_tokens_seen": 7808,
"step": 25
},
{
"epoch": 0.3333333333333333,
"grad_norm": 8.377937316894531,
"learning_rate": 1.6111111111111115e-05,
"loss": 0.4037,
"num_input_tokens_seen": 9344,
"step": 30
},
{
"epoch": 0.3888888888888889,
"grad_norm": 3.752380847930908,
"learning_rate": 1.888888888888889e-05,
"loss": 0.2965,
"num_input_tokens_seen": 10912,
"step": 35
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.004181385040283,
"learning_rate": 2.1666666666666667e-05,
"loss": 0.1495,
"num_input_tokens_seen": 12512,
"step": 40
},
{
"epoch": 0.5,
"grad_norm": 3.194992780685425,
"learning_rate": 2.4444444444444445e-05,
"loss": 0.2041,
"num_input_tokens_seen": 14016,
"step": 45
},
{
"epoch": 0.5,
"eval_loss": 0.1784650981426239,
"eval_runtime": 0.5749,
"eval_samples_per_second": 69.58,
"eval_steps_per_second": 17.395,
"num_input_tokens_seen": 14016,
"step": 45
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.5340490341186523,
"learning_rate": 2.7222222222222223e-05,
"loss": 0.0414,
"num_input_tokens_seen": 15552,
"step": 50
},
{
"epoch": 0.6111111111111112,
"grad_norm": 1.0850095748901367,
"learning_rate": 3e-05,
"loss": 0.0265,
"num_input_tokens_seen": 17120,
"step": 55
},
{
"epoch": 0.6666666666666666,
"grad_norm": 17.816328048706055,
"learning_rate": 3.277777777777778e-05,
"loss": 0.1182,
"num_input_tokens_seen": 18720,
"step": 60
},
{
"epoch": 0.7222222222222222,
"grad_norm": 0.18008385598659515,
"learning_rate": 3.555555555555556e-05,
"loss": 0.1036,
"num_input_tokens_seen": 20288,
"step": 65
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.1256040334701538,
"learning_rate": 3.8333333333333334e-05,
"loss": 0.021,
"num_input_tokens_seen": 21856,
"step": 70
},
{
"epoch": 0.8333333333333334,
"grad_norm": 6.297001361846924,
"learning_rate": 4.111111111111111e-05,
"loss": 0.1456,
"num_input_tokens_seen": 23456,
"step": 75
},
{
"epoch": 0.8888888888888888,
"grad_norm": 6.056818008422852,
"learning_rate": 4.388888888888889e-05,
"loss": 0.1581,
"num_input_tokens_seen": 24992,
"step": 80
},
{
"epoch": 0.9444444444444444,
"grad_norm": 0.5619527101516724,
"learning_rate": 4.666666666666667e-05,
"loss": 0.1223,
"num_input_tokens_seen": 26528,
"step": 85
},
{
"epoch": 1.0,
"grad_norm": 0.8489146828651428,
"learning_rate": 4.9444444444444446e-05,
"loss": 0.0848,
"num_input_tokens_seen": 28096,
"step": 90
},
{
"epoch": 1.0,
"eval_loss": 0.11805140972137451,
"eval_runtime": 0.5744,
"eval_samples_per_second": 69.634,
"eval_steps_per_second": 17.408,
"num_input_tokens_seen": 28096,
"step": 90
},
{
"epoch": 1.0555555555555556,
"grad_norm": 0.040736857801675797,
"learning_rate": 4.9996991493233693e-05,
"loss": 0.017,
"num_input_tokens_seen": 29632,
"step": 95
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.370405912399292,
"learning_rate": 4.99847706754774e-05,
"loss": 0.0399,
"num_input_tokens_seen": 31200,
"step": 100
},
{
"epoch": 1.1666666666666667,
"grad_norm": 0.2983262538909912,
"learning_rate": 4.9963154107272295e-05,
"loss": 0.0811,
"num_input_tokens_seen": 32768,
"step": 105
},
{
"epoch": 1.2222222222222223,
"grad_norm": 0.04243910312652588,
"learning_rate": 4.993214991772563e-05,
"loss": 0.2488,
"num_input_tokens_seen": 34336,
"step": 110
},
{
"epoch": 1.2777777777777777,
"grad_norm": 1.2641880512237549,
"learning_rate": 4.989176976624511e-05,
"loss": 0.1245,
"num_input_tokens_seen": 35872,
"step": 115
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.5663049221038818,
"learning_rate": 4.9842028838154285e-05,
"loss": 0.0252,
"num_input_tokens_seen": 37440,
"step": 120
},
{
"epoch": 1.3888888888888888,
"grad_norm": 3.6676812171936035,
"learning_rate": 4.978294583898196e-05,
"loss": 0.0156,
"num_input_tokens_seen": 38976,
"step": 125
},
{
"epoch": 1.4444444444444444,
"grad_norm": 5.408911228179932,
"learning_rate": 4.971454298742779e-05,
"loss": 0.1855,
"num_input_tokens_seen": 40544,
"step": 130
},
{
"epoch": 1.5,
"grad_norm": 0.3176509439945221,
"learning_rate": 4.963684600700679e-05,
"loss": 0.069,
"num_input_tokens_seen": 42144,
"step": 135
},
{
"epoch": 1.5,
"eval_loss": 0.1266736388206482,
"eval_runtime": 0.58,
"eval_samples_per_second": 68.963,
"eval_steps_per_second": 17.241,
"num_input_tokens_seen": 42144,
"step": 135
},
{
"epoch": 1.5555555555555556,
"grad_norm": 5.149271488189697,
"learning_rate": 4.9549884116375714e-05,
"loss": 0.2346,
"num_input_tokens_seen": 43744,
"step": 140
},
{
"epoch": 1.6111111111111112,
"grad_norm": 0.3459404408931732,
"learning_rate": 4.9453690018345144e-05,
"loss": 0.0473,
"num_input_tokens_seen": 45280,
"step": 145
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.04379900172352791,
"learning_rate": 4.934829988758131e-05,
"loss": 0.1136,
"num_input_tokens_seen": 46880,
"step": 150
},
{
"epoch": 1.7222222222222223,
"grad_norm": 6.0745720863342285,
"learning_rate": 4.923375335700223e-05,
"loss": 0.2481,
"num_input_tokens_seen": 48480,
"step": 155
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.06044352427124977,
"learning_rate": 4.9110093502873476e-05,
"loss": 0.017,
"num_input_tokens_seen": 50080,
"step": 160
},
{
"epoch": 1.8333333333333335,
"grad_norm": 4.317785263061523,
"learning_rate": 4.897736682860885e-05,
"loss": 0.0766,
"num_input_tokens_seen": 51648,
"step": 165
},
{
"epoch": 1.8888888888888888,
"grad_norm": 2.0243148803710938,
"learning_rate": 4.883562324728241e-05,
"loss": 0.0453,
"num_input_tokens_seen": 53216,
"step": 170
},
{
"epoch": 1.9444444444444444,
"grad_norm": 0.2717069983482361,
"learning_rate": 4.868491606285823e-05,
"loss": 0.0625,
"num_input_tokens_seen": 54752,
"step": 175
},
{
"epoch": 2.0,
"grad_norm": 0.03594539687037468,
"learning_rate": 4.8525301950144894e-05,
"loss": 0.0283,
"num_input_tokens_seen": 56352,
"step": 180
},
{
"epoch": 2.0,
"eval_loss": 0.11958863586187363,
"eval_runtime": 0.5774,
"eval_samples_per_second": 69.278,
"eval_steps_per_second": 17.319,
"num_input_tokens_seen": 56352,
"step": 180
},
{
"epoch": 2.0555555555555554,
"grad_norm": 0.019619405269622803,
"learning_rate": 4.835684093348244e-05,
"loss": 0.0367,
"num_input_tokens_seen": 57888,
"step": 185
},
{
"epoch": 2.111111111111111,
"grad_norm": 0.5464202761650085,
"learning_rate": 4.817959636416969e-05,
"loss": 0.0118,
"num_input_tokens_seen": 59488,
"step": 190
},
{
"epoch": 2.1666666666666665,
"grad_norm": 0.048002611845731735,
"learning_rate": 4.7993634896640394e-05,
"loss": 0.091,
"num_input_tokens_seen": 61024,
"step": 195
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.032152995467185974,
"learning_rate": 4.779902646339722e-05,
"loss": 0.051,
"num_input_tokens_seen": 62528,
"step": 200
},
{
"epoch": 2.2777777777777777,
"grad_norm": 6.827496528625488,
"learning_rate": 4.759584424871302e-05,
"loss": 0.1105,
"num_input_tokens_seen": 64064,
"step": 205
},
{
"epoch": 2.3333333333333335,
"grad_norm": 0.04550907015800476,
"learning_rate": 4.7384164661109176e-05,
"loss": 0.1496,
"num_input_tokens_seen": 65696,
"step": 210
},
{
"epoch": 2.388888888888889,
"grad_norm": 2.4212865829467773,
"learning_rate": 4.7164067304621536e-05,
"loss": 0.0791,
"num_input_tokens_seen": 67232,
"step": 215
},
{
"epoch": 2.4444444444444446,
"grad_norm": 2.493133544921875,
"learning_rate": 4.693563494886455e-05,
"loss": 0.0206,
"num_input_tokens_seen": 68832,
"step": 220
},
{
"epoch": 2.5,
"grad_norm": 4.306882858276367,
"learning_rate": 4.669895349790502e-05,
"loss": 0.1055,
"num_input_tokens_seen": 70432,
"step": 225
},
{
"epoch": 2.5,
"eval_loss": 0.11920864880084991,
"eval_runtime": 0.583,
"eval_samples_per_second": 68.608,
"eval_steps_per_second": 17.152,
"num_input_tokens_seen": 70432,
"step": 225
},
{
"epoch": 2.5555555555555554,
"grad_norm": 0.6485729813575745,
"learning_rate": 4.645411195795709e-05,
"loss": 0.0058,
"num_input_tokens_seen": 72032,
"step": 230
},
{
"epoch": 2.611111111111111,
"grad_norm": 0.7758520245552063,
"learning_rate": 4.620120240391065e-05,
"loss": 0.015,
"num_input_tokens_seen": 73632,
"step": 235
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.016528239473700523,
"learning_rate": 4.5940319944705736e-05,
"loss": 0.1068,
"num_input_tokens_seen": 75168,
"step": 240
},
{
"epoch": 2.7222222222222223,
"grad_norm": 2.0096018314361572,
"learning_rate": 4.567156268756594e-05,
"loss": 0.0228,
"num_input_tokens_seen": 76768,
"step": 245
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.016257042065262794,
"learning_rate": 4.539503170110431e-05,
"loss": 0.0078,
"num_input_tokens_seen": 78336,
"step": 250
},
{
"epoch": 2.8333333333333335,
"grad_norm": 0.037896547466516495,
"learning_rate": 4.5110830977315556e-05,
"loss": 0.0148,
"num_input_tokens_seen": 79872,
"step": 255
},
{
"epoch": 2.888888888888889,
"grad_norm": 1.2489393949508667,
"learning_rate": 4.4819067392468944e-05,
"loss": 0.06,
"num_input_tokens_seen": 81440,
"step": 260
},
{
"epoch": 2.9444444444444446,
"grad_norm": 0.04921220242977142,
"learning_rate": 4.4519850666916484e-05,
"loss": 0.0711,
"num_input_tokens_seen": 83008,
"step": 265
},
{
"epoch": 3.0,
"grad_norm": 0.06469002366065979,
"learning_rate": 4.4213293323831585e-05,
"loss": 0.2017,
"num_input_tokens_seen": 84544,
"step": 270
},
{
"epoch": 3.0,
"eval_loss": 0.1241941899061203,
"eval_runtime": 0.5764,
"eval_samples_per_second": 69.395,
"eval_steps_per_second": 17.349,
"num_input_tokens_seen": 84544,
"step": 270
},
{
"epoch": 3.0555555555555554,
"grad_norm": 1.0416898727416992,
"learning_rate": 4.38995106468937e-05,
"loss": 0.0423,
"num_input_tokens_seen": 86112,
"step": 275
},
{
"epoch": 3.111111111111111,
"grad_norm": 0.030291926115751266,
"learning_rate": 4.357862063693486e-05,
"loss": 0.1031,
"num_input_tokens_seen": 87680,
"step": 280
},
{
"epoch": 3.1666666666666665,
"grad_norm": 0.34328025579452515,
"learning_rate": 4.325074396756437e-05,
"loss": 0.0353,
"num_input_tokens_seen": 89216,
"step": 285
},
{
"epoch": 3.2222222222222223,
"grad_norm": 0.014772674068808556,
"learning_rate": 4.2916003939788403e-05,
"loss": 0.0296,
"num_input_tokens_seen": 90816,
"step": 290
},
{
"epoch": 3.2777777777777777,
"grad_norm": 1.0737226009368896,
"learning_rate": 4.257452643564155e-05,
"loss": 0.051,
"num_input_tokens_seen": 92448,
"step": 295
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.3188193440437317,
"learning_rate": 4.22264398708477e-05,
"loss": 0.0048,
"num_input_tokens_seen": 93984,
"step": 300
},
{
"epoch": 3.388888888888889,
"grad_norm": 0.01848280057311058,
"learning_rate": 4.1871875146528195e-05,
"loss": 0.0704,
"num_input_tokens_seen": 95584,
"step": 305
},
{
"epoch": 3.4444444444444446,
"grad_norm": 0.02418445609509945,
"learning_rate": 4.1510965599975196e-05,
"loss": 0.0083,
"num_input_tokens_seen": 97088,
"step": 310
},
{
"epoch": 3.5,
"grad_norm": 0.012784923426806927,
"learning_rate": 4.114384695450906e-05,
"loss": 0.001,
"num_input_tokens_seen": 98688,
"step": 315
},
{
"epoch": 3.5,
"eval_loss": 0.12571492791175842,
"eval_runtime": 0.5841,
"eval_samples_per_second": 68.486,
"eval_steps_per_second": 17.122,
"num_input_tokens_seen": 98688,
"step": 315
},
{
"epoch": 3.5555555555555554,
"grad_norm": 0.1880708634853363,
"learning_rate": 4.077065726843828e-05,
"loss": 0.1335,
"num_input_tokens_seen": 100288,
"step": 320
},
{
"epoch": 3.611111111111111,
"grad_norm": 0.010126232169568539,
"learning_rate": 4.039153688314145e-05,
"loss": 0.0038,
"num_input_tokens_seen": 101824,
"step": 325
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.01738058216869831,
"learning_rate": 4.000662837029062e-05,
"loss": 0.0093,
"num_input_tokens_seen": 103392,
"step": 330
},
{
"epoch": 3.7222222222222223,
"grad_norm": 0.39928749203681946,
"learning_rate": 3.961607647823583e-05,
"loss": 0.1068,
"num_input_tokens_seen": 104928,
"step": 335
},
{
"epoch": 3.7777777777777777,
"grad_norm": 3.9759440422058105,
"learning_rate": 3.9220028077571295e-05,
"loss": 0.1346,
"num_input_tokens_seen": 106560,
"step": 340
},
{
"epoch": 3.8333333333333335,
"grad_norm": 0.045272424817085266,
"learning_rate": 3.881863210590332e-05,
"loss": 0.0017,
"num_input_tokens_seen": 108096,
"step": 345
},
{
"epoch": 3.888888888888889,
"grad_norm": 1.4312611818313599,
"learning_rate": 3.841203951184095e-05,
"loss": 0.0963,
"num_input_tokens_seen": 109664,
"step": 350
},
{
"epoch": 3.9444444444444446,
"grad_norm": 0.02004213072359562,
"learning_rate": 3.8000403198230387e-05,
"loss": 0.0429,
"num_input_tokens_seen": 111232,
"step": 355
},
{
"epoch": 4.0,
"grad_norm": 0.017298046499490738,
"learning_rate": 3.75838779646545e-05,
"loss": 0.0121,
"num_input_tokens_seen": 112800,
"step": 360
},
{
"epoch": 4.0,
"eval_loss": 0.1298300325870514,
"eval_runtime": 0.5773,
"eval_samples_per_second": 69.285,
"eval_steps_per_second": 17.321,
"num_input_tokens_seen": 112800,
"step": 360
},
{
"epoch": 4.055555555555555,
"grad_norm": 0.04087962582707405,
"learning_rate": 3.7162620449219e-05,
"loss": 0.1289,
"num_input_tokens_seen": 114400,
"step": 365
},
{
"epoch": 4.111111111111111,
"grad_norm": 6.873142719268799,
"learning_rate": 3.673678906964727e-05,
"loss": 0.1789,
"num_input_tokens_seen": 115936,
"step": 370
},
{
"epoch": 4.166666666666667,
"grad_norm": 0.16114553809165955,
"learning_rate": 3.630654396370594e-05,
"loss": 0.0099,
"num_input_tokens_seen": 117472,
"step": 375
},
{
"epoch": 4.222222222222222,
"grad_norm": 0.5829356908798218,
"learning_rate": 3.5872046928983626e-05,
"loss": 0.1038,
"num_input_tokens_seen": 119040,
"step": 380
},
{
"epoch": 4.277777777777778,
"grad_norm": 0.7600432634353638,
"learning_rate": 3.543346136204545e-05,
"loss": 0.0187,
"num_input_tokens_seen": 120640,
"step": 385
},
{
"epoch": 4.333333333333333,
"grad_norm": 0.151974618434906,
"learning_rate": 3.499095219698631e-05,
"loss": 0.0012,
"num_input_tokens_seen": 122176,
"step": 390
},
{
"epoch": 4.388888888888889,
"grad_norm": 0.2258666604757309,
"learning_rate": 3.454468584340588e-05,
"loss": 0.0239,
"num_input_tokens_seen": 123744,
"step": 395
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.018906690180301666,
"learning_rate": 3.409483012382879e-05,
"loss": 0.0231,
"num_input_tokens_seen": 125280,
"step": 400
},
{
"epoch": 4.5,
"grad_norm": 0.05237025022506714,
"learning_rate": 3.364155421059342e-05,
"loss": 0.0068,
"num_input_tokens_seen": 126816,
"step": 405
},
{
"epoch": 4.5,
"eval_loss": 0.13041964173316956,
"eval_runtime": 0.5753,
"eval_samples_per_second": 69.532,
"eval_steps_per_second": 17.383,
"num_input_tokens_seen": 126816,
"step": 405
},
{
"epoch": 4.555555555555555,
"grad_norm": 0.11989942193031311,
"learning_rate": 3.318502856223311e-05,
"loss": 0.0047,
"num_input_tokens_seen": 128352,
"step": 410
},
{
"epoch": 4.611111111111111,
"grad_norm": 0.015702011063694954,
"learning_rate": 3.272542485937369e-05,
"loss": 0.0029,
"num_input_tokens_seen": 129920,
"step": 415
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.028754986822605133,
"learning_rate": 3.2262915940171376e-05,
"loss": 0.02,
"num_input_tokens_seen": 131424,
"step": 420
},
{
"epoch": 4.722222222222222,
"grad_norm": 0.010241667740046978,
"learning_rate": 3.1797675735315455e-05,
"loss": 0.004,
"num_input_tokens_seen": 132960,
"step": 425
},
{
"epoch": 4.777777777777778,
"grad_norm": 0.011341402307152748,
"learning_rate": 3.132987920262005e-05,
"loss": 0.0012,
"num_input_tokens_seen": 134528,
"step": 430
},
{
"epoch": 4.833333333333333,
"grad_norm": 0.009936963208019733,
"learning_rate": 3.085970226122962e-05,
"loss": 0.0168,
"num_input_tokens_seen": 136096,
"step": 435
},
{
"epoch": 4.888888888888889,
"grad_norm": 0.03836958855390549,
"learning_rate": 3.0387321725463e-05,
"loss": 0.0643,
"num_input_tokens_seen": 137728,
"step": 440
},
{
"epoch": 4.944444444444445,
"grad_norm": 3.9874274730682373,
"learning_rate": 2.9912915238320754e-05,
"loss": 0.0452,
"num_input_tokens_seen": 139264,
"step": 445
},
{
"epoch": 5.0,
"grad_norm": 1.8843461275100708,
"learning_rate": 2.9436661204680882e-05,
"loss": 0.0379,
"num_input_tokens_seen": 140800,
"step": 450
},
{
"epoch": 5.0,
"eval_loss": 0.1350220888853073,
"eval_runtime": 0.5763,
"eval_samples_per_second": 69.414,
"eval_steps_per_second": 17.353,
"num_input_tokens_seen": 140800,
"step": 450
},
{
"epoch": 5.055555555555555,
"grad_norm": 0.2792241871356964,
"learning_rate": 2.8958738724208072e-05,
"loss": 0.0302,
"num_input_tokens_seen": 142400,
"step": 455
},
{
"epoch": 5.111111111111111,
"grad_norm": 0.027824169024825096,
"learning_rate": 2.8479327524001636e-05,
"loss": 0.0015,
"num_input_tokens_seen": 143872,
"step": 460
},
{
"epoch": 5.166666666666667,
"grad_norm": 0.005916323512792587,
"learning_rate": 2.7998607891007495e-05,
"loss": 0.0019,
"num_input_tokens_seen": 145440,
"step": 465
},
{
"epoch": 5.222222222222222,
"grad_norm": 0.008561103604733944,
"learning_rate": 2.7516760604219617e-05,
"loss": 0.0025,
"num_input_tokens_seen": 147040,
"step": 470
},
{
"epoch": 5.277777777777778,
"grad_norm": 0.013526272028684616,
"learning_rate": 2.7033966866696457e-05,
"loss": 0.0771,
"num_input_tokens_seen": 148576,
"step": 475
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.13795623183250427,
"learning_rate": 2.6550408237417885e-05,
"loss": 0.0053,
"num_input_tokens_seen": 150112,
"step": 480
},
{
"epoch": 5.388888888888889,
"grad_norm": 0.8442857265472412,
"learning_rate": 2.6066266563008267e-05,
"loss": 0.0898,
"num_input_tokens_seen": 151712,
"step": 485
},
{
"epoch": 5.444444444444445,
"grad_norm": 2.5526673793792725,
"learning_rate": 2.5581723909351406e-05,
"loss": 0.0661,
"num_input_tokens_seen": 153280,
"step": 490
},
{
"epoch": 5.5,
"grad_norm": 0.05172133445739746,
"learning_rate": 2.5096962493123012e-05,
"loss": 0.0014,
"num_input_tokens_seen": 154848,
"step": 495
},
{
"epoch": 5.5,
"eval_loss": 0.13754430413246155,
"eval_runtime": 0.58,
"eval_samples_per_second": 68.968,
"eval_steps_per_second": 17.242,
"num_input_tokens_seen": 154848,
"step": 495
},
{
"epoch": 5.555555555555555,
"grad_norm": 0.007516528945416212,
"learning_rate": 2.461216461326642e-05,
"loss": 0.007,
"num_input_tokens_seen": 156352,
"step": 500
},
{
"epoch": 5.611111111111111,
"grad_norm": 3.065181016921997,
"learning_rate": 2.4127512582437485e-05,
"loss": 0.0205,
"num_input_tokens_seen": 157920,
"step": 505
},
{
"epoch": 5.666666666666667,
"grad_norm": 6.442762851715088,
"learning_rate": 2.364318865844416e-05,
"loss": 0.0562,
"num_input_tokens_seen": 159488,
"step": 510
},
{
"epoch": 5.722222222222222,
"grad_norm": 0.27856680750846863,
"learning_rate": 2.3159374975706884e-05,
"loss": 0.0033,
"num_input_tokens_seen": 161056,
"step": 515
},
{
"epoch": 5.777777777777778,
"grad_norm": 0.3110099732875824,
"learning_rate": 2.2676253476765196e-05,
"loss": 0.0015,
"num_input_tokens_seen": 162624,
"step": 520
},
{
"epoch": 5.833333333333333,
"grad_norm": 5.475592136383057,
"learning_rate": 2.2194005843856636e-05,
"loss": 0.0876,
"num_input_tokens_seen": 164128,
"step": 525
},
{
"epoch": 5.888888888888889,
"grad_norm": 7.484983444213867,
"learning_rate": 2.1712813430593436e-05,
"loss": 0.0961,
"num_input_tokens_seen": 165664,
"step": 530
},
{
"epoch": 5.944444444444445,
"grad_norm": 0.10913576930761337,
"learning_rate": 2.1232857193762924e-05,
"loss": 0.0025,
"num_input_tokens_seen": 167200,
"step": 535
},
{
"epoch": 6.0,
"grad_norm": 0.027029909193515778,
"learning_rate": 2.0754317625276983e-05,
"loss": 0.0027,
"num_input_tokens_seen": 168736,
"step": 540
},
{
"epoch": 6.0,
"eval_loss": 0.13720272481441498,
"eval_runtime": 0.5771,
"eval_samples_per_second": 69.312,
"eval_steps_per_second": 17.328,
"num_input_tokens_seen": 168736,
"step": 540
},
{
"epoch": 6.055555555555555,
"grad_norm": 0.03148474916815758,
"learning_rate": 2.02773746842965e-05,
"loss": 0.0017,
"num_input_tokens_seen": 170304,
"step": 545
},
{
"epoch": 6.111111111111111,
"grad_norm": 0.08897438645362854,
"learning_rate": 1.980220772955602e-05,
"loss": 0.0135,
"num_input_tokens_seen": 171840,
"step": 550
},
{
"epoch": 6.166666666666667,
"grad_norm": 0.05110946670174599,
"learning_rate": 1.932899545191433e-05,
"loss": 0.0008,
"num_input_tokens_seen": 173408,
"step": 555
},
{
"epoch": 6.222222222222222,
"grad_norm": 0.011350942775607109,
"learning_rate": 1.8857915807156092e-05,
"loss": 0.0014,
"num_input_tokens_seen": 174976,
"step": 560
},
{
"epoch": 6.277777777777778,
"grad_norm": 0.009600317105650902,
"learning_rate": 1.838914594906995e-05,
"loss": 0.0227,
"num_input_tokens_seen": 176544,
"step": 565
},
{
"epoch": 6.333333333333333,
"grad_norm": 0.027924560010433197,
"learning_rate": 1.792286216282824e-05,
"loss": 0.0964,
"num_input_tokens_seen": 178112,
"step": 570
},
{
"epoch": 6.388888888888889,
"grad_norm": 0.3235127925872803,
"learning_rate": 1.7459239798693364e-05,
"loss": 0.0015,
"num_input_tokens_seen": 179584,
"step": 575
},
{
"epoch": 6.444444444444445,
"grad_norm": 3.067894458770752,
"learning_rate": 1.699845320607571e-05,
"loss": 0.022,
"num_input_tokens_seen": 181152,
"step": 580
},
{
"epoch": 6.5,
"grad_norm": 0.014973540790379047,
"learning_rate": 1.6540675667967974e-05,
"loss": 0.0014,
"num_input_tokens_seen": 182688,
"step": 585
},
{
"epoch": 6.5,
"eval_loss": 0.14122465252876282,
"eval_runtime": 0.5961,
"eval_samples_per_second": 67.101,
"eval_steps_per_second": 16.775,
"num_input_tokens_seen": 182688,
"step": 585
},
{
"epoch": 6.555555555555555,
"grad_norm": 0.009066694416105747,
"learning_rate": 1.60860793357805e-05,
"loss": 0.0014,
"num_input_tokens_seen": 184224,
"step": 590
},
{
"epoch": 6.611111111111111,
"grad_norm": 0.17362192273139954,
"learning_rate": 1.56348351646022e-05,
"loss": 0.0395,
"num_input_tokens_seen": 185792,
"step": 595
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.032302968204021454,
"learning_rate": 1.5187112848911323e-05,
"loss": 0.0417,
"num_input_tokens_seen": 187392,
"step": 600
},
{
"epoch": 6.722222222222222,
"grad_norm": 0.6071529388427734,
"learning_rate": 1.47430807587603e-05,
"loss": 0.0832,
"num_input_tokens_seen": 188992,
"step": 605
},
{
"epoch": 6.777777777777778,
"grad_norm": 0.04848611727356911,
"learning_rate": 1.430290587645865e-05,
"loss": 0.0866,
"num_input_tokens_seen": 190560,
"step": 610
},
{
"epoch": 6.833333333333333,
"grad_norm": 0.0878119021654129,
"learning_rate": 1.3866753733777765e-05,
"loss": 0.007,
"num_input_tokens_seen": 192128,
"step": 615
},
{
"epoch": 6.888888888888889,
"grad_norm": 0.10325942933559418,
"learning_rate": 1.343478834970121e-05,
"loss": 0.0074,
"num_input_tokens_seen": 193664,
"step": 620
},
{
"epoch": 6.944444444444445,
"grad_norm": 2.3258378505706787,
"learning_rate": 1.3007172168743854e-05,
"loss": 0.0102,
"num_input_tokens_seen": 195296,
"step": 625
},
{
"epoch": 7.0,
"grad_norm": 0.18902866542339325,
"learning_rate": 1.2584065999863102e-05,
"loss": 0.0013,
"num_input_tokens_seen": 196896,
"step": 630
},
{
"epoch": 7.0,
"eval_loss": 0.1432798057794571,
"eval_runtime": 0.5835,
"eval_samples_per_second": 68.557,
"eval_steps_per_second": 17.139,
"num_input_tokens_seen": 196896,
"step": 630
},
{
"epoch": 7.055555555555555,
"grad_norm": 0.10227175801992416,
"learning_rate": 1.2165628955985314e-05,
"loss": 0.0085,
"num_input_tokens_seen": 198528,
"step": 635
},
{
"epoch": 7.111111111111111,
"grad_norm": 0.16049188375473022,
"learning_rate": 1.175201839416988e-05,
"loss": 0.0203,
"num_input_tokens_seen": 200096,
"step": 640
},
{
"epoch": 7.166666666666667,
"grad_norm": 0.05664211884140968,
"learning_rate": 1.1343389856433658e-05,
"loss": 0.0326,
"num_input_tokens_seen": 201696,
"step": 645
},
{
"epoch": 7.222222222222222,
"grad_norm": 0.673611044883728,
"learning_rate": 1.0939897011258001e-05,
"loss": 0.0038,
"num_input_tokens_seen": 203232,
"step": 650
},
{
"epoch": 7.277777777777778,
"grad_norm": 8.291230201721191,
"learning_rate": 1.0541691595800337e-05,
"loss": 0.084,
"num_input_tokens_seen": 204768,
"step": 655
},
{
"epoch": 7.333333333333333,
"grad_norm": 0.0056886388920247555,
"learning_rate": 1.0148923358832022e-05,
"loss": 0.0079,
"num_input_tokens_seen": 206368,
"step": 660
},
{
"epoch": 7.388888888888889,
"grad_norm": 0.005177559796720743,
"learning_rate": 9.761740004423927e-06,
"loss": 0.0006,
"num_input_tokens_seen": 207904,
"step": 665
},
{
"epoch": 7.444444444444445,
"grad_norm": 0.008305308409035206,
"learning_rate": 9.380287136401e-06,
"loss": 0.0009,
"num_input_tokens_seen": 209472,
"step": 670
},
{
"epoch": 7.5,
"grad_norm": 0.01929614134132862,
"learning_rate": 9.00470820358663e-06,
"loss": 0.0014,
"num_input_tokens_seen": 211072,
"step": 675
},
{
"epoch": 7.5,
"eval_loss": 0.14391985535621643,
"eval_runtime": 0.5862,
"eval_samples_per_second": 68.24,
"eval_steps_per_second": 17.06,
"num_input_tokens_seen": 211072,
"step": 675
},
{
"epoch": 7.555555555555555,
"grad_norm": 0.020041748881340027,
"learning_rate": 8.635144445857406e-06,
"loss": 0.0137,
"num_input_tokens_seen": 212608,
"step": 680
},
{
"epoch": 7.611111111111111,
"grad_norm": 0.05644703283905983,
"learning_rate": 8.271734841028553e-06,
"loss": 0.0056,
"num_input_tokens_seen": 214176,
"step": 685
},
{
"epoch": 7.666666666666667,
"grad_norm": 0.05736091732978821,
"learning_rate": 7.914616052590071e-06,
"loss": 0.0053,
"num_input_tokens_seen": 215744,
"step": 690
},
{
"epoch": 7.722222222222222,
"grad_norm": 1.157248616218567,
"learning_rate": 7.563922378313218e-06,
"loss": 0.006,
"num_input_tokens_seen": 217248,
"step": 695
},
{
"epoch": 7.777777777777778,
"grad_norm": 0.006323433481156826,
"learning_rate": 7.219785699746573e-06,
"loss": 0.0013,
"num_input_tokens_seen": 218816,
"step": 700
},
{
"epoch": 7.833333333333333,
"grad_norm": 0.24250803887844086,
"learning_rate": 6.882335432620779e-06,
"loss": 0.0738,
"num_input_tokens_seen": 220288,
"step": 705
},
{
"epoch": 7.888888888888889,
"grad_norm": 0.04636640101671219,
"learning_rate": 6.55169847818059e-06,
"loss": 0.0011,
"num_input_tokens_seen": 221856,
"step": 710
},
{
"epoch": 7.944444444444445,
"grad_norm": 0.5231844782829285,
"learning_rate": 6.22799917546252e-06,
"loss": 0.0549,
"num_input_tokens_seen": 223424,
"step": 715
},
{
"epoch": 8.0,
"grad_norm": 0.016849713400006294,
"learning_rate": 5.9113592545359945e-06,
"loss": 0.0444,
"num_input_tokens_seen": 225056,
"step": 720
},
{
"epoch": 8.0,
"eval_loss": 0.14301875233650208,
"eval_runtime": 0.5826,
"eval_samples_per_second": 68.663,
"eval_steps_per_second": 17.166,
"num_input_tokens_seen": 225056,
"step": 720
},
{
"epoch": 8.055555555555555,
"grad_norm": 1.2855383157730103,
"learning_rate": 5.601897790725643e-06,
"loss": 0.0501,
"num_input_tokens_seen": 226656,
"step": 725
},
{
"epoch": 8.11111111111111,
"grad_norm": 0.019593268632888794,
"learning_rate": 5.299731159831953e-06,
"loss": 0.0006,
"num_input_tokens_seen": 228160,
"step": 730
},
{
"epoch": 8.166666666666666,
"grad_norm": 0.03212496265769005,
"learning_rate": 5.004972994367102e-06,
"loss": 0.0017,
"num_input_tokens_seen": 229728,
"step": 735
},
{
"epoch": 8.222222222222221,
"grad_norm": 0.00420254236087203,
"learning_rate": 4.7177341408224e-06,
"loss": 0.0551,
"num_input_tokens_seen": 231328,
"step": 740
},
{
"epoch": 8.277777777777779,
"grad_norm": 0.013452420942485332,
"learning_rate": 4.438122617983443e-06,
"loss": 0.0728,
"num_input_tokens_seen": 232832,
"step": 745
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.0068261465057730675,
"learning_rate": 4.166243576308712e-06,
"loss": 0.0007,
"num_input_tokens_seen": 234432,
"step": 750
},
{
"epoch": 8.38888888888889,
"grad_norm": 0.011409962549805641,
"learning_rate": 3.9021992583867325e-06,
"loss": 0.0058,
"num_input_tokens_seen": 235968,
"step": 755
},
{
"epoch": 8.444444444444445,
"grad_norm": 0.07829581946134567,
"learning_rate": 3.6460889604868626e-06,
"loss": 0.0034,
"num_input_tokens_seen": 237568,
"step": 760
},
{
"epoch": 8.5,
"grad_norm": 1.6991177797317505,
"learning_rate": 3.398008995217988e-06,
"loss": 0.0927,
"num_input_tokens_seen": 239168,
"step": 765
},
{
"epoch": 8.5,
"eval_loss": 0.14233675599098206,
"eval_runtime": 0.586,
"eval_samples_per_second": 68.264,
"eval_steps_per_second": 17.066,
"num_input_tokens_seen": 239168,
"step": 765
},
{
"epoch": 8.555555555555555,
"grad_norm": 0.19973303377628326,
"learning_rate": 3.158052655309332e-06,
"loss": 0.0369,
"num_input_tokens_seen": 240800,
"step": 770
},
{
"epoch": 8.61111111111111,
"grad_norm": 0.06274840980768204,
"learning_rate": 2.9263101785268254e-06,
"loss": 0.0027,
"num_input_tokens_seen": 242368,
"step": 775
},
{
"epoch": 8.666666666666666,
"grad_norm": 0.12896870076656342,
"learning_rate": 2.7028687137384267e-06,
"loss": 0.0024,
"num_input_tokens_seen": 243872,
"step": 780
},
{
"epoch": 8.722222222222221,
"grad_norm": 0.013180126436054707,
"learning_rate": 2.487812288140945e-06,
"loss": 0.008,
"num_input_tokens_seen": 245472,
"step": 785
},
{
"epoch": 8.777777777777779,
"grad_norm": 0.12642890214920044,
"learning_rate": 2.281221775660894e-06,
"loss": 0.0008,
"num_input_tokens_seen": 247072,
"step": 790
},
{
"epoch": 8.833333333333334,
"grad_norm": 0.011742387898266315,
"learning_rate": 2.0831748665410765e-06,
"loss": 0.0004,
"num_input_tokens_seen": 248640,
"step": 795
},
{
"epoch": 8.88888888888889,
"grad_norm": 0.005955726373940706,
"learning_rate": 1.893746038124497e-06,
"loss": 0.0014,
"num_input_tokens_seen": 250208,
"step": 800
},
{
"epoch": 8.944444444444445,
"grad_norm": 0.0940459817647934,
"learning_rate": 1.713006526846439e-06,
"loss": 0.0014,
"num_input_tokens_seen": 251744,
"step": 805
},
{
"epoch": 9.0,
"grad_norm": 0.24416488409042358,
"learning_rate": 1.541024301445404e-06,
"loss": 0.0111,
"num_input_tokens_seen": 253312,
"step": 810
},
{
"epoch": 9.0,
"eval_loss": 0.14459559321403503,
"eval_runtime": 0.5731,
"eval_samples_per_second": 69.794,
"eval_steps_per_second": 17.449,
"num_input_tokens_seen": 253312,
"step": 810
},
{
"epoch": 9.055555555555555,
"grad_norm": 0.057257551699876785,
"learning_rate": 1.3778640374027985e-06,
"loss": 0.0048,
"num_input_tokens_seen": 254944,
"step": 815
},
{
"epoch": 9.11111111111111,
"grad_norm": 0.006494577042758465,
"learning_rate": 1.2235870926211619e-06,
"loss": 0.0332,
"num_input_tokens_seen": 256512,
"step": 820
},
{
"epoch": 9.166666666666666,
"grad_norm": 0.6156882047653198,
"learning_rate": 1.0782514843499653e-06,
"loss": 0.0026,
"num_input_tokens_seen": 258016,
"step": 825
},
{
"epoch": 9.222222222222221,
"grad_norm": 0.024971961975097656,
"learning_rate": 9.419118673676924e-07,
"loss": 0.0657,
"num_input_tokens_seen": 259552,
"step": 830
},
{
"epoch": 9.277777777777779,
"grad_norm": 0.0062075089663267136,
"learning_rate": 8.146195134284052e-07,
"loss": 0.0145,
"num_input_tokens_seen": 261152,
"step": 835
},
{
"epoch": 9.333333333333334,
"grad_norm": 0.4766158163547516,
"learning_rate": 6.964222919805391e-07,
"loss": 0.0023,
"num_input_tokens_seen": 262720,
"step": 840
},
{
"epoch": 9.38888888888889,
"grad_norm": 0.056195683777332306,
"learning_rate": 5.87364652165176e-07,
"loss": 0.0087,
"num_input_tokens_seen": 264224,
"step": 845
},
{
"epoch": 9.444444444444445,
"grad_norm": 0.9626007080078125,
"learning_rate": 4.874876061005173e-07,
"loss": 0.0044,
"num_input_tokens_seen": 265792,
"step": 850
},
{
"epoch": 9.5,
"grad_norm": 0.15369237959384918,
"learning_rate": 3.9682871345891883e-07,
"loss": 0.001,
"num_input_tokens_seen": 267392,
"step": 855
},
{
"epoch": 9.5,
"eval_loss": 0.1462300568819046,
"eval_runtime": 0.5792,
"eval_samples_per_second": 69.063,
"eval_steps_per_second": 17.266,
"num_input_tokens_seen": 267392,
"step": 855
},
{
"epoch": 9.555555555555555,
"grad_norm": 0.028775176033377647,
"learning_rate": 3.1542206734221924e-07,
"loss": 0.0089,
"num_input_tokens_seen": 268896,
"step": 860
},
{
"epoch": 9.61111111111111,
"grad_norm": 0.20606637001037598,
"learning_rate": 2.4329828146074095e-07,
"loss": 0.0468,
"num_input_tokens_seen": 270432,
"step": 865
},
{
"epoch": 9.666666666666666,
"grad_norm": 0.8666467666625977,
"learning_rate": 1.8048447862070718e-07,
"loss": 0.015,
"num_input_tokens_seen": 271968,
"step": 870
},
{
"epoch": 9.722222222222221,
"grad_norm": 0.014927767217159271,
"learning_rate": 1.2700428052447033e-07,
"loss": 0.0026,
"num_input_tokens_seen": 273504,
"step": 875
},
{
"epoch": 9.777777777777779,
"grad_norm": 0.006279713939875364,
"learning_rate": 8.28777988873486e-08,
"loss": 0.0011,
"num_input_tokens_seen": 275104,
"step": 880
},
{
"epoch": 9.833333333333334,
"grad_norm": 1.544179916381836,
"learning_rate": 4.8121627874450625e-08,
"loss": 0.0058,
"num_input_tokens_seen": 276672,
"step": 885
},
{
"epoch": 9.88888888888889,
"grad_norm": 0.714333713054657,
"learning_rate": 2.2748837860270267e-08,
"loss": 0.0029,
"num_input_tokens_seen": 278272,
"step": 890
},
{
"epoch": 9.944444444444445,
"grad_norm": 0.01058835256844759,
"learning_rate": 6.768970513457151e-09,
"loss": 0.0869,
"num_input_tokens_seen": 279808,
"step": 895
},
{
"epoch": 10.0,
"grad_norm": 0.0075642429292202,
"learning_rate": 1.8803520859811406e-10,
"loss": 0.0256,
"num_input_tokens_seen": 281408,
"step": 900
},
{
"epoch": 10.0,
"eval_loss": 0.13989758491516113,
"eval_runtime": 0.5834,
"eval_samples_per_second": 68.56,
"eval_steps_per_second": 17.14,
"num_input_tokens_seen": 281408,
"step": 900
},
{
"epoch": 10.0,
"num_input_tokens_seen": 281408,
"step": 900,
"total_flos": 1.2672124584984576e+16,
"train_loss": 0.06401773532231649,
"train_runtime": 160.7306,
"train_samples_per_second": 22.398,
"train_steps_per_second": 5.599
}
],
"logging_steps": 5,
"max_steps": 900,
"num_input_tokens_seen": 281408,
"num_train_epochs": 10,
"save_steps": 45,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2672124584984576e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}