{
"best_global_step": 225,
"best_metric": 0.05929639935493469,
"best_model_checkpoint": "saves_stability/lntuning/llama-3-8b-instruct/train_copa_1757340255/checkpoint-225",
"epoch": 10.0,
"eval_steps": 45,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05555555555555555,
"grad_norm": 12.413766860961914,
"learning_rate": 2.2222222222222225e-06,
"loss": 0.5401,
"num_input_tokens_seen": 1632,
"step": 5
},
{
"epoch": 0.1111111111111111,
"grad_norm": 10.926953315734863,
"learning_rate": 5e-06,
"loss": 0.589,
"num_input_tokens_seen": 3232,
"step": 10
},
{
"epoch": 0.16666666666666666,
"grad_norm": 11.459014892578125,
"learning_rate": 7.777777777777777e-06,
"loss": 0.5822,
"num_input_tokens_seen": 4832,
"step": 15
},
{
"epoch": 0.2222222222222222,
"grad_norm": 9.86060619354248,
"learning_rate": 1.0555555555555555e-05,
"loss": 0.6143,
"num_input_tokens_seen": 6432,
"step": 20
},
{
"epoch": 0.2777777777777778,
"grad_norm": 8.448033332824707,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.4866,
"num_input_tokens_seen": 7968,
"step": 25
},
{
"epoch": 0.3333333333333333,
"grad_norm": 8.12926197052002,
"learning_rate": 1.6111111111111115e-05,
"loss": 0.2775,
"num_input_tokens_seen": 9504,
"step": 30
},
{
"epoch": 0.3888888888888889,
"grad_norm": 6.437356472015381,
"learning_rate": 1.888888888888889e-05,
"loss": 0.3527,
"num_input_tokens_seen": 11104,
"step": 35
},
{
"epoch": 0.4444444444444444,
"grad_norm": 6.665041923522949,
"learning_rate": 2.1666666666666667e-05,
"loss": 0.4136,
"num_input_tokens_seen": 12704,
"step": 40
},
{
"epoch": 0.5,
"grad_norm": 2.534313201904297,
"learning_rate": 2.4444444444444445e-05,
"loss": 0.2081,
"num_input_tokens_seen": 14240,
"step": 45
},
{
"epoch": 0.5,
"eval_loss": 0.11573882400989532,
"eval_runtime": 0.5875,
"eval_samples_per_second": 68.083,
"eval_steps_per_second": 17.021,
"num_input_tokens_seen": 14240,
"step": 45
},
{
"epoch": 0.5555555555555556,
"grad_norm": 5.011988639831543,
"learning_rate": 2.7222222222222223e-05,
"loss": 0.1648,
"num_input_tokens_seen": 15808,
"step": 50
},
{
"epoch": 0.6111111111111112,
"grad_norm": 0.8455270528793335,
"learning_rate": 3e-05,
"loss": 0.1657,
"num_input_tokens_seen": 17344,
"step": 55
},
{
"epoch": 0.6666666666666666,
"grad_norm": 6.417602062225342,
"learning_rate": 3.277777777777778e-05,
"loss": 0.0886,
"num_input_tokens_seen": 18912,
"step": 60
},
{
"epoch": 0.7222222222222222,
"grad_norm": 0.35575202107429504,
"learning_rate": 3.555555555555556e-05,
"loss": 0.1172,
"num_input_tokens_seen": 20448,
"step": 65
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.35196706652641296,
"learning_rate": 3.8333333333333334e-05,
"loss": 0.0435,
"num_input_tokens_seen": 21984,
"step": 70
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.32802021503448486,
"learning_rate": 4.111111111111111e-05,
"loss": 0.1677,
"num_input_tokens_seen": 23552,
"step": 75
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.16649259626865387,
"learning_rate": 4.388888888888889e-05,
"loss": 0.1363,
"num_input_tokens_seen": 25120,
"step": 80
},
{
"epoch": 0.9444444444444444,
"grad_norm": 0.053206074982881546,
"learning_rate": 4.666666666666667e-05,
"loss": 0.0173,
"num_input_tokens_seen": 26656,
"step": 85
},
{
"epoch": 1.0,
"grad_norm": 11.185739517211914,
"learning_rate": 4.9444444444444446e-05,
"loss": 0.2591,
"num_input_tokens_seen": 28192,
"step": 90
},
{
"epoch": 1.0,
"eval_loss": 0.07012443244457245,
"eval_runtime": 0.5977,
"eval_samples_per_second": 66.923,
"eval_steps_per_second": 16.731,
"num_input_tokens_seen": 28192,
"step": 90
},
{
"epoch": 1.0555555555555556,
"grad_norm": 4.218883991241455,
"learning_rate": 4.9996991493233693e-05,
"loss": 0.0344,
"num_input_tokens_seen": 29792,
"step": 95
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.030968718230724335,
"learning_rate": 4.99847706754774e-05,
"loss": 0.0927,
"num_input_tokens_seen": 31328,
"step": 100
},
{
"epoch": 1.1666666666666667,
"grad_norm": 4.61262321472168,
"learning_rate": 4.9963154107272295e-05,
"loss": 0.0883,
"num_input_tokens_seen": 32832,
"step": 105
},
{
"epoch": 1.2222222222222223,
"grad_norm": 5.351931571960449,
"learning_rate": 4.993214991772563e-05,
"loss": 0.0412,
"num_input_tokens_seen": 34304,
"step": 110
},
{
"epoch": 1.2777777777777777,
"grad_norm": 0.05530461296439171,
"learning_rate": 4.989176976624511e-05,
"loss": 0.1526,
"num_input_tokens_seen": 35840,
"step": 115
},
{
"epoch": 1.3333333333333333,
"grad_norm": 5.916304588317871,
"learning_rate": 4.9842028838154285e-05,
"loss": 0.2116,
"num_input_tokens_seen": 37376,
"step": 120
},
{
"epoch": 1.3888888888888888,
"grad_norm": 0.3439420759677887,
"learning_rate": 4.978294583898196e-05,
"loss": 0.0965,
"num_input_tokens_seen": 38944,
"step": 125
},
{
"epoch": 1.4444444444444444,
"grad_norm": 0.2820066511631012,
"learning_rate": 4.971454298742779e-05,
"loss": 0.0462,
"num_input_tokens_seen": 40512,
"step": 130
},
{
"epoch": 1.5,
"grad_norm": 0.4881448745727539,
"learning_rate": 4.963684600700679e-05,
"loss": 0.0367,
"num_input_tokens_seen": 42080,
"step": 135
},
{
"epoch": 1.5,
"eval_loss": 0.06827564537525177,
"eval_runtime": 0.5886,
"eval_samples_per_second": 67.952,
"eval_steps_per_second": 16.988,
"num_input_tokens_seen": 42080,
"step": 135
},
{
"epoch": 1.5555555555555556,
"grad_norm": 4.441239833831787,
"learning_rate": 4.9549884116375714e-05,
"loss": 0.143,
"num_input_tokens_seen": 43680,
"step": 140
},
{
"epoch": 1.6111111111111112,
"grad_norm": 0.04426378384232521,
"learning_rate": 4.9453690018345144e-05,
"loss": 0.0385,
"num_input_tokens_seen": 45216,
"step": 145
},
{
"epoch": 1.6666666666666665,
"grad_norm": 2.143786907196045,
"learning_rate": 4.934829988758131e-05,
"loss": 0.192,
"num_input_tokens_seen": 46720,
"step": 150
},
{
"epoch": 1.7222222222222223,
"grad_norm": 0.15666843950748444,
"learning_rate": 4.923375335700223e-05,
"loss": 0.0042,
"num_input_tokens_seen": 48288,
"step": 155
},
{
"epoch": 1.7777777777777777,
"grad_norm": 4.361499786376953,
"learning_rate": 4.9110093502873476e-05,
"loss": 0.0937,
"num_input_tokens_seen": 49888,
"step": 160
},
{
"epoch": 1.8333333333333335,
"grad_norm": 0.02156808227300644,
"learning_rate": 4.897736682860885e-05,
"loss": 0.0234,
"num_input_tokens_seen": 51424,
"step": 165
},
{
"epoch": 1.8888888888888888,
"grad_norm": 6.929230690002441,
"learning_rate": 4.883562324728241e-05,
"loss": 0.1932,
"num_input_tokens_seen": 52992,
"step": 170
},
{
"epoch": 1.9444444444444444,
"grad_norm": 0.6252282857894897,
"learning_rate": 4.868491606285823e-05,
"loss": 0.0798,
"num_input_tokens_seen": 54592,
"step": 175
},
{
"epoch": 2.0,
"grad_norm": 0.18464066088199615,
"learning_rate": 4.8525301950144894e-05,
"loss": 0.1331,
"num_input_tokens_seen": 56192,
"step": 180
},
{
"epoch": 2.0,
"eval_loss": 0.05967045947909355,
"eval_runtime": 0.5919,
"eval_samples_per_second": 67.58,
"eval_steps_per_second": 16.895,
"num_input_tokens_seen": 56192,
"step": 180
},
{
"epoch": 2.0555555555555554,
"grad_norm": 1.8507845401763916,
"learning_rate": 4.835684093348244e-05,
"loss": 0.0186,
"num_input_tokens_seen": 57760,
"step": 185
},
{
"epoch": 2.111111111111111,
"grad_norm": 0.018928587436676025,
"learning_rate": 4.817959636416969e-05,
"loss": 0.0333,
"num_input_tokens_seen": 59264,
"step": 190
},
{
"epoch": 2.1666666666666665,
"grad_norm": 0.03185804933309555,
"learning_rate": 4.7993634896640394e-05,
"loss": 0.0572,
"num_input_tokens_seen": 60768,
"step": 195
},
{
"epoch": 2.2222222222222223,
"grad_norm": 6.1525678634643555,
"learning_rate": 4.779902646339722e-05,
"loss": 0.1263,
"num_input_tokens_seen": 62272,
"step": 200
},
{
"epoch": 2.2777777777777777,
"grad_norm": 0.24679124355316162,
"learning_rate": 4.759584424871302e-05,
"loss": 0.0479,
"num_input_tokens_seen": 63808,
"step": 205
},
{
"epoch": 2.3333333333333335,
"grad_norm": 4.617246150970459,
"learning_rate": 4.7384164661109176e-05,
"loss": 0.1704,
"num_input_tokens_seen": 65344,
"step": 210
},
{
"epoch": 2.388888888888889,
"grad_norm": 1.2960439920425415,
"learning_rate": 4.7164067304621536e-05,
"loss": 0.0061,
"num_input_tokens_seen": 66912,
"step": 215
},
{
"epoch": 2.4444444444444446,
"grad_norm": 6.159663677215576,
"learning_rate": 4.693563494886455e-05,
"loss": 0.1972,
"num_input_tokens_seen": 68480,
"step": 220
},
{
"epoch": 2.5,
"grad_norm": 0.7147485017776489,
"learning_rate": 4.669895349790502e-05,
"loss": 0.0047,
"num_input_tokens_seen": 70048,
"step": 225
},
{
"epoch": 2.5,
"eval_loss": 0.05929639935493469,
"eval_runtime": 0.5943,
"eval_samples_per_second": 67.308,
"eval_steps_per_second": 16.827,
"num_input_tokens_seen": 70048,
"step": 225
},
{
"epoch": 2.5555555555555554,
"grad_norm": 0.47322940826416016,
"learning_rate": 4.645411195795709e-05,
"loss": 0.0096,
"num_input_tokens_seen": 71680,
"step": 230
},
{
"epoch": 2.611111111111111,
"grad_norm": 5.344168663024902,
"learning_rate": 4.620120240391065e-05,
"loss": 0.1928,
"num_input_tokens_seen": 73280,
"step": 235
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.012929716147482395,
"learning_rate": 4.5940319944705736e-05,
"loss": 0.0027,
"num_input_tokens_seen": 74752,
"step": 240
},
{
"epoch": 2.7222222222222223,
"grad_norm": 0.05127738416194916,
"learning_rate": 4.567156268756594e-05,
"loss": 0.106,
"num_input_tokens_seen": 76288,
"step": 245
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.03636276721954346,
"learning_rate": 4.539503170110431e-05,
"loss": 0.0649,
"num_input_tokens_seen": 77888,
"step": 250
},
{
"epoch": 2.8333333333333335,
"grad_norm": 0.31299880146980286,
"learning_rate": 4.5110830977315556e-05,
"loss": 0.0032,
"num_input_tokens_seen": 79456,
"step": 255
},
{
"epoch": 2.888888888888889,
"grad_norm": 0.5931472778320312,
"learning_rate": 4.4819067392468944e-05,
"loss": 0.0646,
"num_input_tokens_seen": 81088,
"step": 260
},
{
"epoch": 2.9444444444444446,
"grad_norm": 0.7903435230255127,
"learning_rate": 4.4519850666916484e-05,
"loss": 0.0188,
"num_input_tokens_seen": 82592,
"step": 265
},
{
"epoch": 3.0,
"grad_norm": 4.708324909210205,
"learning_rate": 4.4213293323831585e-05,
"loss": 0.0918,
"num_input_tokens_seen": 84192,
"step": 270
},
{
"epoch": 3.0,
"eval_loss": 0.059632014483213425,
"eval_runtime": 0.5811,
"eval_samples_per_second": 68.838,
"eval_steps_per_second": 17.21,
"num_input_tokens_seen": 84192,
"step": 270
},
{
"epoch": 3.0555555555555554,
"grad_norm": 0.02156168594956398,
"learning_rate": 4.38995106468937e-05,
"loss": 0.0337,
"num_input_tokens_seen": 85696,
"step": 275
},
{
"epoch": 3.111111111111111,
"grad_norm": 1.075160264968872,
"learning_rate": 4.357862063693486e-05,
"loss": 0.0222,
"num_input_tokens_seen": 87296,
"step": 280
},
{
"epoch": 3.1666666666666665,
"grad_norm": 0.09805523604154587,
"learning_rate": 4.325074396756437e-05,
"loss": 0.01,
"num_input_tokens_seen": 88832,
"step": 285
},
{
"epoch": 3.2222222222222223,
"grad_norm": 0.028949294239282608,
"learning_rate": 4.2916003939788403e-05,
"loss": 0.0024,
"num_input_tokens_seen": 90368,
"step": 290
},
{
"epoch": 3.2777777777777777,
"grad_norm": 0.4382733702659607,
"learning_rate": 4.257452643564155e-05,
"loss": 0.0078,
"num_input_tokens_seen": 91968,
"step": 295
},
{
"epoch": 3.3333333333333335,
"grad_norm": 2.2116539478302,
"learning_rate": 4.22264398708477e-05,
"loss": 0.0124,
"num_input_tokens_seen": 93568,
"step": 300
},
{
"epoch": 3.388888888888889,
"grad_norm": 0.011147049255669117,
"learning_rate": 4.1871875146528195e-05,
"loss": 0.01,
"num_input_tokens_seen": 95168,
"step": 305
},
{
"epoch": 3.4444444444444446,
"grad_norm": 0.057261109352111816,
"learning_rate": 4.1510965599975196e-05,
"loss": 0.0729,
"num_input_tokens_seen": 96704,
"step": 310
},
{
"epoch": 3.5,
"grad_norm": 0.028863638639450073,
"learning_rate": 4.114384695450906e-05,
"loss": 0.1091,
"num_input_tokens_seen": 98304,
"step": 315
},
{
"epoch": 3.5,
"eval_loss": 0.06172233074903488,
"eval_runtime": 0.5816,
"eval_samples_per_second": 68.77,
"eval_steps_per_second": 17.193,
"num_input_tokens_seen": 98304,
"step": 315
},
{
"epoch": 3.5555555555555554,
"grad_norm": 0.021796802058815956,
"learning_rate": 4.077065726843828e-05,
"loss": 0.0015,
"num_input_tokens_seen": 99840,
"step": 320
},
{
"epoch": 3.611111111111111,
"grad_norm": 0.8977848291397095,
"learning_rate": 4.039153688314145e-05,
"loss": 0.032,
"num_input_tokens_seen": 101376,
"step": 325
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.11903944611549377,
"learning_rate": 4.000662837029062e-05,
"loss": 0.0704,
"num_input_tokens_seen": 102976,
"step": 330
},
{
"epoch": 3.7222222222222223,
"grad_norm": 3.8452537059783936,
"learning_rate": 3.961607647823583e-05,
"loss": 0.0592,
"num_input_tokens_seen": 104576,
"step": 335
},
{
"epoch": 3.7777777777777777,
"grad_norm": 1.4291119575500488,
"learning_rate": 3.9220028077571295e-05,
"loss": 0.1871,
"num_input_tokens_seen": 106112,
"step": 340
},
{
"epoch": 3.8333333333333335,
"grad_norm": 0.04512714594602585,
"learning_rate": 3.881863210590332e-05,
"loss": 0.1058,
"num_input_tokens_seen": 107712,
"step": 345
},
{
"epoch": 3.888888888888889,
"grad_norm": 0.29252851009368896,
"learning_rate": 3.841203951184095e-05,
"loss": 0.1107,
"num_input_tokens_seen": 109344,
"step": 350
},
{
"epoch": 3.9444444444444446,
"grad_norm": 6.142524719238281,
"learning_rate": 3.8000403198230387e-05,
"loss": 0.1373,
"num_input_tokens_seen": 110944,
"step": 355
},
{
"epoch": 4.0,
"grad_norm": 0.19722937047481537,
"learning_rate": 3.75838779646545e-05,
"loss": 0.0053,
"num_input_tokens_seen": 112544,
"step": 360
},
{
"epoch": 4.0,
"eval_loss": 0.062212228775024414,
"eval_runtime": 0.5812,
"eval_samples_per_second": 68.82,
"eval_steps_per_second": 17.205,
"num_input_tokens_seen": 112544,
"step": 360
},
{
"epoch": 4.055555555555555,
"grad_norm": 0.09612889587879181,
"learning_rate": 3.7162620449219e-05,
"loss": 0.0397,
"num_input_tokens_seen": 114144,
"step": 365
},
{
"epoch": 4.111111111111111,
"grad_norm": 2.852125406265259,
"learning_rate": 3.673678906964727e-05,
"loss": 0.0304,
"num_input_tokens_seen": 115712,
"step": 370
},
{
"epoch": 4.166666666666667,
"grad_norm": 0.6219462156295776,
"learning_rate": 3.630654396370594e-05,
"loss": 0.1283,
"num_input_tokens_seen": 117312,
"step": 375
},
{
"epoch": 4.222222222222222,
"grad_norm": 0.216511532664299,
"learning_rate": 3.5872046928983626e-05,
"loss": 0.0795,
"num_input_tokens_seen": 118848,
"step": 380
},
{
"epoch": 4.277777777777778,
"grad_norm": 2.406106472015381,
"learning_rate": 3.543346136204545e-05,
"loss": 0.0155,
"num_input_tokens_seen": 120416,
"step": 385
},
{
"epoch": 4.333333333333333,
"grad_norm": 0.8766459822654724,
"learning_rate": 3.499095219698631e-05,
"loss": 0.0599,
"num_input_tokens_seen": 122016,
"step": 390
},
{
"epoch": 4.388888888888889,
"grad_norm": 6.439513683319092,
"learning_rate": 3.454468584340588e-05,
"loss": 0.0956,
"num_input_tokens_seen": 123584,
"step": 395
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.011177383363246918,
"learning_rate": 3.409483012382879e-05,
"loss": 0.0031,
"num_input_tokens_seen": 125216,
"step": 400
},
{
"epoch": 4.5,
"grad_norm": 0.6205223798751831,
"learning_rate": 3.364155421059342e-05,
"loss": 0.0101,
"num_input_tokens_seen": 126784,
"step": 405
},
{
"epoch": 4.5,
"eval_loss": 0.06303347647190094,
"eval_runtime": 0.5821,
"eval_samples_per_second": 68.711,
"eval_steps_per_second": 17.178,
"num_input_tokens_seen": 126784,
"step": 405
},
{
"epoch": 4.555555555555555,
"grad_norm": 0.12966503202915192,
"learning_rate": 3.318502856223311e-05,
"loss": 0.0062,
"num_input_tokens_seen": 128352,
"step": 410
},
{
"epoch": 4.611111111111111,
"grad_norm": 0.27338626980781555,
"learning_rate": 3.272542485937369e-05,
"loss": 0.0062,
"num_input_tokens_seen": 129920,
"step": 415
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.04085496813058853,
"learning_rate": 3.2262915940171376e-05,
"loss": 0.0027,
"num_input_tokens_seen": 131488,
"step": 420
},
{
"epoch": 4.722222222222222,
"grad_norm": 0.0658736526966095,
"learning_rate": 3.1797675735315455e-05,
"loss": 0.0676,
"num_input_tokens_seen": 133056,
"step": 425
},
{
"epoch": 4.777777777777778,
"grad_norm": 0.40652352571487427,
"learning_rate": 3.132987920262005e-05,
"loss": 0.038,
"num_input_tokens_seen": 134624,
"step": 430
},
{
"epoch": 4.833333333333333,
"grad_norm": 0.22422264516353607,
"learning_rate": 3.085970226122962e-05,
"loss": 0.0227,
"num_input_tokens_seen": 136192,
"step": 435
},
{
"epoch": 4.888888888888889,
"grad_norm": 0.009336884133517742,
"learning_rate": 3.0387321725463e-05,
"loss": 0.0115,
"num_input_tokens_seen": 137824,
"step": 440
},
{
"epoch": 4.944444444444445,
"grad_norm": 0.28929874300956726,
"learning_rate": 2.9912915238320754e-05,
"loss": 0.0154,
"num_input_tokens_seen": 139392,
"step": 445
},
{
"epoch": 5.0,
"grad_norm": 0.1447066068649292,
"learning_rate": 2.9436661204680882e-05,
"loss": 0.0808,
"num_input_tokens_seen": 140960,
"step": 450
},
{
"epoch": 5.0,
"eval_loss": 0.06195898726582527,
"eval_runtime": 0.5844,
"eval_samples_per_second": 68.443,
"eval_steps_per_second": 17.111,
"num_input_tokens_seen": 140960,
"step": 450
},
{
"epoch": 5.055555555555555,
"grad_norm": 0.04051095247268677,
"learning_rate": 2.8958738724208072e-05,
"loss": 0.0635,
"num_input_tokens_seen": 142560,
"step": 455
},
{
"epoch": 5.111111111111111,
"grad_norm": 0.023053204640746117,
"learning_rate": 2.8479327524001636e-05,
"loss": 0.0316,
"num_input_tokens_seen": 144096,
"step": 460
},
{
"epoch": 5.166666666666667,
"grad_norm": 0.23852871358394623,
"learning_rate": 2.7998607891007495e-05,
"loss": 0.0049,
"num_input_tokens_seen": 145632,
"step": 465
},
{
"epoch": 5.222222222222222,
"grad_norm": 2.9483938217163086,
"learning_rate": 2.7516760604219617e-05,
"loss": 0.0206,
"num_input_tokens_seen": 147232,
"step": 470
},
{
"epoch": 5.277777777777778,
"grad_norm": 0.058457694947719574,
"learning_rate": 2.7033966866696457e-05,
"loss": 0.0586,
"num_input_tokens_seen": 148864,
"step": 475
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.023417210206389427,
"learning_rate": 2.6550408237417885e-05,
"loss": 0.003,
"num_input_tokens_seen": 150432,
"step": 480
},
{
"epoch": 5.388888888888889,
"grad_norm": 0.29810523986816406,
"learning_rate": 2.6066266563008267e-05,
"loss": 0.0771,
"num_input_tokens_seen": 152032,
"step": 485
},
{
"epoch": 5.444444444444445,
"grad_norm": 0.020725004374980927,
"learning_rate": 2.5581723909351406e-05,
"loss": 0.025,
"num_input_tokens_seen": 153600,
"step": 490
},
{
"epoch": 5.5,
"grad_norm": 0.4676690399646759,
"learning_rate": 2.5096962493123012e-05,
"loss": 0.0104,
"num_input_tokens_seen": 155200,
"step": 495
},
{
"epoch": 5.5,
"eval_loss": 0.0627414733171463,
"eval_runtime": 0.5812,
"eval_samples_per_second": 68.818,
"eval_steps_per_second": 17.204,
"num_input_tokens_seen": 155200,
"step": 495
},
{
"epoch": 5.555555555555555,
"grad_norm": 0.01173495128750801,
"learning_rate": 2.461216461326642e-05,
"loss": 0.0801,
"num_input_tokens_seen": 156800,
"step": 500
},
{
"epoch": 5.611111111111111,
"grad_norm": 0.007062096148729324,
"learning_rate": 2.4127512582437485e-05,
"loss": 0.0009,
"num_input_tokens_seen": 158336,
"step": 505
},
{
"epoch": 5.666666666666667,
"grad_norm": 0.04896526783704758,
"learning_rate": 2.364318865844416e-05,
"loss": 0.0027,
"num_input_tokens_seen": 159904,
"step": 510
},
{
"epoch": 5.722222222222222,
"grad_norm": 6.96190071105957,
"learning_rate": 2.3159374975706884e-05,
"loss": 0.0464,
"num_input_tokens_seen": 161504,
"step": 515
},
{
"epoch": 5.777777777777778,
"grad_norm": 0.008594535291194916,
"learning_rate": 2.2676253476765196e-05,
"loss": 0.0016,
"num_input_tokens_seen": 163008,
"step": 520
},
{
"epoch": 5.833333333333333,
"grad_norm": 0.40661683678627014,
"learning_rate": 2.2194005843856636e-05,
"loss": 0.0174,
"num_input_tokens_seen": 164512,
"step": 525
},
{
"epoch": 5.888888888888889,
"grad_norm": 0.24199238419532776,
"learning_rate": 2.1712813430593436e-05,
"loss": 0.1174,
"num_input_tokens_seen": 166080,
"step": 530
},
{
"epoch": 5.944444444444445,
"grad_norm": 0.025658387690782547,
"learning_rate": 2.1232857193762924e-05,
"loss": 0.0317,
"num_input_tokens_seen": 167680,
"step": 535
},
{
"epoch": 6.0,
"grad_norm": 0.006939691957086325,
"learning_rate": 2.0754317625276983e-05,
"loss": 0.0012,
"num_input_tokens_seen": 169216,
"step": 540
},
{
"epoch": 6.0,
"eval_loss": 0.06366995722055435,
"eval_runtime": 0.5877,
"eval_samples_per_second": 68.063,
"eval_steps_per_second": 17.016,
"num_input_tokens_seen": 169216,
"step": 540
},
{
"epoch": 6.055555555555555,
"grad_norm": 0.017029551789164543,
"learning_rate": 2.02773746842965e-05,
"loss": 0.0027,
"num_input_tokens_seen": 170784,
"step": 545
},
{
"epoch": 6.111111111111111,
"grad_norm": 4.089417457580566,
"learning_rate": 1.980220772955602e-05,
"loss": 0.0248,
"num_input_tokens_seen": 172352,
"step": 550
},
{
"epoch": 6.166666666666667,
"grad_norm": 0.15079845488071442,
"learning_rate": 1.932899545191433e-05,
"loss": 0.002,
"num_input_tokens_seen": 173920,
"step": 555
},
{
"epoch": 6.222222222222222,
"grad_norm": 0.006432878784835339,
"learning_rate": 1.8857915807156092e-05,
"loss": 0.0032,
"num_input_tokens_seen": 175488,
"step": 560
},
{
"epoch": 6.277777777777778,
"grad_norm": 2.5554280281066895,
"learning_rate": 1.838914594906995e-05,
"loss": 0.0127,
"num_input_tokens_seen": 177024,
"step": 565
},
{
"epoch": 6.333333333333333,
"grad_norm": 0.41308820247650146,
"learning_rate": 1.792286216282824e-05,
"loss": 0.025,
"num_input_tokens_seen": 178624,
"step": 570
},
{
"epoch": 6.388888888888889,
"grad_norm": 0.43808722496032715,
"learning_rate": 1.7459239798693364e-05,
"loss": 0.1628,
"num_input_tokens_seen": 180096,
"step": 575
},
{
"epoch": 6.444444444444445,
"grad_norm": 2.488274335861206,
"learning_rate": 1.699845320607571e-05,
"loss": 0.0157,
"num_input_tokens_seen": 181600,
"step": 580
},
{
"epoch": 6.5,
"grad_norm": 1.1927860975265503,
"learning_rate": 1.6540675667967974e-05,
"loss": 0.0056,
"num_input_tokens_seen": 183232,
"step": 585
},
{
"epoch": 6.5,
"eval_loss": 0.06768044084310532,
"eval_runtime": 0.5879,
"eval_samples_per_second": 68.036,
"eval_steps_per_second": 17.009,
"num_input_tokens_seen": 183232,
"step": 585
},
{
"epoch": 6.555555555555555,
"grad_norm": 0.0071192653849720955,
"learning_rate": 1.60860793357805e-05,
"loss": 0.0707,
"num_input_tokens_seen": 184768,
"step": 590
},
{
"epoch": 6.611111111111111,
"grad_norm": 0.004635022487491369,
"learning_rate": 1.56348351646022e-05,
"loss": 0.0261,
"num_input_tokens_seen": 186208,
"step": 595
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.007054431829601526,
"learning_rate": 1.5187112848911323e-05,
"loss": 0.0091,
"num_input_tokens_seen": 187776,
"step": 600
},
{
"epoch": 6.722222222222222,
"grad_norm": 0.7310401797294617,
"learning_rate": 1.47430807587603e-05,
"loss": 0.0049,
"num_input_tokens_seen": 189344,
"step": 605
},
{
"epoch": 6.777777777777778,
"grad_norm": 0.06609062105417252,
"learning_rate": 1.430290587645865e-05,
"loss": 0.0027,
"num_input_tokens_seen": 190912,
"step": 610
},
{
"epoch": 6.833333333333333,
"grad_norm": 0.014985096640884876,
"learning_rate": 1.3866753733777765e-05,
"loss": 0.0607,
"num_input_tokens_seen": 192512,
"step": 615
},
{
"epoch": 6.888888888888889,
"grad_norm": 0.010730482637882233,
"learning_rate": 1.343478834970121e-05,
"loss": 0.0018,
"num_input_tokens_seen": 194112,
"step": 620
},
{
"epoch": 6.944444444444445,
"grad_norm": 0.30494803190231323,
"learning_rate": 1.3007172168743854e-05,
"loss": 0.0662,
"num_input_tokens_seen": 195712,
"step": 625
},
{
"epoch": 7.0,
"grad_norm": 0.06056293845176697,
"learning_rate": 1.2584065999863102e-05,
"loss": 0.0014,
"num_input_tokens_seen": 197248,
"step": 630
},
{
"epoch": 7.0,
"eval_loss": 0.07016871124505997,
"eval_runtime": 0.5849,
"eval_samples_per_second": 68.386,
"eval_steps_per_second": 17.096,
"num_input_tokens_seen": 197248,
"step": 630
},
{
"epoch": 7.055555555555555,
"grad_norm": 0.03782691806554794,
"learning_rate": 1.2165628955985314e-05,
"loss": 0.0026,
"num_input_tokens_seen": 198784,
"step": 635
},
{
"epoch": 7.111111111111111,
"grad_norm": 4.731870651245117,
"learning_rate": 1.175201839416988e-05,
"loss": 0.0685,
"num_input_tokens_seen": 200352,
"step": 640
},
{
"epoch": 7.166666666666667,
"grad_norm": 0.1799234002828598,
"learning_rate": 1.1343389856433658e-05,
"loss": 0.0026,
"num_input_tokens_seen": 201856,
"step": 645
},
{
"epoch": 7.222222222222222,
"grad_norm": 0.010190362110733986,
"learning_rate": 1.0939897011258001e-05,
"loss": 0.0256,
"num_input_tokens_seen": 203488,
"step": 650
},
{
"epoch": 7.277777777777778,
"grad_norm": 0.007492389529943466,
"learning_rate": 1.0541691595800337e-05,
"loss": 0.0021,
"num_input_tokens_seen": 205056,
"step": 655
},
{
"epoch": 7.333333333333333,
"grad_norm": 0.06244223192334175,
"learning_rate": 1.0148923358832022e-05,
"loss": 0.0582,
"num_input_tokens_seen": 206656,
"step": 660
},
{
"epoch": 7.388888888888889,
"grad_norm": 0.04081568121910095,
"learning_rate": 9.761740004423927e-06,
"loss": 0.0057,
"num_input_tokens_seen": 208288,
"step": 665
},
{
"epoch": 7.444444444444445,
"grad_norm": 7.480345726013184,
"learning_rate": 9.380287136401e-06,
"loss": 0.096,
"num_input_tokens_seen": 209856,
"step": 670
},
{
"epoch": 7.5,
"grad_norm": 0.7831881046295166,
"learning_rate": 9.00470820358663e-06,
"loss": 0.0042,
"num_input_tokens_seen": 211424,
"step": 675
},
{
"epoch": 7.5,
"eval_loss": 0.06858132034540176,
"eval_runtime": 0.5834,
"eval_samples_per_second": 68.565,
"eval_steps_per_second": 17.141,
"num_input_tokens_seen": 211424,
"step": 675
},
{
"epoch": 7.555555555555555,
"grad_norm": 0.01823975145816803,
"learning_rate": 8.635144445857406e-06,
"loss": 0.0013,
"num_input_tokens_seen": 212928,
"step": 680
},
{
"epoch": 7.611111111111111,
"grad_norm": 0.018310269340872765,
"learning_rate": 8.271734841028553e-06,
"loss": 0.0025,
"num_input_tokens_seen": 214496,
"step": 685
},
{
"epoch": 7.666666666666667,
"grad_norm": 0.01911945454776287,
"learning_rate": 7.914616052590071e-06,
"loss": 0.0016,
"num_input_tokens_seen": 216064,
"step": 690
},
{
"epoch": 7.722222222222222,
"grad_norm": 0.017379632219672203,
"learning_rate": 7.563922378313218e-06,
"loss": 0.0439,
"num_input_tokens_seen": 217600,
"step": 695
},
{
"epoch": 7.777777777777778,
"grad_norm": 0.5448654890060425,
"learning_rate": 7.219785699746573e-06,
"loss": 0.0035,
"num_input_tokens_seen": 219136,
"step": 700
},
{
"epoch": 7.833333333333333,
"grad_norm": 0.15058286488056183,
"learning_rate": 6.882335432620779e-06,
"loss": 0.0106,
"num_input_tokens_seen": 220704,
"step": 705
},
{
"epoch": 7.888888888888889,
"grad_norm": 0.2723049223423004,
"learning_rate": 6.55169847818059e-06,
"loss": 0.0235,
"num_input_tokens_seen": 222272,
"step": 710
},
{
"epoch": 7.944444444444445,
"grad_norm": 0.11821077018976212,
"learning_rate": 6.22799917546252e-06,
"loss": 0.0026,
"num_input_tokens_seen": 223872,
"step": 715
},
{
"epoch": 8.0,
"grad_norm": 2.420344352722168,
"learning_rate": 5.9113592545359945e-06,
"loss": 0.0692,
"num_input_tokens_seen": 225440,
"step": 720
},
{
"epoch": 8.0,
"eval_loss": 0.06697994470596313,
"eval_runtime": 0.5855,
"eval_samples_per_second": 68.323,
"eval_steps_per_second": 17.081,
"num_input_tokens_seen": 225440,
"step": 720
},
{
"epoch": 8.055555555555555,
"grad_norm": 1.6140986680984497,
"learning_rate": 5.601897790725643e-06,
"loss": 0.0069,
"num_input_tokens_seen": 226912,
"step": 725
},
{
"epoch": 8.11111111111111,
"grad_norm": 0.10575804114341736,
"learning_rate": 5.299731159831953e-06,
"loss": 0.0014,
"num_input_tokens_seen": 228512,
"step": 730
},
{
"epoch": 8.166666666666666,
"grad_norm": 0.4418221414089203,
"learning_rate": 5.004972994367102e-06,
"loss": 0.05,
"num_input_tokens_seen": 230080,
"step": 735
},
{
"epoch": 8.222222222222221,
"grad_norm": 0.006636911537498236,
"learning_rate": 4.7177341408224e-06,
"loss": 0.0009,
"num_input_tokens_seen": 231616,
"step": 740
},
{
"epoch": 8.277777777777779,
"grad_norm": 0.005838257260620594,
"learning_rate": 4.438122617983443e-06,
"loss": 0.0232,
"num_input_tokens_seen": 233184,
"step": 745
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.16078728437423706,
"learning_rate": 4.166243576308712e-06,
"loss": 0.0033,
"num_input_tokens_seen": 234688,
"step": 750
},
{
"epoch": 8.38888888888889,
"grad_norm": 0.15908759832382202,
"learning_rate": 3.9021992583867325e-06,
"loss": 0.1145,
"num_input_tokens_seen": 236256,
"step": 755
},
{
"epoch": 8.444444444444445,
"grad_norm": 0.19336074590682983,
"learning_rate": 3.6460889604868626e-06,
"loss": 0.0028,
"num_input_tokens_seen": 237856,
"step": 760
},
{
"epoch": 8.5,
"grad_norm": 0.05375547334551811,
"learning_rate": 3.398008995217988e-06,
"loss": 0.0005,
"num_input_tokens_seen": 239392,
"step": 765
},
{
"epoch": 8.5,
"eval_loss": 0.06791551411151886,
"eval_runtime": 0.5839,
"eval_samples_per_second": 68.503,
"eval_steps_per_second": 17.126,
"num_input_tokens_seen": 239392,
"step": 765
},
{
"epoch": 8.555555555555555,
"grad_norm": 0.12801700830459595,
"learning_rate": 3.158052655309332e-06,
"loss": 0.0756,
"num_input_tokens_seen": 240960,
"step": 770
},
{
"epoch": 8.61111111111111,
"grad_norm": 0.013426029123365879,
"learning_rate": 2.9263101785268254e-06,
"loss": 0.0063,
"num_input_tokens_seen": 242496,
"step": 775
},
{
"epoch": 8.666666666666666,
"grad_norm": 1.9325268268585205,
"learning_rate": 2.7028687137384267e-06,
"loss": 0.0073,
"num_input_tokens_seen": 244064,
"step": 780
},
{
"epoch": 8.722222222222221,
"grad_norm": 0.0063558281399309635,
"learning_rate": 2.487812288140945e-06,
"loss": 0.0017,
"num_input_tokens_seen": 245664,
"step": 785
},
{
"epoch": 8.777777777777779,
"grad_norm": 0.04126088321208954,
"learning_rate": 2.281221775660894e-06,
"loss": 0.0015,
"num_input_tokens_seen": 247264,
"step": 790
},
{
"epoch": 8.833333333333334,
"grad_norm": 0.07367562502622604,
"learning_rate": 2.0831748665410765e-06,
"loss": 0.015,
"num_input_tokens_seen": 248832,
"step": 795
},
{
"epoch": 8.88888888888889,
"grad_norm": 0.00970037654042244,
"learning_rate": 1.893746038124497e-06,
"loss": 0.0739,
"num_input_tokens_seen": 250432,
"step": 800
},
{
"epoch": 8.944444444444445,
"grad_norm": 0.00786888599395752,
"learning_rate": 1.713006526846439e-06,
"loss": 0.0179,
"num_input_tokens_seen": 252064,
"step": 805
},
{
"epoch": 9.0,
"grad_norm": 0.005709424614906311,
"learning_rate": 1.541024301445404e-06,
"loss": 0.0015,
"num_input_tokens_seen": 253632,
"step": 810
},
{
"epoch": 9.0,
"eval_loss": 0.0697808712720871,
"eval_runtime": 0.5791,
"eval_samples_per_second": 69.068,
"eval_steps_per_second": 17.267,
"num_input_tokens_seen": 253632,
"step": 810
},
{
"epoch": 9.055555555555555,
"grad_norm": 0.16491225361824036,
"learning_rate": 1.3778640374027985e-06,
"loss": 0.0478,
"num_input_tokens_seen": 255232,
"step": 815
},
{
"epoch": 9.11111111111111,
"grad_norm": 0.006427465938031673,
"learning_rate": 1.2235870926211619e-06,
"loss": 0.0011,
"num_input_tokens_seen": 256800,
"step": 820
},
{
"epoch": 9.166666666666666,
"grad_norm": 0.00451459689065814,
"learning_rate": 1.0782514843499653e-06,
"loss": 0.0648,
"num_input_tokens_seen": 258368,
"step": 825
},
{
"epoch": 9.222222222222221,
"grad_norm": 0.03183142468333244,
"learning_rate": 9.419118673676924e-07,
"loss": 0.0007,
"num_input_tokens_seen": 259872,
"step": 830
},
{
"epoch": 9.277777777777779,
"grad_norm": 0.0683126449584961,
"learning_rate": 8.146195134284052e-07,
"loss": 0.0021,
"num_input_tokens_seen": 261376,
"step": 835
},
{
"epoch": 9.333333333333334,
"grad_norm": 0.4682014286518097,
"learning_rate": 6.964222919805391e-07,
"loss": 0.0036,
"num_input_tokens_seen": 262944,
"step": 840
},
{
"epoch": 9.38888888888889,
"grad_norm": 0.07767174392938614,
"learning_rate": 5.87364652165176e-07,
"loss": 0.0904,
"num_input_tokens_seen": 264544,
"step": 845
},
{
"epoch": 9.444444444444445,
"grad_norm": 0.2937130331993103,
"learning_rate": 4.874876061005173e-07,
"loss": 0.002,
"num_input_tokens_seen": 266080,
"step": 850
},
{
"epoch": 9.5,
"grad_norm": 0.15090857446193695,
"learning_rate": 3.9682871345891883e-07,
"loss": 0.0069,
"num_input_tokens_seen": 267680,
"step": 855
},
{
"epoch": 9.5,
"eval_loss": 0.06901142001152039,
"eval_runtime": 0.5858,
"eval_samples_per_second": 68.285,
"eval_steps_per_second": 17.071,
"num_input_tokens_seen": 267680,
"step": 855
},
{
"epoch": 9.555555555555555,
"grad_norm": 5.201120376586914,
"learning_rate": 3.1542206734221924e-07,
"loss": 0.1035,
"num_input_tokens_seen": 269280,
"step": 860
},
{
"epoch": 9.61111111111111,
"grad_norm": 0.4644763171672821,
"learning_rate": 2.4329828146074095e-07,
"loss": 0.0035,
"num_input_tokens_seen": 270912,
"step": 865
},
{
"epoch": 9.666666666666666,
"grad_norm": 0.033365171402692795,
"learning_rate": 1.8048447862070718e-07,
"loss": 0.0094,
"num_input_tokens_seen": 272512,
"step": 870
},
{
"epoch": 9.722222222222221,
"grad_norm": 0.2439211755990982,
"learning_rate": 1.2700428052447033e-07,
"loss": 0.0165,
"num_input_tokens_seen": 274048,
"step": 875
},
{
"epoch": 9.777777777777779,
"grad_norm": 2.971935987472534,
"learning_rate": 8.28777988873486e-08,
"loss": 0.0171,
"num_input_tokens_seen": 275648,
"step": 880
},
{
"epoch": 9.833333333333334,
"grad_norm": 0.022272281348705292,
"learning_rate": 4.8121627874450625e-08,
"loss": 0.0065,
"num_input_tokens_seen": 277248,
"step": 885
},
{
"epoch": 9.88888888888889,
"grad_norm": 0.04621336981654167,
"learning_rate": 2.2748837860270267e-08,
"loss": 0.0028,
"num_input_tokens_seen": 278816,
"step": 890
},
{
"epoch": 9.944444444444445,
"grad_norm": 0.011941147036850452,
"learning_rate": 6.768970513457151e-09,
"loss": 0.001,
"num_input_tokens_seen": 280416,
"step": 895
},
{
"epoch": 10.0,
"grad_norm": 0.061786286532878876,
"learning_rate": 1.8803520859811406e-10,
"loss": 0.003,
"num_input_tokens_seen": 281984,
"step": 900
},
{
"epoch": 10.0,
"eval_loss": 0.06747323274612427,
"eval_runtime": 0.5823,
"eval_samples_per_second": 68.688,
"eval_steps_per_second": 17.172,
"num_input_tokens_seen": 281984,
"step": 900
},
{
"epoch": 10.0,
"num_input_tokens_seen": 281984,
"step": 900,
"total_flos": 1.2698062524776448e+16,
"train_loss": 0.06748742392027958,
"train_runtime": 155.7099,
"train_samples_per_second": 23.12,
"train_steps_per_second": 5.78
}
],
"logging_steps": 5,
"max_steps": 900,
"num_input_tokens_seen": 281984,
"num_train_epochs": 10,
"save_steps": 45,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2698062524776448e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}