{
"best_global_step": 300,
"best_metric": 0.16209302842617035,
"best_model_checkpoint": "saves_multiple/prefix-tuning/llama-3-8b-instruct/train_cb_456_1760637753/checkpoint-300",
"epoch": 20.0,
"eval_steps": 100,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 216.57647705078125,
"learning_rate": 4.0000000000000003e-07,
"loss": 9.2623,
"num_input_tokens_seen": 2944,
"step": 5
},
{
"epoch": 0.2,
"grad_norm": 150.08148193359375,
"learning_rate": 9.000000000000001e-07,
"loss": 8.7722,
"num_input_tokens_seen": 6208,
"step": 10
},
{
"epoch": 0.3,
"grad_norm": 134.6036376953125,
"learning_rate": 1.4000000000000001e-06,
"loss": 8.0747,
"num_input_tokens_seen": 9440,
"step": 15
},
{
"epoch": 0.4,
"grad_norm": 120.53459930419922,
"learning_rate": 1.9000000000000002e-06,
"loss": 7.2417,
"num_input_tokens_seen": 12320,
"step": 20
},
{
"epoch": 0.5,
"grad_norm": 107.87600708007812,
"learning_rate": 2.4000000000000003e-06,
"loss": 5.8971,
"num_input_tokens_seen": 16096,
"step": 25
},
{
"epoch": 0.6,
"grad_norm": 91.98301696777344,
"learning_rate": 2.9e-06,
"loss": 4.5398,
"num_input_tokens_seen": 18880,
"step": 30
},
{
"epoch": 0.7,
"grad_norm": 104.20735168457031,
"learning_rate": 3.4000000000000005e-06,
"loss": 3.4192,
"num_input_tokens_seen": 22240,
"step": 35
},
{
"epoch": 0.8,
"grad_norm": 57.80829620361328,
"learning_rate": 3.900000000000001e-06,
"loss": 1.8096,
"num_input_tokens_seen": 25376,
"step": 40
},
{
"epoch": 0.9,
"grad_norm": 49.72511672973633,
"learning_rate": 4.4e-06,
"loss": 1.07,
"num_input_tokens_seen": 28576,
"step": 45
},
{
"epoch": 1.0,
"grad_norm": 45.287750244140625,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.6796,
"num_input_tokens_seen": 32448,
"step": 50
},
{
"epoch": 1.1,
"grad_norm": 67.8308334350586,
"learning_rate": 5.400000000000001e-06,
"loss": 0.5664,
"num_input_tokens_seen": 35776,
"step": 55
},
{
"epoch": 1.2,
"grad_norm": 48.61396408081055,
"learning_rate": 5.9e-06,
"loss": 0.2188,
"num_input_tokens_seen": 38912,
"step": 60
},
{
"epoch": 1.3,
"grad_norm": 52.29746627807617,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.3773,
"num_input_tokens_seen": 42368,
"step": 65
},
{
"epoch": 1.4,
"grad_norm": 33.272586822509766,
"learning_rate": 6.9e-06,
"loss": 0.2119,
"num_input_tokens_seen": 45920,
"step": 70
},
{
"epoch": 1.5,
"grad_norm": 15.55538272857666,
"learning_rate": 7.4e-06,
"loss": 0.4496,
"num_input_tokens_seen": 49024,
"step": 75
},
{
"epoch": 1.6,
"grad_norm": 28.139711380004883,
"learning_rate": 7.9e-06,
"loss": 0.2051,
"num_input_tokens_seen": 52448,
"step": 80
},
{
"epoch": 1.7,
"grad_norm": 62.42750549316406,
"learning_rate": 8.400000000000001e-06,
"loss": 0.3832,
"num_input_tokens_seen": 55680,
"step": 85
},
{
"epoch": 1.8,
"grad_norm": 16.478151321411133,
"learning_rate": 8.900000000000001e-06,
"loss": 0.2837,
"num_input_tokens_seen": 59072,
"step": 90
},
{
"epoch": 1.9,
"grad_norm": 43.594581604003906,
"learning_rate": 9.4e-06,
"loss": 0.2905,
"num_input_tokens_seen": 62368,
"step": 95
},
{
"epoch": 2.0,
"grad_norm": 9.988426208496094,
"learning_rate": 9.9e-06,
"loss": 0.1956,
"num_input_tokens_seen": 65088,
"step": 100
},
{
"epoch": 2.0,
"eval_loss": 0.20375482738018036,
"eval_runtime": 1.0065,
"eval_samples_per_second": 49.678,
"eval_steps_per_second": 12.916,
"num_input_tokens_seen": 65088,
"step": 100
},
{
"epoch": 2.1,
"grad_norm": 45.27996063232422,
"learning_rate": 9.999512620046523e-06,
"loss": 0.2352,
"num_input_tokens_seen": 67968,
"step": 105
},
{
"epoch": 2.2,
"grad_norm": 24.10486602783203,
"learning_rate": 9.997532801828659e-06,
"loss": 0.3626,
"num_input_tokens_seen": 70592,
"step": 110
},
{
"epoch": 2.3,
"grad_norm": 55.39781188964844,
"learning_rate": 9.994030686707171e-06,
"loss": 0.4318,
"num_input_tokens_seen": 73632,
"step": 115
},
{
"epoch": 2.4,
"grad_norm": 35.644168853759766,
"learning_rate": 9.989007341460251e-06,
"loss": 0.2275,
"num_input_tokens_seen": 77120,
"step": 120
},
{
"epoch": 2.5,
"grad_norm": 69.64350128173828,
"learning_rate": 9.982464296247523e-06,
"loss": 0.1836,
"num_input_tokens_seen": 80448,
"step": 125
},
{
"epoch": 2.6,
"grad_norm": 25.893815994262695,
"learning_rate": 9.974403544143942e-06,
"loss": 0.2254,
"num_input_tokens_seen": 83488,
"step": 130
},
{
"epoch": 2.7,
"grad_norm": 48.531349182128906,
"learning_rate": 9.964827540532685e-06,
"loss": 0.3111,
"num_input_tokens_seen": 86720,
"step": 135
},
{
"epoch": 2.8,
"grad_norm": 14.120803833007812,
"learning_rate": 9.953739202357219e-06,
"loss": 0.3352,
"num_input_tokens_seen": 90176,
"step": 140
},
{
"epoch": 2.9,
"grad_norm": 10.534299850463867,
"learning_rate": 9.941141907232766e-06,
"loss": 0.2023,
"num_input_tokens_seen": 94176,
"step": 145
},
{
"epoch": 3.0,
"grad_norm": 15.623373031616211,
"learning_rate": 9.927039492417452e-06,
"loss": 0.179,
"num_input_tokens_seen": 97536,
"step": 150
},
{
"epoch": 3.1,
"grad_norm": 24.761083602905273,
"learning_rate": 9.911436253643445e-06,
"loss": 0.3012,
"num_input_tokens_seen": 100672,
"step": 155
},
{
"epoch": 3.2,
"grad_norm": 18.8632869720459,
"learning_rate": 9.894336943808426e-06,
"loss": 0.1366,
"num_input_tokens_seen": 103808,
"step": 160
},
{
"epoch": 3.3,
"grad_norm": 9.393210411071777,
"learning_rate": 9.875746771527817e-06,
"loss": 0.186,
"num_input_tokens_seen": 106464,
"step": 165
},
{
"epoch": 3.4,
"grad_norm": 16.760845184326172,
"learning_rate": 9.85567139954818e-06,
"loss": 0.2138,
"num_input_tokens_seen": 109056,
"step": 170
},
{
"epoch": 3.5,
"grad_norm": 34.08476638793945,
"learning_rate": 9.834116943022299e-06,
"loss": 0.2066,
"num_input_tokens_seen": 111520,
"step": 175
},
{
"epoch": 3.6,
"grad_norm": 23.190500259399414,
"learning_rate": 9.811089967646427e-06,
"loss": 0.275,
"num_input_tokens_seen": 114816,
"step": 180
},
{
"epoch": 3.7,
"grad_norm": 12.0489501953125,
"learning_rate": 9.786597487660336e-06,
"loss": 0.2247,
"num_input_tokens_seen": 118528,
"step": 185
},
{
"epoch": 3.8,
"grad_norm": 11.4762601852417,
"learning_rate": 9.760646963710694e-06,
"loss": 0.2004,
"num_input_tokens_seen": 121568,
"step": 190
},
{
"epoch": 3.9,
"grad_norm": 18.891124725341797,
"learning_rate": 9.733246300578482e-06,
"loss": 0.218,
"num_input_tokens_seen": 125344,
"step": 195
},
{
"epoch": 4.0,
"grad_norm": 44.311527252197266,
"learning_rate": 9.704403844771128e-06,
"loss": 0.1539,
"num_input_tokens_seen": 128896,
"step": 200
},
{
"epoch": 4.0,
"eval_loss": 0.18065515160560608,
"eval_runtime": 0.9979,
"eval_samples_per_second": 50.106,
"eval_steps_per_second": 13.027,
"num_input_tokens_seen": 128896,
"step": 200
},
{
"epoch": 4.1,
"grad_norm": 15.962028503417969,
"learning_rate": 9.674128381980073e-06,
"loss": 0.2518,
"num_input_tokens_seen": 132000,
"step": 205
},
{
"epoch": 4.2,
"grad_norm": 19.0295352935791,
"learning_rate": 9.642429134404568e-06,
"loss": 0.1297,
"num_input_tokens_seen": 136032,
"step": 210
},
{
"epoch": 4.3,
"grad_norm": 22.2943115234375,
"learning_rate": 9.609315757942504e-06,
"loss": 0.1967,
"num_input_tokens_seen": 139392,
"step": 215
},
{
"epoch": 4.4,
"grad_norm": 35.93865203857422,
"learning_rate": 9.574798339249124e-06,
"loss": 0.1113,
"num_input_tokens_seen": 142624,
"step": 220
},
{
"epoch": 4.5,
"grad_norm": 30.04070281982422,
"learning_rate": 9.538887392664544e-06,
"loss": 0.2285,
"num_input_tokens_seen": 146144,
"step": 225
},
{
"epoch": 4.6,
"grad_norm": 35.45005416870117,
"learning_rate": 9.501593857010968e-06,
"loss": 0.1175,
"num_input_tokens_seen": 149248,
"step": 230
},
{
"epoch": 4.7,
"grad_norm": 75.65644073486328,
"learning_rate": 9.46292909226063e-06,
"loss": 0.2548,
"num_input_tokens_seen": 152384,
"step": 235
},
{
"epoch": 4.8,
"grad_norm": 37.20109939575195,
"learning_rate": 9.42290487607542e-06,
"loss": 0.3188,
"num_input_tokens_seen": 155232,
"step": 240
},
{
"epoch": 4.9,
"grad_norm": 19.07550621032715,
"learning_rate": 9.381533400219319e-06,
"loss": 0.1796,
"num_input_tokens_seen": 159104,
"step": 245
},
{
"epoch": 5.0,
"grad_norm": 34.64167404174805,
"learning_rate": 9.338827266844643e-06,
"loss": 0.2762,
"num_input_tokens_seen": 162144,
"step": 250
},
{
"epoch": 5.1,
"grad_norm": 12.19721508026123,
"learning_rate": 9.294799484653323e-06,
"loss": 0.1075,
"num_input_tokens_seen": 165312,
"step": 255
},
{
"epoch": 5.2,
"grad_norm": 10.703926086425781,
"learning_rate": 9.24946346493432e-06,
"loss": 0.1454,
"num_input_tokens_seen": 169696,
"step": 260
},
{
"epoch": 5.3,
"grad_norm": 4.545947551727295,
"learning_rate": 9.202833017478421e-06,
"loss": 0.1456,
"num_input_tokens_seen": 173504,
"step": 265
},
{
"epoch": 5.4,
"grad_norm": 55.721229553222656,
"learning_rate": 9.154922346371641e-06,
"loss": 0.2572,
"num_input_tokens_seen": 176256,
"step": 270
},
{
"epoch": 5.5,
"grad_norm": 27.98261070251465,
"learning_rate": 9.10574604566852e-06,
"loss": 0.1196,
"num_input_tokens_seen": 179104,
"step": 275
},
{
"epoch": 5.6,
"grad_norm": 28.25328254699707,
"learning_rate": 9.055319094946633e-06,
"loss": 0.2833,
"num_input_tokens_seen": 182112,
"step": 280
},
{
"epoch": 5.7,
"grad_norm": 65.87811279296875,
"learning_rate": 9.003656854743667e-06,
"loss": 0.2797,
"num_input_tokens_seen": 185024,
"step": 285
},
{
"epoch": 5.8,
"grad_norm": 41.558692932128906,
"learning_rate": 8.950775061878453e-06,
"loss": 0.2964,
"num_input_tokens_seen": 188032,
"step": 290
},
{
"epoch": 5.9,
"grad_norm": 41.10008239746094,
"learning_rate": 8.896689824657371e-06,
"loss": 0.2251,
"num_input_tokens_seen": 190816,
"step": 295
},
{
"epoch": 6.0,
"grad_norm": 19.21298599243164,
"learning_rate": 8.841417617967618e-06,
"loss": 0.1384,
"num_input_tokens_seen": 193888,
"step": 300
},
{
"epoch": 6.0,
"eval_loss": 0.16209302842617035,
"eval_runtime": 1.0001,
"eval_samples_per_second": 49.994,
"eval_steps_per_second": 12.998,
"num_input_tokens_seen": 193888,
"step": 300
},
{
"epoch": 6.1,
"grad_norm": 13.841469764709473,
"learning_rate": 8.784975278258783e-06,
"loss": 0.2431,
"num_input_tokens_seen": 197152,
"step": 305
},
{
"epoch": 6.2,
"grad_norm": 4.108739376068115,
"learning_rate": 8.727379998414311e-06,
"loss": 0.0386,
"num_input_tokens_seen": 200032,
"step": 310
},
{
"epoch": 6.3,
"grad_norm": 1.0758922100067139,
"learning_rate": 8.668649322514382e-06,
"loss": 0.0371,
"num_input_tokens_seen": 203744,
"step": 315
},
{
"epoch": 6.4,
"grad_norm": 23.765316009521484,
"learning_rate": 8.608801140491811e-06,
"loss": 0.0174,
"num_input_tokens_seen": 206880,
"step": 320
},
{
"epoch": 6.5,
"grad_norm": 81.70988464355469,
"learning_rate": 8.547853682682605e-06,
"loss": 0.3998,
"num_input_tokens_seen": 209952,
"step": 325
},
{
"epoch": 6.6,
"grad_norm": 148.9218292236328,
"learning_rate": 8.485825514272824e-06,
"loss": 0.1394,
"num_input_tokens_seen": 213280,
"step": 330
},
{
"epoch": 6.7,
"grad_norm": 90.68417358398438,
"learning_rate": 8.422735529643445e-06,
"loss": 0.2914,
"num_input_tokens_seen": 216448,
"step": 335
},
{
"epoch": 6.8,
"grad_norm": 84.91841125488281,
"learning_rate": 8.358602946614952e-06,
"loss": 0.4754,
"num_input_tokens_seen": 219776,
"step": 340
},
{
"epoch": 6.9,
"grad_norm": 6.3870463371276855,
"learning_rate": 8.293447300593402e-06,
"loss": 0.1109,
"num_input_tokens_seen": 223136,
"step": 345
},
{
"epoch": 7.0,
"grad_norm": 46.83969497680664,
"learning_rate": 8.227288438619754e-06,
"loss": 0.1319,
"num_input_tokens_seen": 226880,
"step": 350
},
{
"epoch": 7.1,
"grad_norm": 19.829757690429688,
"learning_rate": 8.160146513324256e-06,
"loss": 0.0609,
"num_input_tokens_seen": 229856,
"step": 355
},
{
"epoch": 7.2,
"grad_norm": 8.89151382446289,
"learning_rate": 8.092041976787772e-06,
"loss": 0.214,
"num_input_tokens_seen": 233216,
"step": 360
},
{
"epoch": 7.3,
"grad_norm": 22.78290367126465,
"learning_rate": 8.022995574311876e-06,
"loss": 0.1058,
"num_input_tokens_seen": 236416,
"step": 365
},
{
"epoch": 7.4,
"grad_norm": 18.72032356262207,
"learning_rate": 7.953028338099628e-06,
"loss": 0.0806,
"num_input_tokens_seen": 239456,
"step": 370
},
{
"epoch": 7.5,
"grad_norm": 34.90778350830078,
"learning_rate": 7.882161580848966e-06,
"loss": 0.1732,
"num_input_tokens_seen": 242272,
"step": 375
},
{
"epoch": 7.6,
"grad_norm": 81.65283203125,
"learning_rate": 7.810416889260653e-06,
"loss": 0.2579,
"num_input_tokens_seen": 245376,
"step": 380
},
{
"epoch": 7.7,
"grad_norm": 54.231712341308594,
"learning_rate": 7.737816117462752e-06,
"loss": 0.1688,
"num_input_tokens_seen": 248864,
"step": 385
},
{
"epoch": 7.8,
"grad_norm": 6.326272487640381,
"learning_rate": 7.66438138035365e-06,
"loss": 0.069,
"num_input_tokens_seen": 251776,
"step": 390
},
{
"epoch": 7.9,
"grad_norm": 24.330530166625977,
"learning_rate": 7.590135046865652e-06,
"loss": 0.0395,
"num_input_tokens_seen": 255360,
"step": 395
},
{
"epoch": 8.0,
"grad_norm": 37.1357421875,
"learning_rate": 7.515099733151177e-06,
"loss": 0.2604,
"num_input_tokens_seen": 258816,
"step": 400
},
{
"epoch": 8.0,
"eval_loss": 0.18744546175003052,
"eval_runtime": 0.9989,
"eval_samples_per_second": 50.055,
"eval_steps_per_second": 13.014,
"num_input_tokens_seen": 258816,
"step": 400
},
{
"epoch": 8.1,
"grad_norm": 2.0634098052978516,
"learning_rate": 7.4392982956936644e-06,
"loss": 0.0269,
"num_input_tokens_seen": 262144,
"step": 405
},
{
"epoch": 8.2,
"grad_norm": 9.939993858337402,
"learning_rate": 7.362753824345271e-06,
"loss": 0.1619,
"num_input_tokens_seen": 265440,
"step": 410
},
{
"epoch": 8.3,
"grad_norm": 40.41669464111328,
"learning_rate": 7.285489635293472e-06,
"loss": 0.1923,
"num_input_tokens_seen": 269248,
"step": 415
},
{
"epoch": 8.4,
"grad_norm": 9.423033714294434,
"learning_rate": 7.207529263958727e-06,
"loss": 0.0225,
"num_input_tokens_seen": 272096,
"step": 420
},
{
"epoch": 8.5,
"grad_norm": 81.35832214355469,
"learning_rate": 7.128896457825364e-06,
"loss": 0.1014,
"num_input_tokens_seen": 274912,
"step": 425
},
{
"epoch": 8.6,
"grad_norm": 92.09923553466797,
"learning_rate": 7.049615169207864e-06,
"loss": 0.1776,
"num_input_tokens_seen": 277856,
"step": 430
},
{
"epoch": 8.7,
"grad_norm": 41.74745559692383,
"learning_rate": 6.9697095479547564e-06,
"loss": 0.0569,
"num_input_tokens_seen": 281408,
"step": 435
},
{
"epoch": 8.8,
"grad_norm": 1.260631799697876,
"learning_rate": 6.889203934092337e-06,
"loss": 0.0729,
"num_input_tokens_seen": 284960,
"step": 440
},
{
"epoch": 8.9,
"grad_norm": 11.80173397064209,
"learning_rate": 6.808122850410461e-06,
"loss": 0.0428,
"num_input_tokens_seen": 288000,
"step": 445
},
{
"epoch": 9.0,
"grad_norm": 1.4177440404891968,
"learning_rate": 6.7264909949926735e-06,
"loss": 0.0264,
"num_input_tokens_seen": 291584,
"step": 450
},
{
"epoch": 9.1,
"grad_norm": 14.504668235778809,
"learning_rate": 6.644333233692917e-06,
"loss": 0.029,
"num_input_tokens_seen": 295264,
"step": 455
},
{
"epoch": 9.2,
"grad_norm": 35.566810607910156,
"learning_rate": 6.561674592561164e-06,
"loss": 0.0487,
"num_input_tokens_seen": 298368,
"step": 460
},
{
"epoch": 9.3,
"grad_norm": 0.43563374876976013,
"learning_rate": 6.4785402502202345e-06,
"loss": 0.0025,
"num_input_tokens_seen": 301760,
"step": 465
},
{
"epoch": 9.4,
"grad_norm": 65.56232452392578,
"learning_rate": 6.3949555301961474e-06,
"loss": 0.0865,
"num_input_tokens_seen": 304736,
"step": 470
},
{
"epoch": 9.5,
"grad_norm": 93.48302459716797,
"learning_rate": 6.310945893204324e-06,
"loss": 0.0722,
"num_input_tokens_seen": 307872,
"step": 475
},
{
"epoch": 9.6,
"grad_norm": 0.22434131801128387,
"learning_rate": 6.2265369293940135e-06,
"loss": 0.008,
"num_input_tokens_seen": 310624,
"step": 480
},
{
"epoch": 9.7,
"grad_norm": 0.6443999409675598,
"learning_rate": 6.141754350553279e-06,
"loss": 0.0332,
"num_input_tokens_seen": 314016,
"step": 485
},
{
"epoch": 9.8,
"grad_norm": 0.9288974404335022,
"learning_rate": 6.056623982276945e-06,
"loss": 0.0317,
"num_input_tokens_seen": 317376,
"step": 490
},
{
"epoch": 9.9,
"grad_norm": 1.3583974838256836,
"learning_rate": 5.97117175609986e-06,
"loss": 0.0318,
"num_input_tokens_seen": 320256,
"step": 495
},
{
"epoch": 10.0,
"grad_norm": 0.43309640884399414,
"learning_rate": 5.885423701597918e-06,
"loss": 0.0017,
"num_input_tokens_seen": 323328,
"step": 500
},
{
"epoch": 10.0,
"eval_loss": 0.2034723162651062,
"eval_runtime": 0.9994,
"eval_samples_per_second": 50.028,
"eval_steps_per_second": 13.007,
"num_input_tokens_seen": 323328,
"step": 500
},
{
"epoch": 10.1,
"grad_norm": 0.028279408812522888,
"learning_rate": 5.799405938459175e-06,
"loss": 0.0007,
"num_input_tokens_seen": 326624,
"step": 505
},
{
"epoch": 10.2,
"grad_norm": 0.04337608441710472,
"learning_rate": 5.7131446685275595e-06,
"loss": 0.0046,
"num_input_tokens_seen": 329824,
"step": 510
},
{
"epoch": 10.3,
"grad_norm": 0.10003524273633957,
"learning_rate": 5.626666167821522e-06,
"loss": 0.0049,
"num_input_tokens_seen": 333664,
"step": 515
},
{
"epoch": 10.4,
"grad_norm": 2.51832914352417,
"learning_rate": 5.539996778530114e-06,
"loss": 0.0717,
"num_input_tokens_seen": 337248,
"step": 520
},
{
"epoch": 10.5,
"grad_norm": 1.407471776008606,
"learning_rate": 5.453162900988902e-06,
"loss": 0.0623,
"num_input_tokens_seen": 340192,
"step": 525
},
{
"epoch": 10.6,
"grad_norm": 0.4082932770252228,
"learning_rate": 5.366190985638159e-06,
"loss": 0.0057,
"num_input_tokens_seen": 343232,
"step": 530
},
{
"epoch": 10.7,
"grad_norm": 0.08292396366596222,
"learning_rate": 5.27910752496582e-06,
"loss": 0.0331,
"num_input_tokens_seen": 346400,
"step": 535
},
{
"epoch": 10.8,
"grad_norm": 0.014418110251426697,
"learning_rate": 5.1919390454376e-06,
"loss": 0.0028,
"num_input_tokens_seen": 349760,
"step": 540
},
{
"epoch": 10.9,
"grad_norm": 10.531606674194336,
"learning_rate": 5.1047120994167855e-06,
"loss": 0.0056,
"num_input_tokens_seen": 352352,
"step": 545
},
{
"epoch": 11.0,
"grad_norm": 0.25946757197380066,
"learning_rate": 5.0174532570761194e-06,
"loss": 0.002,
"num_input_tokens_seen": 355584,
"step": 550
},
{
"epoch": 11.1,
"grad_norm": 0.05245262011885643,
"learning_rate": 4.9301890983042744e-06,
"loss": 0.0003,
"num_input_tokens_seen": 359200,
"step": 555
},
{
"epoch": 11.2,
"grad_norm": 0.3186964988708496,
"learning_rate": 4.842946204609359e-06,
"loss": 0.0213,
"num_input_tokens_seen": 363200,
"step": 560
},
{
"epoch": 11.3,
"grad_norm": 0.11435585469007492,
"learning_rate": 4.755751151021934e-06,
"loss": 0.0007,
"num_input_tokens_seen": 366624,
"step": 565
},
{
"epoch": 11.4,
"grad_norm": 0.02673327922821045,
"learning_rate": 4.668630498000001e-06,
"loss": 0.0005,
"num_input_tokens_seen": 369824,
"step": 570
},
{
"epoch": 11.5,
"grad_norm": 22.57182502746582,
"learning_rate": 4.581610783338424e-06,
"loss": 0.0024,
"num_input_tokens_seen": 373120,
"step": 575
},
{
"epoch": 11.6,
"grad_norm": 13.380699157714844,
"learning_rate": 4.494718514085269e-06,
"loss": 0.002,
"num_input_tokens_seen": 375776,
"step": 580
},
{
"epoch": 11.7,
"grad_norm": 0.11530610173940659,
"learning_rate": 4.4079801584674955e-06,
"loss": 0.0029,
"num_input_tokens_seen": 378624,
"step": 585
},
{
"epoch": 11.8,
"grad_norm": 0.466453492641449,
"learning_rate": 4.321422137828479e-06,
"loss": 0.0003,
"num_input_tokens_seen": 381728,
"step": 590
},
{
"epoch": 11.9,
"grad_norm": 0.0489952377974987,
"learning_rate": 4.23507081857981e-06,
"loss": 0.0051,
"num_input_tokens_seen": 385312,
"step": 595
},
{
"epoch": 12.0,
"grad_norm": 0.6647362112998962,
"learning_rate": 4.148952504169839e-06,
"loss": 0.0008,
"num_input_tokens_seen": 388448,
"step": 600
},
{
"epoch": 12.0,
"eval_loss": 0.2536269426345825,
"eval_runtime": 1.0014,
"eval_samples_per_second": 49.928,
"eval_steps_per_second": 12.981,
"num_input_tokens_seen": 388448,
"step": 600
},
{
"epoch": 12.1,
"grad_norm": 0.053329646587371826,
"learning_rate": 4.063093427071376e-06,
"loss": 0.0003,
"num_input_tokens_seen": 391296,
"step": 605
},
{
"epoch": 12.2,
"grad_norm": 0.05639015883207321,
"learning_rate": 3.977519740791049e-06,
"loss": 0.0022,
"num_input_tokens_seen": 394688,
"step": 610
},
{
"epoch": 12.3,
"grad_norm": 0.0420084111392498,
"learning_rate": 3.892257511902664e-06,
"loss": 0.0003,
"num_input_tokens_seen": 397792,
"step": 615
},
{
"epoch": 12.4,
"grad_norm": 0.9871878623962402,
"learning_rate": 3.8073327121070968e-06,
"loss": 0.0004,
"num_input_tokens_seen": 400576,
"step": 620
},
{
"epoch": 12.5,
"grad_norm": 0.32017242908477783,
"learning_rate": 3.7227712103210485e-06,
"loss": 0.0003,
"num_input_tokens_seen": 404000,
"step": 625
},
{
"epoch": 12.6,
"grad_norm": 0.1881232112646103,
"learning_rate": 3.6385987647971287e-06,
"loss": 0.0002,
"num_input_tokens_seen": 407712,
"step": 630
},
{
"epoch": 12.7,
"grad_norm": 0.10421431809663773,
"learning_rate": 3.5548410152776414e-06,
"loss": 0.0004,
"num_input_tokens_seen": 411104,
"step": 635
},
{
"epoch": 12.8,
"grad_norm": 0.011882122606039047,
"learning_rate": 3.471523475184472e-06,
"loss": 0.0006,
"num_input_tokens_seen": 414624,
"step": 640
},
{
"epoch": 12.9,
"grad_norm": 0.011652891524136066,
"learning_rate": 3.3886715238474454e-06,
"loss": 0.0002,
"num_input_tokens_seen": 417824,
"step": 645
},
{
"epoch": 13.0,
"grad_norm": 0.020541567355394363,
"learning_rate": 3.3063103987735433e-06,
"loss": 0.0003,
"num_input_tokens_seen": 421248,
"step": 650
},
{
"epoch": 13.1,
"grad_norm": 0.02439752034842968,
"learning_rate": 3.224465187959316e-06,
"loss": 0.0002,
"num_input_tokens_seen": 424800,
"step": 655
},
{
"epoch": 13.2,
"grad_norm": 0.01759609952569008,
"learning_rate": 3.1431608222488276e-06,
"loss": 0.0001,
"num_input_tokens_seen": 427872,
"step": 660
},
{
"epoch": 13.3,
"grad_norm": 0.015870150178670883,
"learning_rate": 3.0624220677394854e-06,
"loss": 0.0002,
"num_input_tokens_seen": 431392,
"step": 665
},
{
"epoch": 13.4,
"grad_norm": 0.027557481080293655,
"learning_rate": 2.98227351823805e-06,
"loss": 0.0001,
"num_input_tokens_seen": 434176,
"step": 670
},
{
"epoch": 13.5,
"grad_norm": 0.16971592605113983,
"learning_rate": 2.9027395877691143e-06,
"loss": 0.0002,
"num_input_tokens_seen": 437696,
"step": 675
},
{
"epoch": 13.6,
"grad_norm": 0.17097657918930054,
"learning_rate": 2.8238445031383634e-06,
"loss": 0.0002,
"num_input_tokens_seen": 440064,
"step": 680
},
{
"epoch": 13.7,
"grad_norm": 0.024238118901848793,
"learning_rate": 2.7456122965528475e-06,
"loss": 0.0001,
"num_input_tokens_seen": 443328,
"step": 685
},
{
"epoch": 13.8,
"grad_norm": 0.007093928754329681,
"learning_rate": 2.6680667983005446e-06,
"loss": 0.0001,
"num_input_tokens_seen": 447072,
"step": 690
},
{
"epoch": 13.9,
"grad_norm": 0.01546875573694706,
"learning_rate": 2.5912316294914232e-06,
"loss": 0.0002,
"num_input_tokens_seen": 450688,
"step": 695
},
{
"epoch": 14.0,
"grad_norm": 0.042617473751306534,
"learning_rate": 2.5151301948622235e-06,
"loss": 0.0002,
"num_input_tokens_seen": 453984,
"step": 700
},
{
"epoch": 14.0,
"eval_loss": 0.28826868534088135,
"eval_runtime": 1.001,
"eval_samples_per_second": 49.95,
"eval_steps_per_second": 12.987,
"num_input_tokens_seen": 453984,
"step": 700
},
{
"epoch": 14.1,
"grad_norm": 0.04438012093305588,
"learning_rate": 2.4397856756471435e-06,
"loss": 0.0001,
"num_input_tokens_seen": 457504,
"step": 705
},
{
"epoch": 14.2,
"grad_norm": 0.009802593849599361,
"learning_rate": 2.3652210225166122e-06,
"loss": 0.0001,
"num_input_tokens_seen": 460896,
"step": 710
},
{
"epoch": 14.3,
"grad_norm": 0.009903536178171635,
"learning_rate": 2.2914589485863015e-06,
"loss": 0.0001,
"num_input_tokens_seen": 463840,
"step": 715
},
{
"epoch": 14.4,
"grad_norm": 0.007097623776644468,
"learning_rate": 2.218521922498476e-06,
"loss": 0.0001,
"num_input_tokens_seen": 467648,
"step": 720
},
{
"epoch": 14.5,
"grad_norm": 0.018301447853446007,
"learning_rate": 2.146432161577842e-06,
"loss": 0.0002,
"num_input_tokens_seen": 470688,
"step": 725
},
{
"epoch": 14.6,
"grad_norm": 0.03601658716797829,
"learning_rate": 2.075211625063923e-06,
"loss": 0.0002,
"num_input_tokens_seen": 473952,
"step": 730
},
{
"epoch": 14.7,
"grad_norm": 0.038516778498888016,
"learning_rate": 2.0048820074220716e-06,
"loss": 0.0001,
"num_input_tokens_seen": 477120,
"step": 735
},
{
"epoch": 14.8,
"grad_norm": 0.07624400407075882,
"learning_rate": 1.9354647317351187e-06,
"loss": 0.0001,
"num_input_tokens_seen": 480704,
"step": 740
},
{
"epoch": 14.9,
"grad_norm": 0.013465417549014091,
"learning_rate": 1.8669809431776991e-06,
"loss": 0.0001,
"num_input_tokens_seen": 483744,
"step": 745
},
{
"epoch": 15.0,
"grad_norm": 0.007154775317758322,
"learning_rate": 1.799451502575222e-06,
"loss": 0.0001,
"num_input_tokens_seen": 487232,
"step": 750
},
{
"epoch": 15.1,
"grad_norm": 0.008196992799639702,
"learning_rate": 1.7328969800494727e-06,
"loss": 0.0001,
"num_input_tokens_seen": 491104,
"step": 755
},
{
"epoch": 15.2,
"grad_norm": 0.005913514643907547,
"learning_rate": 1.6673376487527382e-06,
"loss": 0.0001,
"num_input_tokens_seen": 493568,
"step": 760
},
{
"epoch": 15.3,
"grad_norm": 0.008342737331986427,
"learning_rate": 1.6027934786924187e-06,
"loss": 0.0001,
"num_input_tokens_seen": 496928,
"step": 765
},
{
"epoch": 15.4,
"grad_norm": 0.036556925624608994,
"learning_rate": 1.5392841306479667e-06,
"loss": 0.0001,
"num_input_tokens_seen": 499936,
"step": 770
},
{
"epoch": 15.5,
"grad_norm": 0.04268655180931091,
"learning_rate": 1.4768289501820265e-06,
"loss": 0.0001,
"num_input_tokens_seen": 503232,
"step": 775
},
{
"epoch": 15.6,
"grad_norm": 0.02554980479180813,
"learning_rate": 1.4154469617475864e-06,
"loss": 0.0002,
"num_input_tokens_seen": 506848,
"step": 780
},
{
"epoch": 15.7,
"grad_norm": 0.010822230018675327,
"learning_rate": 1.3551568628929434e-06,
"loss": 0.0001,
"num_input_tokens_seen": 510304,
"step": 785
},
{
"epoch": 15.8,
"grad_norm": 0.007679897826164961,
"learning_rate": 1.2959770185662502e-06,
"loss": 0.0001,
"num_input_tokens_seen": 512992,
"step": 790
},
{
"epoch": 15.9,
"grad_norm": 0.05034717917442322,
"learning_rate": 1.2379254555213788e-06,
"loss": 0.0002,
"num_input_tokens_seen": 516032,
"step": 795
},
{
"epoch": 16.0,
"grad_norm": 0.01680305227637291,
"learning_rate": 1.1810198568267906e-06,
"loss": 0.0001,
"num_input_tokens_seen": 519200,
"step": 800
},
{
"epoch": 16.0,
"eval_loss": 0.2697804570198059,
"eval_runtime": 0.9988,
"eval_samples_per_second": 50.059,
"eval_steps_per_second": 13.015,
"num_input_tokens_seen": 519200,
"step": 800
},
{
"epoch": 16.1,
"grad_norm": 0.018977373838424683,
"learning_rate": 1.1252775564791023e-06,
"loss": 0.0001,
"num_input_tokens_seen": 522272,
"step": 805
},
{
"epoch": 16.2,
"grad_norm": 0.0821557492017746,
"learning_rate": 1.0707155341229902e-06,
"loss": 0.0002,
"num_input_tokens_seen": 525440,
"step": 810
},
{
"epoch": 16.3,
"grad_norm": 0.029497282579541206,
"learning_rate": 1.0173504098790188e-06,
"loss": 0.0002,
"num_input_tokens_seen": 529056,
"step": 815
},
{
"epoch": 16.4,
"grad_norm": 0.013569852337241173,
"learning_rate": 9.651984392809916e-07,
"loss": 0.0001,
"num_input_tokens_seen": 532192,
"step": 820
},
{
"epoch": 16.5,
"grad_norm": 0.013164045289158821,
"learning_rate": 9.142755083243577e-07,
"loss": 0.0001,
"num_input_tokens_seen": 535712,
"step": 825
},
{
"epoch": 16.6,
"grad_norm": 0.025486232712864876,
"learning_rate": 8.645971286271903e-07,
"loss": 0.0001,
"num_input_tokens_seen": 538752,
"step": 830
},
{
"epoch": 16.7,
"grad_norm": 0.014193262904882431,
"learning_rate": 8.161784327051919e-07,
"loss": 0.0001,
"num_input_tokens_seen": 542368,
"step": 835
},
{
"epoch": 16.8,
"grad_norm": 0.011246598325669765,
"learning_rate": 7.690341693621805e-07,
"loss": 0.0001,
"num_input_tokens_seen": 545088,
"step": 840
},
{
"epoch": 16.9,
"grad_norm": 0.04158206656575203,
"learning_rate": 7.23178699197467e-07,
"loss": 0.0001,
"num_input_tokens_seen": 548800,
"step": 845
},
{
"epoch": 17.0,
"grad_norm": 0.004439216572791338,
"learning_rate": 6.786259902314768e-07,
"loss": 0.0001,
"num_input_tokens_seen": 551552,
"step": 850
},
{
"epoch": 17.1,
"grad_norm": 0.027351979166269302,
"learning_rate": 6.353896136509524e-07,
"loss": 0.0002,
"num_input_tokens_seen": 554528,
"step": 855
},
{
"epoch": 17.2,
"grad_norm": 0.00909334421157837,
"learning_rate": 5.934827396750392e-07,
"loss": 0.0001,
"num_input_tokens_seen": 557952,
"step": 860
},
{
"epoch": 17.3,
"grad_norm": 0.012324375100433826,
"learning_rate": 5.529181335435124e-07,
"loss": 0.0001,
"num_input_tokens_seen": 560960,
"step": 865
},
{
"epoch": 17.4,
"grad_norm": 0.018054546788334846,
"learning_rate": 5.137081516283582e-07,
"loss": 0.0001,
"num_input_tokens_seen": 564288,
"step": 870
},
{
"epoch": 17.5,
"grad_norm": 0.011498761363327503,
"learning_rate": 4.758647376699033e-07,
"loss": 0.0001,
"num_input_tokens_seen": 566880,
"step": 875
},
{
"epoch": 17.6,
"grad_norm": 0.019566155970096588,
"learning_rate": 4.3939941913863525e-07,
"loss": 0.0001,
"num_input_tokens_seen": 570208,
"step": 880
},
{
"epoch": 17.7,
"grad_norm": 0.0616542249917984,
"learning_rate": 4.043233037238281e-07,
"loss": 0.0001,
"num_input_tokens_seen": 573568,
"step": 885
},
{
"epoch": 17.8,
"grad_norm": 0.02959338016808033,
"learning_rate": 3.7064707595002636e-07,
"loss": 0.0001,
"num_input_tokens_seen": 576896,
"step": 890
},
{
"epoch": 17.9,
"grad_norm": 0.03563592955470085,
"learning_rate": 3.3838099392243915e-07,
"loss": 0.0001,
"num_input_tokens_seen": 580672,
"step": 895
},
{
"epoch": 18.0,
"grad_norm": 0.004577992018312216,
"learning_rate": 3.0753488620222037e-07,
"loss": 0.0001,
"num_input_tokens_seen": 583712,
"step": 900
},
{
"epoch": 18.0,
"eval_loss": 0.2686527371406555,
"eval_runtime": 1.0019,
"eval_samples_per_second": 49.907,
"eval_steps_per_second": 12.976,
"num_input_tokens_seen": 583712,
"step": 900
},
{
"epoch": 18.1,
"grad_norm": 0.009997401386499405,
"learning_rate": 2.7811814881259503e-07,
"loss": 0.0001,
"num_input_tokens_seen": 587040,
"step": 905
},
{
"epoch": 18.2,
"grad_norm": 0.007089632097631693,
"learning_rate": 2.5013974237673824e-07,
"loss": 0.0001,
"num_input_tokens_seen": 590496,
"step": 910
},
{
"epoch": 18.3,
"grad_norm": 0.005006881896406412,
"learning_rate": 2.2360818938828189e-07,
"loss": 0.0001,
"num_input_tokens_seen": 593792,
"step": 915
},
{
"epoch": 18.4,
"grad_norm": 0.01716301590204239,
"learning_rate": 1.9853157161528468e-07,
"loss": 0.0001,
"num_input_tokens_seen": 597408,
"step": 920
},
{
"epoch": 18.5,
"grad_norm": 0.013674321584403515,
"learning_rate": 1.7491752763844294e-07,
"loss": 0.0001,
"num_input_tokens_seen": 600864,
"step": 925
},
{
"epoch": 18.6,
"grad_norm": 0.038443662226200104,
"learning_rate": 1.5277325052430569e-07,
"loss": 0.0001,
"num_input_tokens_seen": 604096,
"step": 930
},
{
"epoch": 18.7,
"grad_norm": 0.02057618647813797,
"learning_rate": 1.3210548563419857e-07,
"loss": 0.0001,
"num_input_tokens_seen": 607200,
"step": 935
},
{
"epoch": 18.8,
"grad_norm": 0.014067213051021099,
"learning_rate": 1.1292052856952063e-07,
"loss": 0.0001,
"num_input_tokens_seen": 610432,
"step": 940
},
{
"epoch": 18.9,
"grad_norm": 0.020766226574778557,
"learning_rate": 9.522422325404234e-08,
"loss": 0.0001,
"num_input_tokens_seen": 613184,
"step": 945
},
{
"epoch": 19.0,
"grad_norm": 0.00667183892801404,
"learning_rate": 7.90219601537906e-08,
"loss": 0.0001,
"num_input_tokens_seen": 616096,
"step": 950
},
{
"epoch": 19.1,
"grad_norm": 0.0170288048684597,
"learning_rate": 6.431867463506047e-08,
"loss": 0.0001,
"num_input_tokens_seen": 619456,
"step": 955
},
{
"epoch": 19.2,
"grad_norm": 0.010430488735437393,
"learning_rate": 5.111884546105506e-08,
"loss": 0.0001,
"num_input_tokens_seen": 622752,
"step": 960
},
{
"epoch": 19.3,
"grad_norm": 0.060864806175231934,
"learning_rate": 3.9426493427611177e-08,
"loss": 0.0001,
"num_input_tokens_seen": 625504,
"step": 965
},
{
"epoch": 19.4,
"grad_norm": 0.013428901322185993,
"learning_rate": 2.9245180138423033e-08,
"loss": 0.0001,
"num_input_tokens_seen": 628960,
"step": 970
},
{
"epoch": 19.5,
"grad_norm": 0.021914439275860786,
"learning_rate": 2.057800692014833e-08,
"loss": 0.0001,
"num_input_tokens_seen": 632128,
"step": 975
},
{
"epoch": 19.6,
"grad_norm": 0.010772191919386387,
"learning_rate": 1.3427613877709523e-08,
"loss": 0.0001,
"num_input_tokens_seen": 635232,
"step": 980
},
{
"epoch": 19.7,
"grad_norm": 0.015666857361793518,
"learning_rate": 7.796179090094891e-09,
"loss": 0.0001,
"num_input_tokens_seen": 638400,
"step": 985
},
{
"epoch": 19.8,
"grad_norm": 0.017693420872092247,
"learning_rate": 3.685417946894254e-09,
"loss": 0.0001,
"num_input_tokens_seen": 641568,
"step": 990
},
{
"epoch": 19.9,
"grad_norm": 0.0059068226255476475,
"learning_rate": 1.096582625772502e-09,
"loss": 0.0001,
"num_input_tokens_seen": 644704,
"step": 995
},
{
"epoch": 20.0,
"grad_norm": 0.03871485963463783,
"learning_rate": 3.0461711048035415e-11,
"loss": 0.0001,
"num_input_tokens_seen": 648352,
"step": 1000
},
{
"epoch": 20.0,
"eval_loss": 0.27483537793159485,
"eval_runtime": 1.0018,
"eval_samples_per_second": 49.911,
"eval_steps_per_second": 12.977,
"num_input_tokens_seen": 648352,
"step": 1000
},
{
"epoch": 20.0,
"num_input_tokens_seen": 648352,
"step": 1000,
"total_flos": 2.9194997525643264e+16,
"train_loss": 0.3380334594594024,
"train_runtime": 177.1764,
"train_samples_per_second": 22.576,
"train_steps_per_second": 5.644
}
],
"logging_steps": 5,
"max_steps": 1000,
"num_input_tokens_seen": 648352,
"num_train_epochs": 20,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.9194997525643264e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}