{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.0,
"eval_steps": 500,
"global_step": 480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08333333333333333,
"grad_norm": 41.758705139160156,
"learning_rate": 4.9986614686909146e-05,
"loss": 39.5171,
"num_input_tokens_seen": 47056,
"step": 5
},
{
"epoch": 0.16666666666666666,
"grad_norm": 35.72260665893555,
"learning_rate": 4.994647308096509e-05,
"loss": 3.2466,
"num_input_tokens_seen": 93544,
"step": 10
},
{
"epoch": 0.25,
"grad_norm": 40.3800163269043,
"learning_rate": 4.987961816680492e-05,
"loss": 3.3122,
"num_input_tokens_seen": 141120,
"step": 15
},
{
"epoch": 0.3333333333333333,
"grad_norm": 22.71551513671875,
"learning_rate": 4.9786121534345265e-05,
"loss": 3.0265,
"num_input_tokens_seen": 188544,
"step": 20
},
{
"epoch": 0.4166666666666667,
"grad_norm": 16.711044311523438,
"learning_rate": 4.966608330212198e-05,
"loss": 2.6989,
"num_input_tokens_seen": 235128,
"step": 25
},
{
"epoch": 0.5,
"grad_norm": 10.093056678771973,
"learning_rate": 4.951963201008076e-05,
"loss": 2.2284,
"num_input_tokens_seen": 281904,
"step": 30
},
{
"epoch": 0.5833333333333334,
"grad_norm": 12.554229736328125,
"learning_rate": 4.934692448193334e-05,
"loss": 2.2164,
"num_input_tokens_seen": 330096,
"step": 35
},
{
"epoch": 0.6666666666666666,
"grad_norm": 20.774412155151367,
"learning_rate": 4.914814565722671e-05,
"loss": 2.6854,
"num_input_tokens_seen": 376824,
"step": 40
},
{
"epoch": 0.75,
"grad_norm": 12.924601554870605,
"learning_rate": 4.892350839330522e-05,
"loss": 2.4108,
"num_input_tokens_seen": 424048,
"step": 45
},
{
"epoch": 0.8333333333333334,
"grad_norm": 23.568218231201172,
"learning_rate": 4.867325323737765e-05,
"loss": 2.419,
"num_input_tokens_seen": 470432,
"step": 50
},
{
"epoch": 0.9166666666666666,
"grad_norm": 29.64082908630371,
"learning_rate": 4.839764816893315e-05,
"loss": 2.3183,
"num_input_tokens_seen": 517624,
"step": 55
},
{
"epoch": 1.0,
"grad_norm": 17.5496883392334,
"learning_rate": 4.8096988312782174e-05,
"loss": 1.9734,
"num_input_tokens_seen": 564920,
"step": 60
},
{
"epoch": 1.0833333333333333,
"grad_norm": 14.56460189819336,
"learning_rate": 4.7771595623029394e-05,
"loss": 1.8086,
"num_input_tokens_seen": 611240,
"step": 65
},
{
"epoch": 1.1666666666666667,
"grad_norm": 11.394974708557129,
"learning_rate": 4.742181853831721e-05,
"loss": 2.4591,
"num_input_tokens_seen": 658904,
"step": 70
},
{
"epoch": 1.25,
"grad_norm": 5.737662315368652,
"learning_rate": 4.7048031608708876e-05,
"loss": 1.7169,
"num_input_tokens_seen": 706480,
"step": 75
},
{
"epoch": 1.3333333333333333,
"grad_norm": 18.580102920532227,
"learning_rate": 4.665063509461097e-05,
"loss": 1.809,
"num_input_tokens_seen": 752904,
"step": 80
},
{
"epoch": 1.4166666666666667,
"grad_norm": 18.77831268310547,
"learning_rate": 4.6230054538164474e-05,
"loss": 2.3907,
"num_input_tokens_seen": 800304,
"step": 85
},
{
"epoch": 1.5,
"grad_norm": 7.019700527191162,
"learning_rate": 4.5786740307563636e-05,
"loss": 2.0662,
"num_input_tokens_seen": 846792,
"step": 90
},
{
"epoch": 1.5833333333333335,
"grad_norm": 12.335309028625488,
"learning_rate": 4.5321167114790385e-05,
"loss": 2.0185,
"num_input_tokens_seen": 894552,
"step": 95
},
{
"epoch": 1.6666666666666665,
"grad_norm": 8.229971885681152,
"learning_rate": 4.4833833507280884e-05,
"loss": 1.9831,
"num_input_tokens_seen": 941040,
"step": 100
},
{
"epoch": 1.75,
"grad_norm": 13.02308177947998,
"learning_rate": 4.4325261334068426e-05,
"loss": 2.0873,
"num_input_tokens_seen": 987776,
"step": 105
},
{
"epoch": 1.8333333333333335,
"grad_norm": 6.7481513023376465,
"learning_rate": 4.379599518697444e-05,
"loss": 1.9163,
"num_input_tokens_seen": 1036072,
"step": 110
},
{
"epoch": 1.9166666666666665,
"grad_norm": 8.03829288482666,
"learning_rate": 4.324660181744589e-05,
"loss": 1.4848,
"num_input_tokens_seen": 1083328,
"step": 115
},
{
"epoch": 2.0,
"grad_norm": 9.837620735168457,
"learning_rate": 4.267766952966369e-05,
"loss": 1.7719,
"num_input_tokens_seen": 1129840,
"step": 120
},
{
"epoch": 2.0833333333333335,
"grad_norm": 14.701643943786621,
"learning_rate": 4.208980755057178e-05,
"loss": 1.4241,
"num_input_tokens_seen": 1177616,
"step": 125
},
{
"epoch": 2.1666666666666665,
"grad_norm": 30.390798568725586,
"learning_rate": 4.148364537750172e-05,
"loss": 1.6277,
"num_input_tokens_seen": 1225296,
"step": 130
},
{
"epoch": 2.25,
"grad_norm": 9.536770820617676,
"learning_rate": 4.085983210409114e-05,
"loss": 0.9761,
"num_input_tokens_seen": 1272160,
"step": 135
},
{
"epoch": 2.3333333333333335,
"grad_norm": 11.45682430267334,
"learning_rate": 4.021903572521802e-05,
"loss": 1.0144,
"num_input_tokens_seen": 1319432,
"step": 140
},
{
"epoch": 2.4166666666666665,
"grad_norm": 11.267831802368164,
"learning_rate": 3.956194242169506e-05,
"loss": 1.3858,
"num_input_tokens_seen": 1365992,
"step": 145
},
{
"epoch": 2.5,
"grad_norm": 11.92896556854248,
"learning_rate": 3.888925582549006e-05,
"loss": 0.9969,
"num_input_tokens_seen": 1412712,
"step": 150
},
{
"epoch": 2.5833333333333335,
"grad_norm": 15.724352836608887,
"learning_rate": 3.82016962662592e-05,
"loss": 1.0721,
"num_input_tokens_seen": 1459448,
"step": 155
},
{
"epoch": 2.6666666666666665,
"grad_norm": 12.68454360961914,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.6622,
"num_input_tokens_seen": 1506944,
"step": 160
},
{
"epoch": 2.75,
"grad_norm": 6.482985496520996,
"learning_rate": 3.678491842064995e-05,
"loss": 1.059,
"num_input_tokens_seen": 1554216,
"step": 165
},
{
"epoch": 2.8333333333333335,
"grad_norm": 15.581390380859375,
"learning_rate": 3.6057217255475034e-05,
"loss": 0.774,
"num_input_tokens_seen": 1600608,
"step": 170
},
{
"epoch": 2.9166666666666665,
"grad_norm": 3.8697454929351807,
"learning_rate": 3.5317675745109866e-05,
"loss": 1.0403,
"num_input_tokens_seen": 1647272,
"step": 175
},
{
"epoch": 3.0,
"grad_norm": 54.316123962402344,
"learning_rate": 3.456708580912725e-05,
"loss": 2.173,
"num_input_tokens_seen": 1694760,
"step": 180
},
{
"epoch": 3.0833333333333335,
"grad_norm": 57.840152740478516,
"learning_rate": 3.380625119803084e-05,
"loss": 1.9583,
"num_input_tokens_seen": 1741936,
"step": 185
},
{
"epoch": 3.1666666666666665,
"grad_norm": 26.283540725708008,
"learning_rate": 3.303598663257904e-05,
"loss": 1.8407,
"num_input_tokens_seen": 1788624,
"step": 190
},
{
"epoch": 3.25,
"grad_norm": 15.37193489074707,
"learning_rate": 3.225711693136156e-05,
"loss": 1.1398,
"num_input_tokens_seen": 1835336,
"step": 195
},
{
"epoch": 3.3333333333333335,
"grad_norm": 53.772212982177734,
"learning_rate": 3.147047612756302e-05,
"loss": 0.8378,
"num_input_tokens_seen": 1882584,
"step": 200
},
{
"epoch": 3.4166666666666665,
"grad_norm": 11.857513427734375,
"learning_rate": 3.0676906575859334e-05,
"loss": 1.4633,
"num_input_tokens_seen": 1929344,
"step": 205
},
{
"epoch": 3.5,
"grad_norm": 5.734513282775879,
"learning_rate": 2.9877258050403212e-05,
"loss": 0.7289,
"num_input_tokens_seen": 1976064,
"step": 210
},
{
"epoch": 3.5833333333333335,
"grad_norm": 7.579070091247559,
"learning_rate": 2.9072386834864724e-05,
"loss": 0.9374,
"num_input_tokens_seen": 2023392,
"step": 215
},
{
"epoch": 3.6666666666666665,
"grad_norm": 15.9894437789917,
"learning_rate": 2.8263154805501297e-05,
"loss": 0.8814,
"num_input_tokens_seen": 2070752,
"step": 220
},
{
"epoch": 3.75,
"grad_norm": 17.481538772583008,
"learning_rate": 2.7450428508239024e-05,
"loss": 0.8873,
"num_input_tokens_seen": 2118464,
"step": 225
},
{
"epoch": 3.8333333333333335,
"grad_norm": 9.79324722290039,
"learning_rate": 2.663507823075358e-05,
"loss": 0.5864,
"num_input_tokens_seen": 2165488,
"step": 230
},
{
"epoch": 3.9166666666666665,
"grad_norm": 29.084766387939453,
"learning_rate": 2.5817977070544407e-05,
"loss": 0.7186,
"num_input_tokens_seen": 2212104,
"step": 235
},
{
"epoch": 4.0,
"grad_norm": 31.556673049926758,
"learning_rate": 2.5e-05,
"loss": 0.8153,
"num_input_tokens_seen": 2259680,
"step": 240
},
{
"epoch": 4.083333333333333,
"grad_norm": 13.266711235046387,
"learning_rate": 2.41820229294556e-05,
"loss": 0.5078,
"num_input_tokens_seen": 2307200,
"step": 245
},
{
"epoch": 4.166666666666667,
"grad_norm": 9.629940032958984,
"learning_rate": 2.3364921769246423e-05,
"loss": 0.5766,
"num_input_tokens_seen": 2354416,
"step": 250
},
{
"epoch": 4.25,
"grad_norm": 3.589583396911621,
"learning_rate": 2.2549571491760986e-05,
"loss": 0.2176,
"num_input_tokens_seen": 2400880,
"step": 255
},
{
"epoch": 4.333333333333333,
"grad_norm": 7.237700939178467,
"learning_rate": 2.173684519449872e-05,
"loss": 0.6257,
"num_input_tokens_seen": 2448016,
"step": 260
},
{
"epoch": 4.416666666666667,
"grad_norm": 34.4775505065918,
"learning_rate": 2.0927613165135285e-05,
"loss": 0.4594,
"num_input_tokens_seen": 2495032,
"step": 265
},
{
"epoch": 4.5,
"grad_norm": 1.8570725917816162,
"learning_rate": 2.0122741949596797e-05,
"loss": 0.1073,
"num_input_tokens_seen": 2542264,
"step": 270
},
{
"epoch": 4.583333333333333,
"grad_norm": 26.749910354614258,
"learning_rate": 1.932309342414067e-05,
"loss": 0.7155,
"num_input_tokens_seen": 2589368,
"step": 275
},
{
"epoch": 4.666666666666667,
"grad_norm": 11.495745658874512,
"learning_rate": 1.852952387243698e-05,
"loss": 1.0752,
"num_input_tokens_seen": 2636712,
"step": 280
},
{
"epoch": 4.75,
"grad_norm": 79.2107925415039,
"learning_rate": 1.7742883068638447e-05,
"loss": 0.7887,
"num_input_tokens_seen": 2682328,
"step": 285
},
{
"epoch": 4.833333333333333,
"grad_norm": 6.504589080810547,
"learning_rate": 1.6964013367420966e-05,
"loss": 0.1958,
"num_input_tokens_seen": 2729264,
"step": 290
},
{
"epoch": 4.916666666666667,
"grad_norm": 17.09076499938965,
"learning_rate": 1.6193748801969163e-05,
"loss": 0.1749,
"num_input_tokens_seen": 2777032,
"step": 295
},
{
"epoch": 5.0,
"grad_norm": 33.851985931396484,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.6815,
"num_input_tokens_seen": 2824600,
"step": 300
},
{
"epoch": 5.083333333333333,
"grad_norm": 26.060302734375,
"learning_rate": 1.4682324254890134e-05,
"loss": 0.4818,
"num_input_tokens_seen": 2871552,
"step": 305
},
{
"epoch": 5.166666666666667,
"grad_norm": 23.55987548828125,
"learning_rate": 1.3942782744524973e-05,
"loss": 0.0944,
"num_input_tokens_seen": 2918968,
"step": 310
},
{
"epoch": 5.25,
"grad_norm": 0.07318225502967834,
"learning_rate": 1.3215081579350058e-05,
"loss": 0.0347,
"num_input_tokens_seen": 2966784,
"step": 315
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.25874024629592896,
"learning_rate": 1.2500000000000006e-05,
"loss": 0.1513,
"num_input_tokens_seen": 3013000,
"step": 320
},
{
"epoch": 5.416666666666667,
"grad_norm": 9.690403938293457,
"learning_rate": 1.1798303733740802e-05,
"loss": 0.6741,
"num_input_tokens_seen": 3059640,
"step": 325
},
{
"epoch": 5.5,
"grad_norm": 0.252065509557724,
"learning_rate": 1.1110744174509952e-05,
"loss": 0.1384,
"num_input_tokens_seen": 3106528,
"step": 330
},
{
"epoch": 5.583333333333333,
"grad_norm": 68.76900482177734,
"learning_rate": 1.043805757830495e-05,
"loss": 0.1767,
"num_input_tokens_seen": 3154400,
"step": 335
},
{
"epoch": 5.666666666666667,
"grad_norm": 0.04263976961374283,
"learning_rate": 9.780964274781984e-06,
"loss": 0.0103,
"num_input_tokens_seen": 3201256,
"step": 340
},
{
"epoch": 5.75,
"grad_norm": 51.393367767333984,
"learning_rate": 9.140167895908867e-06,
"loss": 0.2753,
"num_input_tokens_seen": 3248464,
"step": 345
},
{
"epoch": 5.833333333333333,
"grad_norm": 0.051287949085235596,
"learning_rate": 8.51635462249828e-06,
"loss": 0.1367,
"num_input_tokens_seen": 3294448,
"step": 350
},
{
"epoch": 5.916666666666667,
"grad_norm": 133.54603576660156,
"learning_rate": 7.910192449428217e-06,
"loss": 1.0681,
"num_input_tokens_seen": 3341088,
"step": 355
},
{
"epoch": 6.0,
"grad_norm": 0.007671155035495758,
"learning_rate": 7.3223304703363135e-06,
"loss": 0.0855,
"num_input_tokens_seen": 3389520,
"step": 360
},
{
"epoch": 6.083333333333333,
"grad_norm": 0.07004520297050476,
"learning_rate": 6.753398182554116e-06,
"loss": 0.007,
"num_input_tokens_seen": 3435816,
"step": 365
},
{
"epoch": 6.166666666666667,
"grad_norm": 0.034883052110672,
"learning_rate": 6.204004813025568e-06,
"loss": 0.0161,
"num_input_tokens_seen": 3482312,
"step": 370
},
{
"epoch": 6.25,
"grad_norm": 0.08256684988737106,
"learning_rate": 5.674738665931575e-06,
"loss": 0.002,
"num_input_tokens_seen": 3529280,
"step": 375
},
{
"epoch": 6.333333333333333,
"grad_norm": 9.55763053894043,
"learning_rate": 5.166166492719124e-06,
"loss": 0.0501,
"num_input_tokens_seen": 3575952,
"step": 380
},
{
"epoch": 6.416666666666667,
"grad_norm": 130.89366149902344,
"learning_rate": 4.678832885209622e-06,
"loss": 0.2303,
"num_input_tokens_seen": 3623744,
"step": 385
},
{
"epoch": 6.5,
"grad_norm": 0.029895585030317307,
"learning_rate": 4.213259692436367e-06,
"loss": 0.0015,
"num_input_tokens_seen": 3671256,
"step": 390
},
{
"epoch": 6.583333333333333,
"grad_norm": 0.13803993165493011,
"learning_rate": 3.769945461835531e-06,
"loss": 0.0139,
"num_input_tokens_seen": 3718144,
"step": 395
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.004999049007892609,
"learning_rate": 3.3493649053890326e-06,
"loss": 0.0001,
"num_input_tokens_seen": 3766064,
"step": 400
},
{
"epoch": 6.75,
"grad_norm": 0.4988614618778229,
"learning_rate": 2.9519683912911266e-06,
"loss": 0.0013,
"num_input_tokens_seen": 3812744,
"step": 405
},
{
"epoch": 6.833333333333333,
"grad_norm": 0.0008273015846498311,
"learning_rate": 2.578181461682794e-06,
"loss": 0.0275,
"num_input_tokens_seen": 3859792,
"step": 410
},
{
"epoch": 6.916666666666667,
"grad_norm": 0.04480042681097984,
"learning_rate": 2.2284043769706027e-06,
"loss": 0.0002,
"num_input_tokens_seen": 3906864,
"step": 415
},
{
"epoch": 7.0,
"grad_norm": 0.07072815299034119,
"learning_rate": 1.9030116872178316e-06,
"loss": 0.0004,
"num_input_tokens_seen": 3954440,
"step": 420
},
{
"epoch": 7.083333333333333,
"grad_norm": 0.002131677232682705,
"learning_rate": 1.6023518310668618e-06,
"loss": 0.0001,
"num_input_tokens_seen": 4001944,
"step": 425
},
{
"epoch": 7.166666666666667,
"grad_norm": 0.01150690671056509,
"learning_rate": 1.3267467626223606e-06,
"loss": 0.0003,
"num_input_tokens_seen": 4047736,
"step": 430
},
{
"epoch": 7.25,
"grad_norm": 0.0005230719689279795,
"learning_rate": 1.0764916066947794e-06,
"loss": 0.0001,
"num_input_tokens_seen": 4095480,
"step": 435
},
{
"epoch": 7.333333333333333,
"grad_norm": 0.0005898113595321774,
"learning_rate": 8.51854342773295e-07,
"loss": 0.0001,
"num_input_tokens_seen": 4142520,
"step": 440
},
{
"epoch": 7.416666666666667,
"grad_norm": 0.004632467869669199,
"learning_rate": 6.530755180666592e-07,
"loss": 0.001,
"num_input_tokens_seen": 4188920,
"step": 445
},
{
"epoch": 7.5,
"grad_norm": 0.007595964707434177,
"learning_rate": 4.803679899192392e-07,
"loss": 0.0001,
"num_input_tokens_seen": 4236368,
"step": 450
},
{
"epoch": 7.583333333333333,
"grad_norm": 0.00039777965866960585,
"learning_rate": 3.339166978780256e-07,
"loss": 0.0,
"num_input_tokens_seen": 4283360,
"step": 455
},
{
"epoch": 7.666666666666667,
"grad_norm": 0.12269078195095062,
"learning_rate": 2.1387846565474045e-07,
"loss": 0.0001,
"num_input_tokens_seen": 4330912,
"step": 460
},
{
"epoch": 7.75,
"grad_norm": 0.011688662692904472,
"learning_rate": 1.2038183319507955e-07,
"loss": 0.0191,
"num_input_tokens_seen": 4378920,
"step": 465
},
{
"epoch": 7.833333333333333,
"grad_norm": 0.32097485661506653,
"learning_rate": 5.352691903491303e-08,
"loss": 0.0016,
"num_input_tokens_seen": 4425608,
"step": 470
},
{
"epoch": 7.916666666666667,
"grad_norm": 0.001609671046026051,
"learning_rate": 1.3385313090857887e-08,
"loss": 0.0016,
"num_input_tokens_seen": 4472872,
"step": 475
},
{
"epoch": 8.0,
"grad_norm": 0.0008315809536725283,
"learning_rate": 0.0,
"loss": 0.0,
"num_input_tokens_seen": 4519360,
"step": 480
},
{
"epoch": 8.0,
"num_input_tokens_seen": 4519360,
"step": 480,
"total_flos": 7.178223503576986e+17,
"train_loss": 1.3478541066272858,
"train_runtime": 3084.094,
"train_samples_per_second": 1.245,
"train_steps_per_second": 0.156
}
],
"logging_steps": 5,
"max_steps": 480,
"num_input_tokens_seen": 4519360,
"num_train_epochs": 8,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.178223503576986e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}