{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.00010184492073919044,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.697415345653174e-06,
"grad_norm": 14.852449417114258,
"learning_rate": 0.0,
"loss": 4.8121,
"step": 1
},
{
"epoch": 3.394830691306348e-06,
"grad_norm": 14.8806791305542,
"learning_rate": 4e-05,
"loss": 4.6917,
"step": 2
},
{
"epoch": 5.092246036959522e-06,
"grad_norm": 13.359925270080566,
"learning_rate": 8e-05,
"loss": 4.7274,
"step": 3
},
{
"epoch": 6.789661382612696e-06,
"grad_norm": 15.945028305053711,
"learning_rate": 0.00012,
"loss": 4.4832,
"step": 4
},
{
"epoch": 8.48707672826587e-06,
"grad_norm": 15.983574867248535,
"learning_rate": 0.00016,
"loss": 3.7165,
"step": 5
},
{
"epoch": 1.0184492073919044e-05,
"grad_norm": 8.159729957580566,
"learning_rate": 0.0002,
"loss": 3.1151,
"step": 6
},
{
"epoch": 1.1881907419572217e-05,
"grad_norm": 2.5195391178131104,
"learning_rate": 0.00019636363636363636,
"loss": 2.3097,
"step": 7
},
{
"epoch": 1.3579322765225391e-05,
"grad_norm": 2.4503605365753174,
"learning_rate": 0.00019272727272727274,
"loss": 2.5628,
"step": 8
},
{
"epoch": 1.5276738110878564e-05,
"grad_norm": 1.8374202251434326,
"learning_rate": 0.0001890909090909091,
"loss": 2.0669,
"step": 9
},
{
"epoch": 1.697415345653174e-05,
"grad_norm": 1.6104134321212769,
"learning_rate": 0.00018545454545454545,
"loss": 2.5027,
"step": 10
},
{
"epoch": 1.8671568802184913e-05,
"grad_norm": 1.375555396080017,
"learning_rate": 0.00018181818181818183,
"loss": 2.331,
"step": 11
},
{
"epoch": 2.036898414783809e-05,
"grad_norm": 1.073371410369873,
"learning_rate": 0.0001781818181818182,
"loss": 2.3672,
"step": 12
},
{
"epoch": 2.206639949349126e-05,
"grad_norm": 0.8431914448738098,
"learning_rate": 0.00017454545454545454,
"loss": 2.2733,
"step": 13
},
{
"epoch": 2.3763814839144434e-05,
"grad_norm": 0.7211784720420837,
"learning_rate": 0.0001709090909090909,
"loss": 2.3543,
"step": 14
},
{
"epoch": 2.546123018479761e-05,
"grad_norm": 0.7179782390594482,
"learning_rate": 0.00016727272727272728,
"loss": 2.329,
"step": 15
},
{
"epoch": 2.7158645530450783e-05,
"grad_norm": 0.8131771683692932,
"learning_rate": 0.00016363636363636366,
"loss": 2.3005,
"step": 16
},
{
"epoch": 2.8856060876103955e-05,
"grad_norm": 0.6170381307601929,
"learning_rate": 0.00016,
"loss": 2.3647,
"step": 17
},
{
"epoch": 3.055347622175713e-05,
"grad_norm": 0.5012179613113403,
"learning_rate": 0.00015636363636363637,
"loss": 2.1676,
"step": 18
},
{
"epoch": 3.2250891567410304e-05,
"grad_norm": 0.5283344388008118,
"learning_rate": 0.00015272727272727275,
"loss": 2.6384,
"step": 19
},
{
"epoch": 3.394830691306348e-05,
"grad_norm": 0.4352604150772095,
"learning_rate": 0.0001490909090909091,
"loss": 2.3207,
"step": 20
},
{
"epoch": 3.564572225871665e-05,
"grad_norm": 0.4322673976421356,
"learning_rate": 0.00014545454545454546,
"loss": 2.202,
"step": 21
},
{
"epoch": 3.7343137604369825e-05,
"grad_norm": 0.41870686411857605,
"learning_rate": 0.00014181818181818184,
"loss": 2.2743,
"step": 22
},
{
"epoch": 3.9040552950023e-05,
"grad_norm": 0.4491978585720062,
"learning_rate": 0.0001381818181818182,
"loss": 2.1997,
"step": 23
},
{
"epoch": 4.073796829567618e-05,
"grad_norm": 0.35630711913108826,
"learning_rate": 0.00013454545454545455,
"loss": 2.2946,
"step": 24
},
{
"epoch": 4.2435383641329346e-05,
"grad_norm": 0.45095592737197876,
"learning_rate": 0.00013090909090909093,
"loss": 2.4473,
"step": 25
},
{
"epoch": 4.413279898698252e-05,
"grad_norm": 0.3589417338371277,
"learning_rate": 0.00012727272727272728,
"loss": 2.1492,
"step": 26
},
{
"epoch": 4.58302143326357e-05,
"grad_norm": 0.3918216824531555,
"learning_rate": 0.00012363636363636364,
"loss": 2.2618,
"step": 27
},
{
"epoch": 4.752762967828887e-05,
"grad_norm": 0.4695117473602295,
"learning_rate": 0.00012,
"loss": 2.2992,
"step": 28
},
{
"epoch": 4.9225045023942044e-05,
"grad_norm": 0.36814066767692566,
"learning_rate": 0.00011636363636363636,
"loss": 2.0903,
"step": 29
},
{
"epoch": 5.092246036959522e-05,
"grad_norm": 0.30460822582244873,
"learning_rate": 0.00011272727272727272,
"loss": 2.1443,
"step": 30
},
{
"epoch": 5.261987571524839e-05,
"grad_norm": 0.34920844435691833,
"learning_rate": 0.00010909090909090909,
"loss": 2.2643,
"step": 31
},
{
"epoch": 5.4317291060901565e-05,
"grad_norm": 0.33444303274154663,
"learning_rate": 0.00010545454545454545,
"loss": 2.1944,
"step": 32
},
{
"epoch": 5.601470640655474e-05,
"grad_norm": 0.3292400538921356,
"learning_rate": 0.00010181818181818181,
"loss": 2.2034,
"step": 33
},
{
"epoch": 5.771212175220791e-05,
"grad_norm": 0.3749557137489319,
"learning_rate": 9.818181818181818e-05,
"loss": 2.0774,
"step": 34
},
{
"epoch": 5.9409537097861086e-05,
"grad_norm": 0.3593912124633789,
"learning_rate": 9.454545454545455e-05,
"loss": 2.1118,
"step": 35
},
{
"epoch": 6.110695244351426e-05,
"grad_norm": 0.29883214831352234,
"learning_rate": 9.090909090909092e-05,
"loss": 2.3215,
"step": 36
},
{
"epoch": 6.280436778916744e-05,
"grad_norm": 0.3091987669467926,
"learning_rate": 8.727272727272727e-05,
"loss": 2.2502,
"step": 37
},
{
"epoch": 6.450178313482061e-05,
"grad_norm": 0.3151979446411133,
"learning_rate": 8.363636363636364e-05,
"loss": 2.2538,
"step": 38
},
{
"epoch": 6.619919848047378e-05,
"grad_norm": 0.28855639696121216,
"learning_rate": 8e-05,
"loss": 2.277,
"step": 39
},
{
"epoch": 6.789661382612696e-05,
"grad_norm": 0.34061557054519653,
"learning_rate": 7.636363636363637e-05,
"loss": 2.1291,
"step": 40
},
{
"epoch": 6.959402917178013e-05,
"grad_norm": 0.2809044122695923,
"learning_rate": 7.272727272727273e-05,
"loss": 2.2523,
"step": 41
},
{
"epoch": 7.12914445174333e-05,
"grad_norm": 0.347295880317688,
"learning_rate": 6.90909090909091e-05,
"loss": 2.2234,
"step": 42
},
{
"epoch": 7.298885986308648e-05,
"grad_norm": 0.34090250730514526,
"learning_rate": 6.545454545454546e-05,
"loss": 2.39,
"step": 43
},
{
"epoch": 7.468627520873965e-05,
"grad_norm": 0.32947802543640137,
"learning_rate": 6.181818181818182e-05,
"loss": 2.4902,
"step": 44
},
{
"epoch": 7.638369055439282e-05,
"grad_norm": 0.3437351882457733,
"learning_rate": 5.818181818181818e-05,
"loss": 1.993,
"step": 45
},
{
"epoch": 7.8081105900046e-05,
"grad_norm": 0.3611629605293274,
"learning_rate": 5.4545454545454546e-05,
"loss": 2.2408,
"step": 46
},
{
"epoch": 7.977852124569917e-05,
"grad_norm": 0.27960702776908875,
"learning_rate": 5.090909090909091e-05,
"loss": 2.1705,
"step": 47
},
{
"epoch": 8.147593659135235e-05,
"grad_norm": 0.40314871072769165,
"learning_rate": 4.7272727272727275e-05,
"loss": 2.2786,
"step": 48
},
{
"epoch": 8.317335193700552e-05,
"grad_norm": 0.3217058777809143,
"learning_rate": 4.3636363636363636e-05,
"loss": 2.3335,
"step": 49
},
{
"epoch": 8.487076728265869e-05,
"grad_norm": 0.25312352180480957,
"learning_rate": 4e-05,
"loss": 2.1215,
"step": 50
},
{
"epoch": 8.656818262831188e-05,
"grad_norm": 0.38854506611824036,
"learning_rate": 3.6363636363636364e-05,
"loss": 2.3635,
"step": 51
},
{
"epoch": 8.826559797396505e-05,
"grad_norm": 0.30721694231033325,
"learning_rate": 3.272727272727273e-05,
"loss": 2.1679,
"step": 52
},
{
"epoch": 8.996301331961821e-05,
"grad_norm": 0.3165723979473114,
"learning_rate": 2.909090909090909e-05,
"loss": 2.2843,
"step": 53
},
{
"epoch": 9.16604286652714e-05,
"grad_norm": 0.30133214592933655,
"learning_rate": 2.5454545454545454e-05,
"loss": 2.3054,
"step": 54
},
{
"epoch": 9.335784401092457e-05,
"grad_norm": 0.3381540775299072,
"learning_rate": 2.1818181818181818e-05,
"loss": 2.1347,
"step": 55
},
{
"epoch": 9.505525935657774e-05,
"grad_norm": 0.29754453897476196,
"learning_rate": 1.8181818181818182e-05,
"loss": 2.1742,
"step": 56
},
{
"epoch": 9.675267470223092e-05,
"grad_norm": 0.23036840558052063,
"learning_rate": 1.4545454545454545e-05,
"loss": 2.134,
"step": 57
},
{
"epoch": 9.845009004788409e-05,
"grad_norm": 0.3304996192455292,
"learning_rate": 1.0909090909090909e-05,
"loss": 2.3209,
"step": 58
},
{
"epoch": 0.00010014750539353726,
"grad_norm": 0.3149855136871338,
"learning_rate": 7.272727272727272e-06,
"loss": 2.1926,
"step": 59
},
{
"epoch": 0.00010184492073919044,
"grad_norm": 0.3483605682849884,
"learning_rate": 3.636363636363636e-06,
"loss": 2.2702,
"step": 60
},
{
"epoch": 0.00010184492073919044,
"step": 60,
"total_flos": 1.62142612718059e+17,
"train_loss": 2.4620304067929584,
"train_runtime": 2098.6864,
"train_samples_per_second": 0.229,
"train_steps_per_second": 0.029
}
],
"logging_steps": 1,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.62142612718059e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}