{
"best_global_step": 20000,
"best_metric": 5.635913372039795,
"best_model_checkpoint": "tinybert_base_train_book_ent_15p_s_init/checkpoint-20000",
"epoch": 24.0,
"eval_steps": 10000,
"global_step": 27456,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4370629370629371,
"grad_norm": 3.084730386734009,
"learning_rate": 4.9900000000000005e-06,
"loss": 7.2145,
"step": 500
},
{
"epoch": 0.8741258741258742,
"grad_norm": 1.6794264316558838,
"learning_rate": 9.990000000000001e-06,
"loss": 6.6993,
"step": 1000
},
{
"epoch": 1.3111888111888113,
"grad_norm": 0.43600839376449585,
"learning_rate": 1.499e-05,
"loss": 6.4656,
"step": 1500
},
{
"epoch": 1.7482517482517483,
"grad_norm": 0.5343518853187561,
"learning_rate": 1.999e-05,
"loss": 6.0652,
"step": 2000
},
{
"epoch": 2.1853146853146854,
"grad_norm": 3.4993603229522705,
"learning_rate": 2.4990000000000003e-05,
"loss": 4.2034,
"step": 2500
},
{
"epoch": 2.6223776223776225,
"grad_norm": 0.8302733898162842,
"learning_rate": 2.9990000000000003e-05,
"loss": 4.1233,
"step": 3000
},
{
"epoch": 3.0594405594405596,
"grad_norm": 1.1415528059005737,
"learning_rate": 3.499e-05,
"loss": 6.0294,
"step": 3500
},
{
"epoch": 3.4965034965034967,
"grad_norm": 0.3220045864582062,
"learning_rate": 3.999e-05,
"loss": 7.6921,
"step": 4000
},
{
"epoch": 3.9335664335664333,
"grad_norm": 2.5710136890411377,
"learning_rate": 4.499e-05,
"loss": 7.4362,
"step": 4500
},
{
"epoch": 4.370629370629371,
"grad_norm": 1.61074960231781,
"learning_rate": 4.999e-05,
"loss": 6.096,
"step": 5000
},
{
"epoch": 4.8076923076923075,
"grad_norm": 0.29658418893814087,
"learning_rate": 5.499000000000001e-05,
"loss": 6.5745,
"step": 5500
},
{
"epoch": 5.244755244755245,
"grad_norm": 0.688091516494751,
"learning_rate": 5.999e-05,
"loss": 6.444,
"step": 6000
},
{
"epoch": 5.681818181818182,
"grad_norm": 0.6230639219284058,
"learning_rate": 6.499000000000001e-05,
"loss": 6.9739,
"step": 6500
},
{
"epoch": 6.118881118881119,
"grad_norm": 1.1754754781723022,
"learning_rate": 6.999e-05,
"loss": 5.7531,
"step": 7000
},
{
"epoch": 6.555944055944056,
"grad_norm": 4.404361248016357,
"learning_rate": 7.499e-05,
"loss": 5.3091,
"step": 7500
},
{
"epoch": 6.993006993006993,
"grad_norm": 0.781994104385376,
"learning_rate": 7.999000000000001e-05,
"loss": 5.9669,
"step": 8000
},
{
"epoch": 7.43006993006993,
"grad_norm": 1.3538726568222046,
"learning_rate": 8.499e-05,
"loss": 6.3625,
"step": 8500
},
{
"epoch": 7.867132867132867,
"grad_norm": 2.3600542545318604,
"learning_rate": 8.999000000000001e-05,
"loss": 5.153,
"step": 9000
},
{
"epoch": 8.304195804195805,
"grad_norm": 1.3912559747695923,
"learning_rate": 9.499e-05,
"loss": 6.0561,
"step": 9500
},
{
"epoch": 8.741258741258742,
"grad_norm": 1.6746373176574707,
"learning_rate": 9.999000000000001e-05,
"loss": 5.3929,
"step": 10000
},
{
"epoch": 8.741258741258742,
"eval_accuracy": 0.1617547379551555,
"eval_loss": 5.967167854309082,
"eval_runtime": 1.926,
"eval_samples_per_second": 248.696,
"eval_steps_per_second": 1.558,
"step": 10000
},
{
"epoch": 9.178321678321678,
"grad_norm": 1.5575922727584839,
"learning_rate": 9.714138405132907e-05,
"loss": 6.1957,
"step": 10500
},
{
"epoch": 9.615384615384615,
"grad_norm": 1.0062355995178223,
"learning_rate": 9.427703941338222e-05,
"loss": 5.8074,
"step": 11000
},
{
"epoch": 10.052447552447552,
"grad_norm": 0.869584321975708,
"learning_rate": 9.141269477543538e-05,
"loss": 4.0817,
"step": 11500
},
{
"epoch": 10.48951048951049,
"grad_norm": 1.2013421058654785,
"learning_rate": 8.854835013748855e-05,
"loss": 3.59,
"step": 12000
},
{
"epoch": 10.926573426573427,
"grad_norm": 0.9844303727149963,
"learning_rate": 8.568400549954171e-05,
"loss": 4.4935,
"step": 12500
},
{
"epoch": 11.363636363636363,
"grad_norm": 1.0267465114593506,
"learning_rate": 8.281966086159487e-05,
"loss": 4.2636,
"step": 13000
},
{
"epoch": 11.8006993006993,
"grad_norm": 1.0032545328140259,
"learning_rate": 7.995531622364803e-05,
"loss": 4.6715,
"step": 13500
},
{
"epoch": 12.237762237762238,
"grad_norm": 1.1699270009994507,
"learning_rate": 7.70909715857012e-05,
"loss": 5.0537,
"step": 14000
},
{
"epoch": 12.674825174825175,
"grad_norm": 1.651526927947998,
"learning_rate": 7.422662694775435e-05,
"loss": 5.8745,
"step": 14500
},
{
"epoch": 13.111888111888112,
"grad_norm": 1.1928800344467163,
"learning_rate": 7.136228230980752e-05,
"loss": 6.2548,
"step": 15000
},
{
"epoch": 13.548951048951048,
"grad_norm": 1.213062047958374,
"learning_rate": 6.849793767186068e-05,
"loss": 6.3592,
"step": 15500
},
{
"epoch": 13.986013986013987,
"grad_norm": 1.3651458024978638,
"learning_rate": 6.563359303391385e-05,
"loss": 6.3495,
"step": 16000
},
{
"epoch": 14.423076923076923,
"grad_norm": 1.116390347480774,
"learning_rate": 6.276924839596701e-05,
"loss": 6.2993,
"step": 16500
},
{
"epoch": 14.86013986013986,
"grad_norm": 1.324367642402649,
"learning_rate": 5.990490375802017e-05,
"loss": 6.2516,
"step": 17000
},
{
"epoch": 15.297202797202797,
"grad_norm": 1.1858265399932861,
"learning_rate": 5.7040559120073336e-05,
"loss": 6.1861,
"step": 17500
},
{
"epoch": 15.734265734265735,
"grad_norm": 1.1394177675247192,
"learning_rate": 5.4176214482126495e-05,
"loss": 6.0894,
"step": 18000
},
{
"epoch": 16.17132867132867,
"grad_norm": 1.223654866218567,
"learning_rate": 5.131186984417965e-05,
"loss": 6.0153,
"step": 18500
},
{
"epoch": 16.60839160839161,
"grad_norm": 1.0455007553100586,
"learning_rate": 4.844752520623282e-05,
"loss": 5.9313,
"step": 19000
},
{
"epoch": 17.045454545454547,
"grad_norm": 1.1198850870132446,
"learning_rate": 4.558318056828598e-05,
"loss": 5.8995,
"step": 19500
},
{
"epoch": 17.482517482517483,
"grad_norm": 1.2449570894241333,
"learning_rate": 4.271883593033914e-05,
"loss": 5.8297,
"step": 20000
},
{
"epoch": 17.482517482517483,
"eval_accuracy": 0.1853157281132229,
"eval_loss": 5.635913372039795,
"eval_runtime": 1.9422,
"eval_samples_per_second": 246.63,
"eval_steps_per_second": 1.545,
"step": 20000
},
{
"epoch": 17.91958041958042,
"grad_norm": 1.0795655250549316,
"learning_rate": 3.98544912923923e-05,
"loss": 5.7946,
"step": 20500
},
{
"epoch": 18.356643356643357,
"grad_norm": 1.3464816808700562,
"learning_rate": 3.699014665444546e-05,
"loss": 5.742,
"step": 21000
},
{
"epoch": 18.793706293706293,
"grad_norm": 1.2070534229278564,
"learning_rate": 3.4125802016498626e-05,
"loss": 5.725,
"step": 21500
},
{
"epoch": 19.23076923076923,
"grad_norm": 0.9267760515213013,
"learning_rate": 3.126145737855179e-05,
"loss": 5.6568,
"step": 22000
},
{
"epoch": 19.667832167832167,
"grad_norm": 1.0271191596984863,
"learning_rate": 2.8397112740604954e-05,
"loss": 5.6281,
"step": 22500
},
{
"epoch": 20.104895104895103,
"grad_norm": 1.1037367582321167,
"learning_rate": 2.5532768102658113e-05,
"loss": 5.6152,
"step": 23000
},
{
"epoch": 20.541958041958043,
"grad_norm": 1.0020090341567993,
"learning_rate": 2.2668423464711275e-05,
"loss": 5.5562,
"step": 23500
},
{
"epoch": 20.97902097902098,
"grad_norm": 1.1875511407852173,
"learning_rate": 1.9804078826764437e-05,
"loss": 5.5316,
"step": 24000
},
{
"epoch": 21.416083916083917,
"grad_norm": 1.1084744930267334,
"learning_rate": 1.69397341888176e-05,
"loss": 5.5283,
"step": 24500
},
{
"epoch": 21.853146853146853,
"grad_norm": 1.079026699066162,
"learning_rate": 1.4075389550870763e-05,
"loss": 5.5119,
"step": 25000
},
{
"epoch": 22.29020979020979,
"grad_norm": 1.0321869850158691,
"learning_rate": 1.1211044912923923e-05,
"loss": 5.4713,
"step": 25500
},
{
"epoch": 22.727272727272727,
"grad_norm": 1.1838607788085938,
"learning_rate": 8.346700274977086e-06,
"loss": 5.4734,
"step": 26000
},
{
"epoch": 23.164335664335663,
"grad_norm": 1.1609679460525513,
"learning_rate": 5.482355637030248e-06,
"loss": 5.4625,
"step": 26500
},
{
"epoch": 23.6013986013986,
"grad_norm": 0.9143561720848083,
"learning_rate": 2.61801099908341e-06,
"loss": 5.4393,
"step": 27000
},
{
"epoch": 24.0,
"step": 27456,
"total_flos": 2.175368143688663e+17,
"train_loss": 5.765684167821925,
"train_runtime": 24142.3828,
"train_samples_per_second": 227.291,
"train_steps_per_second": 1.137
}
],
"logging_steps": 500,
"max_steps": 27456,
"num_input_tokens_seen": 0,
"num_train_epochs": 24,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.175368143688663e+17,
"train_batch_size": 200,
"trial_name": null,
"trial_params": null
}