{
"best_metric": 1.5578951835632324,
"best_model_checkpoint": "/nfs/production/literature/amina-mardiyyah/new_data/OT-Entity-Extraction-Pipeline/model_outputs/Continued_pretraining/TAPT/microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext/Mardiyyah/TAPT_data_V2_split/tapt_llrd_only_LR-2e-05/checkpoint-1827",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 6090,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 31.149822235107422,
"learning_rate": 2e-05,
"loss": 1.8412,
"step": 609
},
{
"epoch": 1.0,
"eval_loss": 1.6327614784240723,
"eval_runtime": 3.3806,
"eval_samples_per_second": 575.345,
"eval_steps_per_second": 36.088,
"step": 609
},
{
"epoch": 2.0,
"grad_norm": 42.3134651184082,
"learning_rate": 1.7777777777777777e-05,
"loss": 1.6676,
"step": 1218
},
{
"epoch": 2.0,
"eval_loss": 1.6140305995941162,
"eval_runtime": 3.4237,
"eval_samples_per_second": 568.095,
"eval_steps_per_second": 35.634,
"step": 1218
},
{
"epoch": 3.0,
"grad_norm": 40.80204772949219,
"learning_rate": 1.555555555555556e-05,
"loss": 1.5616,
"step": 1827
},
{
"epoch": 3.0,
"eval_loss": 1.5578951835632324,
"eval_runtime": 3.3764,
"eval_samples_per_second": 576.059,
"eval_steps_per_second": 36.133,
"step": 1827
},
{
"epoch": 4.0,
"grad_norm": 36.631927490234375,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.4876,
"step": 2436
},
{
"epoch": 4.0,
"eval_loss": 1.6425701379776,
"eval_runtime": 3.4142,
"eval_samples_per_second": 569.684,
"eval_steps_per_second": 35.733,
"step": 2436
},
{
"epoch": 5.0,
"grad_norm": 58.63182067871094,
"learning_rate": 1.1111111111111113e-05,
"loss": 1.416,
"step": 3045
},
{
"epoch": 5.0,
"eval_loss": 1.6180479526519775,
"eval_runtime": 3.4615,
"eval_samples_per_second": 561.887,
"eval_steps_per_second": 35.244,
"step": 3045
},
{
"epoch": 6.0,
"grad_norm": 25.30164909362793,
"learning_rate": 8.888888888888888e-06,
"loss": 1.3898,
"step": 3654
},
{
"epoch": 6.0,
"eval_loss": 1.6720105409622192,
"eval_runtime": 3.4401,
"eval_samples_per_second": 565.384,
"eval_steps_per_second": 35.464,
"step": 3654
},
{
"epoch": 7.0,
"grad_norm": 27.665294647216797,
"learning_rate": 6.666666666666667e-06,
"loss": 1.3674,
"step": 4263
},
{
"epoch": 7.0,
"eval_loss": 1.682289481163025,
"eval_runtime": 3.4548,
"eval_samples_per_second": 562.987,
"eval_steps_per_second": 35.313,
"step": 4263
},
{
"epoch": 8.0,
"grad_norm": 35.61056137084961,
"learning_rate": 4.444444444444444e-06,
"loss": 1.3325,
"step": 4872
},
{
"epoch": 8.0,
"eval_loss": 1.6565485000610352,
"eval_runtime": 3.4917,
"eval_samples_per_second": 557.031,
"eval_steps_per_second": 34.94,
"step": 4872
},
{
"epoch": 9.0,
"grad_norm": 24.077539443969727,
"learning_rate": 2.222222222222222e-06,
"loss": 1.3063,
"step": 5481
},
{
"epoch": 9.0,
"eval_loss": 1.6364006996154785,
"eval_runtime": 3.3972,
"eval_samples_per_second": 572.532,
"eval_steps_per_second": 35.912,
"step": 5481
},
{
"epoch": 10.0,
"grad_norm": 21.178720474243164,
"learning_rate": 0.0,
"loss": 1.3161,
"step": 6090
},
{
"epoch": 10.0,
"eval_loss": 1.6274725198745728,
"eval_runtime": 3.3662,
"eval_samples_per_second": 577.807,
"eval_steps_per_second": 36.243,
"step": 6090
},
{
"epoch": 10.0,
"step": 6090,
"total_flos": 4339270626462000.0,
"train_loss": 1.4685930624775503,
"train_runtime": 599.5073,
"train_samples_per_second": 162.35,
"train_steps_per_second": 10.158
}
],
"logging_steps": 500,
"max_steps": 6090,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4339270626462000.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}