{
"best_metric": 3.474938154220581,
"best_model_checkpoint": "./TAPT_data-V2_Bioformer-16L_LR-0.0005/checkpoint-6090",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 6090,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 49.082210540771484,
"learning_rate": 0.00047877358490566036,
"loss": 2.9555,
"step": 609
},
{
"epoch": 1.0,
"eval_loss": 3.884225845336914,
"eval_runtime": 2.2953,
"eval_samples_per_second": 847.372,
"eval_steps_per_second": 53.151,
"step": 609
},
{
"epoch": 2.0,
"grad_norm": 29.47579002380371,
"learning_rate": 0.0004255765199161425,
"loss": 3.4591,
"step": 1218
},
{
"epoch": 2.0,
"eval_loss": 4.176673412322998,
"eval_runtime": 2.3902,
"eval_samples_per_second": 813.746,
"eval_steps_per_second": 51.042,
"step": 1218
},
{
"epoch": 3.0,
"grad_norm": 20.8883113861084,
"learning_rate": 0.00037237945492662474,
"loss": 3.3266,
"step": 1827
},
{
"epoch": 3.0,
"eval_loss": 4.201623439788818,
"eval_runtime": 2.3441,
"eval_samples_per_second": 829.735,
"eval_steps_per_second": 52.045,
"step": 1827
},
{
"epoch": 4.0,
"grad_norm": 20.087749481201172,
"learning_rate": 0.0003191823899371069,
"loss": 3.0264,
"step": 2436
},
{
"epoch": 4.0,
"eval_loss": 4.025837421417236,
"eval_runtime": 2.1116,
"eval_samples_per_second": 921.11,
"eval_steps_per_second": 57.777,
"step": 2436
},
{
"epoch": 5.0,
"grad_norm": 24.700023651123047,
"learning_rate": 0.00026598532494758907,
"loss": 2.7402,
"step": 3045
},
{
"epoch": 5.0,
"eval_loss": 3.8213584423065186,
"eval_runtime": 2.3843,
"eval_samples_per_second": 815.767,
"eval_steps_per_second": 51.169,
"step": 3045
},
{
"epoch": 6.0,
"grad_norm": 21.42559051513672,
"learning_rate": 0.00021278825995807126,
"loss": 2.4781,
"step": 3654
},
{
"epoch": 6.0,
"eval_loss": 3.810668706893921,
"eval_runtime": 2.3336,
"eval_samples_per_second": 833.486,
"eval_steps_per_second": 52.28,
"step": 3654
},
{
"epoch": 7.0,
"grad_norm": 20.396045684814453,
"learning_rate": 0.00015959119496855345,
"loss": 2.2409,
"step": 4263
},
{
"epoch": 7.0,
"eval_loss": 3.685856819152832,
"eval_runtime": 2.1403,
"eval_samples_per_second": 908.742,
"eval_steps_per_second": 57.001,
"step": 4263
},
{
"epoch": 8.0,
"grad_norm": 18.948102951049805,
"learning_rate": 0.00010639412997903563,
"loss": 1.9816,
"step": 4872
},
{
"epoch": 8.0,
"eval_loss": 3.6025681495666504,
"eval_runtime": 2.1051,
"eval_samples_per_second": 923.932,
"eval_steps_per_second": 57.954,
"step": 4872
},
{
"epoch": 9.0,
"grad_norm": 26.760536193847656,
"learning_rate": 5.3197064989517815e-05,
"loss": 1.7825,
"step": 5481
},
{
"epoch": 9.0,
"eval_loss": 3.4999165534973145,
"eval_runtime": 2.0996,
"eval_samples_per_second": 926.384,
"eval_steps_per_second": 58.107,
"step": 5481
},
{
"epoch": 10.0,
"grad_norm": 25.620874404907227,
"learning_rate": 0.0,
"loss": 1.6141,
"step": 6090
},
{
"epoch": 10.0,
"eval_loss": 3.474938154220581,
"eval_runtime": 2.2881,
"eval_samples_per_second": 850.053,
"eval_steps_per_second": 53.32,
"step": 6090
},
{
"epoch": 10.0,
"step": 6090,
"total_flos": 1536221652235776.0,
"train_loss": 2.5605053229872228,
"train_runtime": 388.3929,
"train_samples_per_second": 250.597,
"train_steps_per_second": 15.68
}
],
"logging_steps": 500,
"max_steps": 6090,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1536221652235776.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}