{
  "best_global_step": 39,
  "best_metric": 0.6173285198555957,
  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_14/run-0/checkpoint-39",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 39,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2564102564102564,
      "grad_norm": 1.662625789642334,
      "learning_rate": 9.487179487179487e-05,
      "loss": 0.696,
      "step": 10
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 2.0300142765045166,
      "learning_rate": 8.974358974358975e-05,
      "loss": 0.6793,
      "step": 20
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 4.4956440925598145,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.6499,
      "step": 30
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6173285198555957,
      "eval_loss": 0.6330550909042358,
      "eval_runtime": 0.6611,
      "eval_samples_per_second": 418.98,
      "eval_steps_per_second": 7.563,
      "step": 39
    }
  ],
  "logging_steps": 10,
  "max_steps": 195,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 194932403139840.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": {
    "dropout_rate": 0.01,
    "learning_rate": 0.0001,
    "max_length": 32,
    "num_train_epochs": 5,
    "per_device_train_batch_size": 64
  }
}