{
"best_metric": 0.043050188571214676,
"best_model_checkpoint": "whisper-finetuned_iter1/checkpoint-95",
"epoch": 4.810126582278481,
"eval_steps": 500,
"global_step": 95,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.9620253164556962,
"eval_loss": 3.2106480598449707,
"eval_runtime": 1.3528,
"eval_samples_per_second": 13.306,
"eval_steps_per_second": 6.653,
"step": 19
},
{
"epoch": 1.2658227848101267,
"grad_norm": 5.736849308013916,
"learning_rate": 4.800000000000001e-06,
"loss": 3.6971,
"step": 25
},
{
"epoch": 1.9746835443037973,
"eval_loss": 2.0353643894195557,
"eval_runtime": 1.3659,
"eval_samples_per_second": 13.178,
"eval_steps_per_second": 6.589,
"step": 39
},
{
"epoch": 2.5316455696202533,
"grad_norm": 5.99801778793335,
"learning_rate": 9.800000000000001e-06,
"loss": 2.1556,
"step": 50
},
{
"epoch": 2.9873417721518987,
"eval_loss": 0.541114091873169,
"eval_runtime": 1.3717,
"eval_samples_per_second": 13.123,
"eval_steps_per_second": 6.561,
"step": 59
},
{
"epoch": 3.7974683544303796,
"grad_norm": 4.196587085723877,
"learning_rate": 4.666666666666667e-06,
"loss": 0.5293,
"step": 75
},
{
"epoch": 4.0,
"eval_loss": 0.08589410781860352,
"eval_runtime": 1.4028,
"eval_samples_per_second": 12.831,
"eval_steps_per_second": 6.416,
"step": 79
},
{
"epoch": 4.810126582278481,
"eval_loss": 0.043050188571214676,
"eval_runtime": 1.4521,
"eval_samples_per_second": 12.396,
"eval_steps_per_second": 6.198,
"step": 95
}
],
"logging_steps": 25,
"max_steps": 95,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.859877838848e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}