aug_audio_tiny / last-checkpoint / trainer_state.json
h-myatcho's picture
Training in progress, step 500, checkpoint
610a77a verified
{
"best_metric": 93.46343226272757,
"best_model_checkpoint": "./whisper-small-lt/checkpoint-500",
"epoch": 0.37509377344336087,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018754688672168042,
"grad_norm": 23.011449813842773,
"learning_rate": 4.4e-07,
"loss": 1.8448,
"step": 25
},
{
"epoch": 0.037509377344336084,
"grad_norm": 12.146849632263184,
"learning_rate": 9.400000000000001e-07,
"loss": 1.6768,
"step": 50
},
{
"epoch": 0.056264066016504126,
"grad_norm": 7.498438358306885,
"learning_rate": 1.44e-06,
"loss": 1.4548,
"step": 75
},
{
"epoch": 0.07501875468867217,
"grad_norm": 5.752747535705566,
"learning_rate": 1.94e-06,
"loss": 1.287,
"step": 100
},
{
"epoch": 0.09377344336084022,
"grad_norm": 7.7577338218688965,
"learning_rate": 2.4400000000000004e-06,
"loss": 1.1515,
"step": 125
},
{
"epoch": 0.11252813203300825,
"grad_norm": 7.937697410583496,
"learning_rate": 2.9400000000000002e-06,
"loss": 1.0615,
"step": 150
},
{
"epoch": 0.1312828207051763,
"grad_norm": 7.121279239654541,
"learning_rate": 3.44e-06,
"loss": 0.9823,
"step": 175
},
{
"epoch": 0.15003750937734434,
"grad_norm": 22.921356201171875,
"learning_rate": 3.94e-06,
"loss": 0.9172,
"step": 200
},
{
"epoch": 0.16879219804951237,
"grad_norm": 13.65730094909668,
"learning_rate": 4.440000000000001e-06,
"loss": 0.863,
"step": 225
},
{
"epoch": 0.18754688672168043,
"grad_norm": 8.611408233642578,
"learning_rate": 4.94e-06,
"loss": 0.8367,
"step": 250
},
{
"epoch": 0.20630157539384847,
"grad_norm": 9.173303604125977,
"learning_rate": 5.4400000000000004e-06,
"loss": 0.8039,
"step": 275
},
{
"epoch": 0.2250562640660165,
"grad_norm": 13.330084800720215,
"learning_rate": 5.94e-06,
"loss": 0.783,
"step": 300
},
{
"epoch": 0.24381095273818454,
"grad_norm": 11.618350982666016,
"learning_rate": 6.440000000000001e-06,
"loss": 0.7571,
"step": 325
},
{
"epoch": 0.2625656414103526,
"grad_norm": 13.276190757751465,
"learning_rate": 6.9400000000000005e-06,
"loss": 0.7442,
"step": 350
},
{
"epoch": 0.2813203300825206,
"grad_norm": 11.061415672302246,
"learning_rate": 7.440000000000001e-06,
"loss": 0.7348,
"step": 375
},
{
"epoch": 0.30007501875468867,
"grad_norm": 10.216636657714844,
"learning_rate": 7.94e-06,
"loss": 0.7038,
"step": 400
},
{
"epoch": 0.31882970742685673,
"grad_norm": 17.22205352783203,
"learning_rate": 8.44e-06,
"loss": 0.6925,
"step": 425
},
{
"epoch": 0.33758439609902474,
"grad_norm": 14.04393482208252,
"learning_rate": 8.94e-06,
"loss": 0.6802,
"step": 450
},
{
"epoch": 0.3563390847711928,
"grad_norm": 13.6129732131958,
"learning_rate": 9.440000000000001e-06,
"loss": 0.6579,
"step": 475
},
{
"epoch": 0.37509377344336087,
"grad_norm": 11.165059089660645,
"learning_rate": 9.940000000000001e-06,
"loss": 0.6496,
"step": 500
},
{
"epoch": 0.37509377344336087,
"eval_loss": 0.6465398669242859,
"eval_runtime": 4808.13,
"eval_samples_per_second": 1.639,
"eval_steps_per_second": 0.205,
"eval_wer": 93.46343226272757,
"step": 500
}
],
"logging_steps": 25,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.9695108096e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}