{
"best_metric": 39.43644074527058,
"best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-237",
"epoch": 0.5550351288056206,
"eval_steps": 237,
"global_step": 474,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.616703450679779,
"learning_rate": 0.0005,
"loss": 0.8917,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.4405761659145355,
"learning_rate": 0.001,
"loss": 0.736,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.4590846598148346,
"learning_rate": 0.0009940758293838863,
"loss": 0.7102,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.4082449972629547,
"learning_rate": 0.0009881516587677726,
"loss": 0.6302,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.3845512866973877,
"learning_rate": 0.0009822274881516586,
"loss": 0.6538,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.47404810786247253,
"learning_rate": 0.000976303317535545,
"loss": 0.6261,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.38350749015808105,
"learning_rate": 0.0009703791469194313,
"loss": 0.6408,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.4432656466960907,
"learning_rate": 0.0009644549763033176,
"loss": 0.5789,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4181855618953705,
"learning_rate": 0.0009585308056872039,
"loss": 0.5673,
"step": 225
},
{
"epoch": 0.2775175644028103,
"eval_loss": 0.6236673593521118,
"eval_runtime": 10166.9784,
"eval_samples_per_second": 0.149,
"eval_steps_per_second": 0.009,
"eval_wer": 39.43644074527058,
"step": 237
},
{
"epoch": 0.2927400468384075,
"grad_norm": 0.5231379866600037,
"learning_rate": 0.0009526066350710901,
"loss": 0.6721,
"step": 250
},
{
"epoch": 0.32201405152224827,
"grad_norm": 1.0669022798538208,
"learning_rate": 0.0009466824644549763,
"loss": 0.6632,
"step": 275
},
{
"epoch": 0.351288056206089,
"grad_norm": 0.408888578414917,
"learning_rate": 0.0009407582938388626,
"loss": 0.5663,
"step": 300
},
{
"epoch": 0.3805620608899297,
"grad_norm": 0.45238634943962097,
"learning_rate": 0.0009348341232227489,
"loss": 0.5634,
"step": 325
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.3726654648780823,
"learning_rate": 0.0009289099526066352,
"loss": 0.5848,
"step": 350
},
{
"epoch": 0.43911007025761123,
"grad_norm": 0.3688499927520752,
"learning_rate": 0.0009229857819905212,
"loss": 0.6182,
"step": 375
},
{
"epoch": 0.468384074941452,
"grad_norm": 0.483505517244339,
"learning_rate": 0.0009170616113744075,
"loss": 0.6721,
"step": 400
},
{
"epoch": 0.49765807962529274,
"grad_norm": 0.4502080976963043,
"learning_rate": 0.0009111374407582938,
"loss": 0.5218,
"step": 425
},
{
"epoch": 0.5269320843091335,
"grad_norm": 0.3693532645702362,
"learning_rate": 0.0009052132701421801,
"loss": 0.652,
"step": 450
},
{
"epoch": 0.5550351288056206,
"eval_loss": 0.5961939096450806,
"eval_runtime": 10101.5474,
"eval_samples_per_second": 0.15,
"eval_steps_per_second": 0.009,
"eval_wer": 40.3672347792938,
"step": 474
}
],
"logging_steps": 25,
"max_steps": 4270,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 237,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.84333072760832e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}