{
"best_metric": 0.5802612119808278,
"best_model_checkpoint": "whisper-tiny-luganda-v2/checkpoint-3000",
"epoch": 0.6525995214270176,
"eval_steps": 500,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021753317380900587,
"grad_norm": 47.60859680175781,
"learning_rate": 4.5238095238095235e-06,
"loss": 9.6365,
"step": 100
},
{
"epoch": 0.04350663476180117,
"grad_norm": 41.293460845947266,
"learning_rate": 9.285714285714286e-06,
"loss": 4.282,
"step": 200
},
{
"epoch": 0.06525995214270176,
"grad_norm": 29.167564392089844,
"learning_rate": 1.4047619047619048e-05,
"loss": 2.8974,
"step": 300
},
{
"epoch": 0.08701326952360235,
"grad_norm": 30.911415100097656,
"learning_rate": 1.880952380952381e-05,
"loss": 2.2716,
"step": 400
},
{
"epoch": 0.10876658690450294,
"grad_norm": 25.824464797973633,
"learning_rate": 2.357142857142857e-05,
"loss": 2.0498,
"step": 500
},
{
"epoch": 0.10876658690450294,
"eval_loss": 0.9947264194488525,
"eval_runtime": 2111.5938,
"eval_samples_per_second": 6.458,
"eval_steps_per_second": 1.615,
"eval_wer": 0.8350676193361011,
"step": 500
},
{
"epoch": 0.13051990428540353,
"grad_norm": 22.513357162475586,
"learning_rate": 2.8333333333333335e-05,
"loss": 1.8357,
"step": 600
},
{
"epoch": 0.1522732216663041,
"grad_norm": 22.64017105102539,
"learning_rate": 3.309523809523809e-05,
"loss": 1.7019,
"step": 700
},
{
"epoch": 0.1740265390472047,
"grad_norm": 22.659271240234375,
"learning_rate": 3.785714285714286e-05,
"loss": 1.6458,
"step": 800
},
{
"epoch": 0.19577985642810528,
"grad_norm": 19.53955841064453,
"learning_rate": 4.261904761904762e-05,
"loss": 1.6001,
"step": 900
},
{
"epoch": 0.2175331738090059,
"grad_norm": 21.89055061340332,
"learning_rate": 4.738095238095238e-05,
"loss": 1.5134,
"step": 1000
},
{
"epoch": 0.2175331738090059,
"eval_loss": 0.7854410409927368,
"eval_runtime": 2131.7562,
"eval_samples_per_second": 6.397,
"eval_steps_per_second": 1.6,
"eval_wer": 0.7198385688576876,
"step": 1000
},
{
"epoch": 0.23928649118990647,
"grad_norm": 24.862058639526367,
"learning_rate": 4.988721804511278e-05,
"loss": 1.421,
"step": 1100
},
{
"epoch": 0.26103980857080705,
"grad_norm": 18.665178298950195,
"learning_rate": 4.963659147869674e-05,
"loss": 1.4254,
"step": 1200
},
{
"epoch": 0.28279312595170764,
"grad_norm": 16.429445266723633,
"learning_rate": 4.93859649122807e-05,
"loss": 1.3466,
"step": 1300
},
{
"epoch": 0.3045464433326082,
"grad_norm": 24.4464054107666,
"learning_rate": 4.913533834586466e-05,
"loss": 1.3342,
"step": 1400
},
{
"epoch": 0.3262997607135088,
"grad_norm": 14.964367866516113,
"learning_rate": 4.888471177944862e-05,
"loss": 1.2494,
"step": 1500
},
{
"epoch": 0.3262997607135088,
"eval_loss": 0.6914966106414795,
"eval_runtime": 2154.6429,
"eval_samples_per_second": 6.329,
"eval_steps_per_second": 1.583,
"eval_wer": 0.7817383247509934,
"step": 1500
},
{
"epoch": 0.3480530780944094,
"grad_norm": 17.042715072631836,
"learning_rate": 4.863408521303258e-05,
"loss": 1.2668,
"step": 1600
},
{
"epoch": 0.36980639547530997,
"grad_norm": 14.82885456085205,
"learning_rate": 4.838345864661654e-05,
"loss": 1.2279,
"step": 1700
},
{
"epoch": 0.39155971285621055,
"grad_norm": 19.486909866333008,
"learning_rate": 4.81328320802005e-05,
"loss": 1.2124,
"step": 1800
},
{
"epoch": 0.41331303023711113,
"grad_norm": 18.727062225341797,
"learning_rate": 4.7882205513784464e-05,
"loss": 1.2099,
"step": 1900
},
{
"epoch": 0.4350663476180118,
"grad_norm": 17.26986312866211,
"learning_rate": 4.7631578947368424e-05,
"loss": 1.1587,
"step": 2000
},
{
"epoch": 0.4350663476180118,
"eval_loss": 0.6377026438713074,
"eval_runtime": 2184.0106,
"eval_samples_per_second": 6.244,
"eval_steps_per_second": 1.561,
"eval_wer": 0.7916005915578284,
"step": 2000
},
{
"epoch": 0.45681966499891236,
"grad_norm": 20.09056282043457,
"learning_rate": 4.738095238095238e-05,
"loss": 1.1293,
"step": 2100
},
{
"epoch": 0.47857298237981294,
"grad_norm": 12.891928672790527,
"learning_rate": 4.713032581453634e-05,
"loss": 1.1347,
"step": 2200
},
{
"epoch": 0.5003262997607135,
"grad_norm": 14.460548400878906,
"learning_rate": 4.68796992481203e-05,
"loss": 1.1219,
"step": 2300
},
{
"epoch": 0.5220796171416141,
"grad_norm": 14.870162963867188,
"learning_rate": 4.662907268170426e-05,
"loss": 1.0758,
"step": 2400
},
{
"epoch": 0.5438329345225147,
"grad_norm": 15.290679931640625,
"learning_rate": 4.637844611528822e-05,
"loss": 1.0453,
"step": 2500
},
{
"epoch": 0.5438329345225147,
"eval_loss": 0.5957969427108765,
"eval_runtime": 2164.8981,
"eval_samples_per_second": 6.299,
"eval_steps_per_second": 1.575,
"eval_wer": 0.7275804928460703,
"step": 2500
},
{
"epoch": 0.5655862519034153,
"grad_norm": 14.45456314086914,
"learning_rate": 4.612781954887218e-05,
"loss": 1.0498,
"step": 2600
},
{
"epoch": 0.5873395692843159,
"grad_norm": 14.366482734680176,
"learning_rate": 4.587719298245614e-05,
"loss": 1.0699,
"step": 2700
},
{
"epoch": 0.6090928866652164,
"grad_norm": 20.212724685668945,
"learning_rate": 4.56265664160401e-05,
"loss": 1.0341,
"step": 2800
},
{
"epoch": 0.630846204046117,
"grad_norm": 14.940664291381836,
"learning_rate": 4.537593984962406e-05,
"loss": 1.0165,
"step": 2900
},
{
"epoch": 0.6525995214270176,
"grad_norm": 12.089710235595703,
"learning_rate": 4.512531328320802e-05,
"loss": 1.0065,
"step": 3000
},
{
"epoch": 0.6525995214270176,
"eval_loss": 0.5708178281784058,
"eval_runtime": 2103.4657,
"eval_samples_per_second": 6.483,
"eval_steps_per_second": 1.621,
"eval_wer": 0.5802612119808278,
"step": 3000
}
],
"logging_steps": 100,
"max_steps": 21000,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.18170648576e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}