{
"best_metric": 39.43644074527058,
"best_model_checkpoint": "./whisper-lora-15k-adapters/checkpoint-237",
"epoch": 0.2775175644028103,
"eval_steps": 237,
"global_step": 237,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02927400468384075,
"grad_norm": 0.616703450679779,
"learning_rate": 0.0005,
"loss": 0.8917,
"step": 25
},
{
"epoch": 0.0585480093676815,
"grad_norm": 0.4405761659145355,
"learning_rate": 0.001,
"loss": 0.736,
"step": 50
},
{
"epoch": 0.08782201405152225,
"grad_norm": 0.4590846598148346,
"learning_rate": 0.0009940758293838863,
"loss": 0.7102,
"step": 75
},
{
"epoch": 0.117096018735363,
"grad_norm": 0.4082449972629547,
"learning_rate": 0.0009881516587677726,
"loss": 0.6302,
"step": 100
},
{
"epoch": 0.14637002341920374,
"grad_norm": 0.3845512866973877,
"learning_rate": 0.0009822274881516586,
"loss": 0.6538,
"step": 125
},
{
"epoch": 0.1756440281030445,
"grad_norm": 0.47404810786247253,
"learning_rate": 0.000976303317535545,
"loss": 0.6261,
"step": 150
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.38350749015808105,
"learning_rate": 0.0009703791469194313,
"loss": 0.6408,
"step": 175
},
{
"epoch": 0.234192037470726,
"grad_norm": 0.4432656466960907,
"learning_rate": 0.0009644549763033176,
"loss": 0.5789,
"step": 200
},
{
"epoch": 0.26346604215456676,
"grad_norm": 0.4181855618953705,
"learning_rate": 0.0009585308056872039,
"loss": 0.5673,
"step": 225
},
{
"epoch": 0.2775175644028103,
"eval_loss": 0.6236673593521118,
"eval_runtime": 10166.9784,
"eval_samples_per_second": 0.149,
"eval_steps_per_second": 0.009,
"eval_wer": 39.43644074527058,
"step": 237
}
],
"logging_steps": 25,
"max_steps": 4270,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 237,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.92166536380416e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}