{
  "best_metric": 11.271395917499468,
  "best_model_checkpoint": "whisper-it-small/checkpoint-932",
  "epoch": 2.0,
  "global_step": 932,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 9.600000000000001e-07,
      "loss": 0.9816,
      "step": 50
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.9600000000000003e-06,
      "loss": 0.519,
      "step": 100
    },
    {
      "epoch": 0.32,
      "learning_rate": 2.96e-06,
      "loss": 0.3418,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.96e-06,
      "loss": 0.3032,
      "step": 200
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.960000000000001e-06,
      "loss": 0.2901,
      "step": 250
    },
    {
      "epoch": 0.64,
      "learning_rate": 5.9600000000000005e-06,
      "loss": 0.2783,
      "step": 300
    },
    {
      "epoch": 0.75,
      "learning_rate": 6.96e-06,
      "loss": 0.2771,
      "step": 350
    },
    {
      "epoch": 0.86,
      "learning_rate": 7.960000000000002e-06,
      "loss": 0.2659,
      "step": 400
    },
    {
      "epoch": 0.97,
      "learning_rate": 8.96e-06,
      "loss": 0.2529,
      "step": 450
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.2758234441280365,
      "eval_runtime": 5945.4592,
      "eval_samples_per_second": 2.523,
      "eval_steps_per_second": 0.079,
      "eval_wer": 12.490697427174144,
      "step": 466
    },
    {
      "epoch": 1.07,
      "learning_rate": 9.960000000000001e-06,
      "loss": 0.2122,
      "step": 500
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.1893,
      "step": 550
    },
    {
      "epoch": 1.29,
      "learning_rate": 7.731481481481483e-06,
      "loss": 0.1872,
      "step": 600
    },
    {
      "epoch": 1.39,
      "learning_rate": 6.574074074074075e-06,
      "loss": 0.1803,
      "step": 650
    },
    {
      "epoch": 1.5,
      "learning_rate": 5.416666666666667e-06,
      "loss": 0.1843,
      "step": 700
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.2592592592592596e-06,
      "loss": 0.1806,
      "step": 750
    },
    {
      "epoch": 1.72,
      "learning_rate": 3.101851851851852e-06,
      "loss": 0.1787,
      "step": 800
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.944444444444445e-06,
      "loss": 0.1761,
      "step": 850
    },
    {
      "epoch": 1.93,
      "learning_rate": 7.870370370370371e-07,
      "loss": 0.1711,
      "step": 900
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.25167033076286316,
      "eval_runtime": 5711.8942,
      "eval_samples_per_second": 2.627,
      "eval_steps_per_second": 0.082,
      "eval_wer": 11.271395917499468,
      "step": 932
    },
    {
      "epoch": 2.0,
      "step": 932,
      "total_flos": 1.718295190585344e+19,
      "train_loss": 0.2830442016216818,
      "train_runtime": 31129.4467,
      "train_samples_per_second": 1.913,
      "train_steps_per_second": 0.03
    }
  ],
  "max_steps": 932,
  "num_train_epochs": 2,
  "total_flos": 1.718295190585344e+19,
  "trial_name": null,
  "trial_params": null
}