{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 29.52029520295203,
  "global_step": 16000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.92,
      "learning_rate": 5e-05,
      "loss": 7.7564,
      "step": 500
    },
    {
      "epoch": 0.92,
      "eval_loss": 3.7167632579803467,
      "eval_runtime": 40.5551,
      "eval_samples_per_second": 19.726,
      "eval_steps_per_second": 2.466,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001,
      "loss": 3.202,
      "step": 1000
    },
    {
      "epoch": 1.85,
      "eval_loss": 4.078752517700195,
      "eval_runtime": 40.739,
      "eval_samples_per_second": 19.637,
      "eval_steps_per_second": 2.455,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 2.77,
      "learning_rate": 9.672346002621232e-05,
      "loss": 2.9485,
      "step": 1500
    },
    {
      "epoch": 2.77,
      "eval_loss": 3.1143431663513184,
      "eval_runtime": 40.8557,
      "eval_samples_per_second": 19.581,
      "eval_steps_per_second": 2.448,
      "eval_wer": 1.0,
      "step": 1500
    },
    {
      "epoch": 3.69,
      "learning_rate": 9.344692005242464e-05,
      "loss": 2.9063,
      "step": 2000
    },
    {
      "epoch": 3.69,
      "eval_loss": 3.0433576107025146,
      "eval_runtime": 40.5953,
      "eval_samples_per_second": 19.707,
      "eval_steps_per_second": 2.463,
      "eval_wer": 1.0,
      "step": 2000
    },
    {
      "epoch": 4.61,
      "learning_rate": 9.017038007863697e-05,
      "loss": 2.9039,
      "step": 2500
    },
    {
      "epoch": 4.61,
      "eval_loss": 3.0591933727264404,
      "eval_runtime": 40.8492,
      "eval_samples_per_second": 19.584,
      "eval_steps_per_second": 2.448,
      "eval_wer": 1.2996031746031746,
      "step": 2500
    },
    {
      "epoch": 5.54,
      "learning_rate": 8.689384010484928e-05,
      "loss": 3.2327,
      "step": 3000
    },
    {
      "epoch": 5.54,
      "eval_loss": 3.356442451477051,
      "eval_runtime": 40.9249,
      "eval_samples_per_second": 19.548,
      "eval_steps_per_second": 2.443,
      "eval_wer": 1.0505952380952381,
      "step": 3000
    },
    {
      "epoch": 6.46,
      "learning_rate": 8.36173001310616e-05,
      "loss": 3.2719,
      "step": 3500
    },
    {
      "epoch": 6.46,
      "eval_loss": 3.3622446060180664,
      "eval_runtime": 40.6417,
      "eval_samples_per_second": 19.684,
      "eval_steps_per_second": 2.461,
      "eval_wer": 1.2996031746031746,
      "step": 3500
    },
    {
      "epoch": 7.38,
      "learning_rate": 8.034076015727393e-05,
      "loss": 3.2052,
      "step": 4000
    },
    {
      "epoch": 7.38,
      "eval_loss": 3.4156241416931152,
      "eval_runtime": 40.8257,
      "eval_samples_per_second": 19.595,
      "eval_steps_per_second": 2.449,
      "eval_wer": 1.2976190476190477,
      "step": 4000
    },
    {
      "epoch": 8.3,
      "learning_rate": 7.706422018348625e-05,
      "loss": 3.1792,
      "step": 4500
    },
    {
      "epoch": 8.3,
      "eval_loss": 3.422733783721924,
      "eval_runtime": 40.9943,
      "eval_samples_per_second": 19.515,
      "eval_steps_per_second": 2.439,
      "eval_wer": 1.2996031746031746,
      "step": 4500
    },
    {
      "epoch": 9.23,
      "learning_rate": 7.378768020969856e-05,
      "loss": 3.1267,
      "step": 5000
    },
    {
      "epoch": 9.23,
      "eval_loss": 3.4389374256134033,
      "eval_runtime": 40.497,
      "eval_samples_per_second": 19.755,
      "eval_steps_per_second": 2.469,
      "eval_wer": 1.2996031746031746,
      "step": 5000
    },
    {
      "epoch": 10.15,
      "learning_rate": 7.051114023591087e-05,
      "loss": 3.0731,
      "step": 5500
    },
    {
      "epoch": 10.15,
      "eval_loss": 3.344641923904419,
      "eval_runtime": 40.4944,
      "eval_samples_per_second": 19.756,
      "eval_steps_per_second": 2.469,
      "eval_wer": 1.058531746031746,
      "step": 5500
    },
    {
      "epoch": 11.07,
      "learning_rate": 6.723460026212321e-05,
      "loss": 3.0213,
      "step": 6000
    },
    {
      "epoch": 11.07,
      "eval_loss": 3.312887668609619,
      "eval_runtime": 40.4134,
      "eval_samples_per_second": 19.795,
      "eval_steps_per_second": 2.474,
      "eval_wer": 1.277281746031746,
      "step": 6000
    },
    {
      "epoch": 11.99,
      "learning_rate": 6.395806028833552e-05,
      "loss": 2.9988,
      "step": 6500
    },
    {
      "epoch": 11.99,
      "eval_loss": 3.3593108654022217,
      "eval_runtime": 40.3422,
      "eval_samples_per_second": 19.83,
      "eval_steps_per_second": 2.479,
      "eval_wer": 1.1091269841269842,
      "step": 6500
    },
    {
      "epoch": 12.92,
      "learning_rate": 6.068152031454783e-05,
      "loss": 2.9538,
      "step": 7000
    },
    {
      "epoch": 12.92,
      "eval_loss": 3.3612701892852783,
      "eval_runtime": 40.3616,
      "eval_samples_per_second": 19.821,
      "eval_steps_per_second": 2.478,
      "eval_wer": 1.2956349206349207,
      "step": 7000
    },
    {
      "epoch": 13.84,
      "learning_rate": 5.7404980340760164e-05,
      "loss": 2.9751,
      "step": 7500
    },
    {
      "epoch": 13.84,
      "eval_loss": 3.385903835296631,
      "eval_runtime": 40.4879,
      "eval_samples_per_second": 19.759,
      "eval_steps_per_second": 2.47,
      "eval_wer": 1.2991071428571428,
      "step": 7500
    },
    {
      "epoch": 14.76,
      "learning_rate": 5.4128440366972475e-05,
      "loss": 2.9393,
      "step": 8000
    },
    {
      "epoch": 14.76,
      "eval_loss": 3.477492094039917,
      "eval_runtime": 40.9046,
      "eval_samples_per_second": 19.558,
      "eval_steps_per_second": 2.445,
      "eval_wer": 1.2971230158730158,
      "step": 8000
    },
    {
      "epoch": 15.68,
      "learning_rate": 5.085190039318481e-05,
      "loss": 2.8759,
      "step": 8500
    },
    {
      "epoch": 15.68,
      "eval_loss": 3.2230968475341797,
      "eval_runtime": 40.3188,
      "eval_samples_per_second": 19.842,
      "eval_steps_per_second": 2.48,
      "eval_wer": 1.2961309523809523,
      "step": 8500
    },
    {
      "epoch": 16.61,
      "learning_rate": 4.757536041939712e-05,
      "loss": 2.995,
      "step": 9000
    },
    {
      "epoch": 16.61,
      "eval_loss": 3.15659761428833,
      "eval_runtime": 40.4414,
      "eval_samples_per_second": 19.782,
      "eval_steps_per_second": 2.473,
      "eval_wer": 1.2142857142857142,
      "step": 9000
    },
    {
      "epoch": 17.53,
      "learning_rate": 4.429882044560944e-05,
      "loss": 3.01,
      "step": 9500
    },
    {
      "epoch": 17.53,
      "eval_loss": 3.3003454208374023,
      "eval_runtime": 40.7113,
      "eval_samples_per_second": 19.651,
      "eval_steps_per_second": 2.456,
      "eval_wer": 1.0,
      "step": 9500
    },
    {
      "epoch": 18.45,
      "learning_rate": 4.1022280471821753e-05,
      "loss": 2.8689,
      "step": 10000
    },
    {
      "epoch": 18.45,
      "eval_loss": 3.0380513668060303,
      "eval_runtime": 40.33,
      "eval_samples_per_second": 19.836,
      "eval_steps_per_second": 2.48,
      "eval_wer": 1.0049603174603174,
      "step": 10000
    },
    {
      "epoch": 19.37,
      "learning_rate": 3.774574049803408e-05,
      "loss": 2.839,
      "step": 10500
    },
    {
      "epoch": 19.37,
      "eval_loss": 3.065554618835449,
      "eval_runtime": 40.2289,
      "eval_samples_per_second": 19.886,
      "eval_steps_per_second": 2.486,
      "eval_wer": 1.2996031746031746,
      "step": 10500
    },
    {
      "epoch": 20.3,
      "learning_rate": 3.4469200524246396e-05,
      "loss": 2.817,
      "step": 11000
    },
    {
      "epoch": 20.3,
      "eval_loss": 3.139739990234375,
      "eval_runtime": 40.0864,
      "eval_samples_per_second": 19.957,
      "eval_steps_per_second": 2.495,
      "eval_wer": 1.2986111111111112,
      "step": 11000
    },
    {
      "epoch": 21.22,
      "learning_rate": 3.119266055045872e-05,
      "loss": 2.8275,
      "step": 11500
    },
    {
      "epoch": 21.22,
      "eval_loss": 3.068601369857788,
      "eval_runtime": 40.3574,
      "eval_samples_per_second": 19.823,
      "eval_steps_per_second": 2.478,
      "eval_wer": 1.2996031746031746,
      "step": 11500
    },
    {
      "epoch": 22.14,
      "learning_rate": 2.7916120576671035e-05,
      "loss": 2.7473,
      "step": 12000
    },
    {
      "epoch": 22.14,
      "eval_loss": 3.0423059463500977,
      "eval_runtime": 40.542,
      "eval_samples_per_second": 19.733,
      "eval_steps_per_second": 2.467,
      "eval_wer": 1.2996031746031746,
      "step": 12000
    },
    {
      "epoch": 23.06,
      "learning_rate": 2.4639580602883356e-05,
      "loss": 2.7963,
      "step": 12500
    },
    {
      "epoch": 23.06,
      "eval_loss": 3.1901113986968994,
      "eval_runtime": 40.5556,
      "eval_samples_per_second": 19.726,
      "eval_steps_per_second": 2.466,
      "eval_wer": 1.2996031746031746,
      "step": 12500
    },
    {
      "epoch": 23.99,
      "learning_rate": 2.1363040629095678e-05,
      "loss": 3.0131,
      "step": 13000
    },
    {
      "epoch": 23.99,
      "eval_loss": 3.095088243484497,
      "eval_runtime": 40.2956,
      "eval_samples_per_second": 19.853,
      "eval_steps_per_second": 2.482,
      "eval_wer": 1.2996031746031746,
      "step": 13000
    },
    {
      "epoch": 24.91,
      "learning_rate": 1.8086500655307995e-05,
      "loss": 2.8969,
      "step": 13500
    },
    {
      "epoch": 24.91,
      "eval_loss": 3.089463472366333,
      "eval_runtime": 40.3824,
      "eval_samples_per_second": 19.811,
      "eval_steps_per_second": 2.476,
      "eval_wer": 1.2996031746031746,
      "step": 13500
    },
    {
      "epoch": 25.83,
      "learning_rate": 1.4809960681520315e-05,
      "loss": 2.8868,
      "step": 14000
    },
    {
      "epoch": 25.83,
      "eval_loss": 3.105912446975708,
      "eval_runtime": 40.363,
      "eval_samples_per_second": 19.82,
      "eval_steps_per_second": 2.478,
      "eval_wer": 1.2996031746031746,
      "step": 14000
    },
    {
      "epoch": 26.75,
      "learning_rate": 1.1533420707732635e-05,
      "loss": 2.7392,
      "step": 14500
    },
    {
      "epoch": 26.75,
      "eval_loss": 3.0639867782592773,
      "eval_runtime": 40.3882,
      "eval_samples_per_second": 19.808,
      "eval_steps_per_second": 2.476,
      "eval_wer": 1.2996031746031746,
      "step": 14500
    },
    {
      "epoch": 27.68,
      "learning_rate": 8.256880733944954e-06,
      "loss": 2.6868,
      "step": 15000
    },
    {
      "epoch": 27.68,
      "eval_loss": 3.0495223999023438,
      "eval_runtime": 40.4362,
      "eval_samples_per_second": 19.784,
      "eval_steps_per_second": 2.473,
      "eval_wer": 1.2996031746031746,
      "step": 15000
    },
    {
      "epoch": 28.6,
      "learning_rate": 4.980340760157274e-06,
      "loss": 2.6525,
      "step": 15500
    },
    {
      "epoch": 28.6,
      "eval_loss": 3.0540473461151123,
      "eval_runtime": 41.5944,
      "eval_samples_per_second": 19.233,
      "eval_steps_per_second": 2.404,
      "eval_wer": 1.2981150793650793,
      "step": 15500
    },
    {
      "epoch": 29.52,
      "learning_rate": 1.703800786369594e-06,
      "loss": 2.6313,
      "step": 16000
    },
    {
      "epoch": 29.52,
      "eval_loss": 3.0440595149993896,
      "eval_runtime": 40.407,
      "eval_samples_per_second": 19.799,
      "eval_steps_per_second": 2.475,
      "eval_wer": 1.2971230158730158,
      "step": 16000
    }
  ],
  "max_steps": 16260,
  "num_train_epochs": 30,
  "total_flos": 3.570830961799601e+18,
  "trial_name": null,
  "trial_params": null
}