{
  "best_metric": 46.52014652014652,
  "best_model_checkpoint": "./checkpoint-280",
  "epoch": 0.3333333333333333,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0,
      "loss": 2.7922,
      "step": 1
    },
    {
      "epoch": 0.05,
      "learning_rate": 7e-05,
      "loss": 2.5366,
      "step": 10
    },
    {
      "epoch": 0.05,
      "eval_loss": 1.5401501655578613,
      "eval_runtime": 61.7756,
      "eval_samples_per_second": 1.036,
      "eval_steps_per_second": 0.032,
      "eval_wer": 94.5054945054945,
      "step": 10
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.631578947368421e-05,
      "loss": 1.3721,
      "step": 20
    },
    {
      "epoch": 0.1,
      "eval_loss": 1.0021113157272339,
      "eval_runtime": 18.2189,
      "eval_samples_per_second": 3.513,
      "eval_steps_per_second": 0.11,
      "eval_wer": 75.82417582417582,
      "step": 20
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.105263157894738e-05,
      "loss": 0.9921,
      "step": 30
    },
    {
      "epoch": 0.15,
      "eval_loss": 0.8322427868843079,
      "eval_runtime": 18.5377,
      "eval_samples_per_second": 3.452,
      "eval_steps_per_second": 0.108,
      "eval_wer": 75.0915750915751,
      "step": 30
    },
    {
      "epoch": 0.2,
      "learning_rate": 8.578947368421054e-05,
      "loss": 0.9844,
      "step": 40
    },
    {
      "epoch": 0.2,
      "eval_loss": 0.8080323338508606,
      "eval_runtime": 18.3811,
      "eval_samples_per_second": 3.482,
      "eval_steps_per_second": 0.109,
      "eval_wer": 72.89377289377289,
      "step": 40
    },
    {
      "epoch": 0.25,
      "learning_rate": 8.052631578947368e-05,
      "loss": 0.7071,
      "step": 50
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.7861990332603455,
      "eval_runtime": 18.6934,
      "eval_samples_per_second": 3.424,
      "eval_steps_per_second": 0.107,
      "eval_wer": 77.2893772893773,
      "step": 50
    },
    {
      "epoch": 0.3,
      "learning_rate": 7.526315789473685e-05,
      "loss": 0.7998,
      "step": 60
    },
    {
      "epoch": 0.3,
      "eval_loss": 0.7052078247070312,
      "eval_runtime": 18.0962,
      "eval_samples_per_second": 3.537,
      "eval_steps_per_second": 0.111,
      "eval_wer": 68.86446886446886,
      "step": 60
    },
    {
      "epoch": 0.35,
      "learning_rate": 7e-05,
      "loss": 0.6935,
      "step": 70
    },
    {
      "epoch": 0.35,
      "eval_loss": 0.6780518889427185,
      "eval_runtime": 18.1288,
      "eval_samples_per_second": 3.53,
      "eval_steps_per_second": 0.11,
      "eval_wer": 64.28571428571429,
      "step": 70
    },
    {
      "epoch": 0.4,
      "learning_rate": 6.473684210526316e-05,
      "loss": 0.81,
      "step": 80
    },
    {
      "epoch": 0.4,
      "eval_loss": 0.6341258883476257,
      "eval_runtime": 17.5354,
      "eval_samples_per_second": 3.65,
      "eval_steps_per_second": 0.114,
      "eval_wer": 63.55311355311355,
      "step": 80
    },
    {
      "epoch": 0.45,
      "learning_rate": 5.9473684210526315e-05,
      "loss": 0.6133,
      "step": 90
    },
    {
      "epoch": 0.45,
      "eval_loss": 0.608344316482544,
      "eval_runtime": 18.2697,
      "eval_samples_per_second": 3.503,
      "eval_steps_per_second": 0.109,
      "eval_wer": 62.637362637362635,
      "step": 90
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.421052631578948e-05,
      "loss": 0.6675,
      "step": 100
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.585149347782135,
      "eval_runtime": 18.1531,
      "eval_samples_per_second": 3.526,
      "eval_steps_per_second": 0.11,
      "eval_wer": 62.82051282051282,
      "step": 100
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.8947368421052635e-05,
      "loss": 0.5577,
      "step": 110
    },
    {
      "epoch": 0.55,
      "eval_loss": 0.5650949478149414,
      "eval_runtime": 18.781,
      "eval_samples_per_second": 3.408,
      "eval_steps_per_second": 0.106,
      "eval_wer": 59.34065934065934,
      "step": 110
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.368421052631579e-05,
      "loss": 0.6473,
      "step": 120
    },
    {
      "epoch": 0.6,
      "eval_loss": 0.5637935400009155,
      "eval_runtime": 19.7936,
      "eval_samples_per_second": 3.233,
      "eval_steps_per_second": 0.101,
      "eval_wer": 58.058608058608066,
      "step": 120
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.842105263157895e-05,
      "loss": 0.6018,
      "step": 130
    },
    {
      "epoch": 0.65,
      "eval_loss": 0.5434007048606873,
      "eval_runtime": 18.5859,
      "eval_samples_per_second": 3.443,
      "eval_steps_per_second": 0.108,
      "eval_wer": 53.84615384615385,
      "step": 130
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.3157894736842106e-05,
      "loss": 0.5918,
      "step": 140
    },
    {
      "epoch": 0.7,
      "eval_loss": 0.5384606719017029,
      "eval_runtime": 17.6361,
      "eval_samples_per_second": 3.629,
      "eval_steps_per_second": 0.113,
      "eval_wer": 54.94505494505495,
      "step": 140
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.7894736842105263e-05,
      "loss": 0.5654,
      "step": 150
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.5200322866439819,
      "eval_runtime": 19.3669,
      "eval_samples_per_second": 3.305,
      "eval_steps_per_second": 0.103,
      "eval_wer": 58.058608058608066,
      "step": 150
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.2631578947368423e-05,
      "loss": 0.587,
      "step": 160
    },
    {
      "epoch": 0.8,
      "eval_loss": 0.4974484145641327,
      "eval_runtime": 20.073,
      "eval_samples_per_second": 3.188,
      "eval_steps_per_second": 0.1,
      "eval_wer": 57.14285714285714,
      "step": 160
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.736842105263158e-05,
      "loss": 0.6157,
      "step": 170
    },
    {
      "epoch": 0.85,
      "eval_loss": 0.483424574136734,
      "eval_runtime": 20.2725,
      "eval_samples_per_second": 3.157,
      "eval_steps_per_second": 0.099,
      "eval_wer": 53.2967032967033,
      "step": 170
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2105263157894737e-05,
      "loss": 0.6803,
      "step": 180
    },
    {
      "epoch": 0.9,
      "eval_loss": 0.4851979613304138,
      "eval_runtime": 17.9572,
      "eval_samples_per_second": 3.564,
      "eval_steps_per_second": 0.111,
      "eval_wer": 55.86080586080586,
      "step": 180
    },
    {
      "epoch": 0.95,
      "learning_rate": 6.842105263157896e-06,
      "loss": 0.4813,
      "step": 190
    },
    {
      "epoch": 0.95,
      "eval_loss": 0.4685819447040558,
      "eval_runtime": 17.9367,
      "eval_samples_per_second": 3.568,
      "eval_steps_per_second": 0.112,
      "eval_wer": 51.28205128205128,
      "step": 190
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.5789473684210528e-06,
      "loss": 0.4952,
      "step": 200
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.4624484181404114,
      "eval_runtime": 19.165,
      "eval_samples_per_second": 3.339,
      "eval_steps_per_second": 0.104,
      "eval_wer": 51.46520146520146,
      "step": 200
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.206896551724138e-05,
      "loss": 0.3956,
      "step": 210
    },
    {
      "epoch": 0.03,
      "eval_loss": 0.46904227137565613,
      "eval_runtime": 16.6622,
      "eval_samples_per_second": 3.841,
      "eval_steps_per_second": 0.12,
      "eval_wer": 52.01465201465202,
      "step": 210
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.8620689655172417e-05,
      "loss": 0.3719,
      "step": 220
    },
    {
      "epoch": 0.07,
      "eval_loss": 0.4673417806625366,
      "eval_runtime": 17.4548,
      "eval_samples_per_second": 3.667,
      "eval_steps_per_second": 0.115,
      "eval_wer": 52.74725274725275,
      "step": 220
    },
    {
      "epoch": 0.1,
      "learning_rate": 2.517241379310345e-05,
      "loss": 0.3168,
      "step": 230
    },
    {
      "epoch": 0.1,
      "eval_loss": 0.44994863867759705,
      "eval_runtime": 17.0948,
      "eval_samples_per_second": 3.744,
      "eval_steps_per_second": 0.117,
      "eval_wer": 51.46520146520146,
      "step": 230
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.1724137931034484e-05,
      "loss": 0.3582,
      "step": 240
    },
    {
      "epoch": 0.13,
      "eval_loss": 0.4524703323841095,
      "eval_runtime": 17.7891,
      "eval_samples_per_second": 3.598,
      "eval_steps_per_second": 0.112,
      "eval_wer": 46.88644688644688,
      "step": 240
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.827586206896552e-05,
      "loss": 0.2475,
      "step": 250
    },
    {
      "epoch": 0.17,
      "eval_loss": 0.46121472120285034,
      "eval_runtime": 17.7044,
      "eval_samples_per_second": 3.615,
      "eval_steps_per_second": 0.113,
      "eval_wer": 52.38095238095239,
      "step": 250
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.482758620689655e-05,
      "loss": 0.2988,
      "step": 260
    },
    {
      "epoch": 0.2,
      "eval_loss": 0.4346223473548889,
      "eval_runtime": 19.6665,
      "eval_samples_per_second": 3.254,
      "eval_steps_per_second": 0.102,
      "eval_wer": 49.81684981684982,
      "step": 260
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.1379310344827587e-05,
      "loss": 0.2749,
      "step": 270
    },
    {
      "epoch": 0.23,
      "eval_loss": 0.42485401034355164,
      "eval_runtime": 17.6854,
      "eval_samples_per_second": 3.619,
      "eval_steps_per_second": 0.113,
      "eval_wer": 48.9010989010989,
      "step": 270
    },
    {
      "epoch": 0.27,
      "learning_rate": 7.93103448275862e-06,
      "loss": 0.3368,
      "step": 280
    },
    {
      "epoch": 0.27,
      "eval_loss": 0.43880951404571533,
      "eval_runtime": 18.1285,
      "eval_samples_per_second": 3.53,
      "eval_steps_per_second": 0.11,
      "eval_wer": 46.52014652014652,
      "step": 280
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.482758620689655e-06,
      "loss": 0.2574,
      "step": 290
    },
    {
      "epoch": 0.3,
      "eval_loss": 0.43085092306137085,
      "eval_runtime": 18.1023,
      "eval_samples_per_second": 3.535,
      "eval_steps_per_second": 0.11,
      "eval_wer": 46.7032967032967,
      "step": 290
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.0344827586206898e-06,
      "loss": 0.2921,
      "step": 300
    },
    {
      "epoch": 0.33,
      "eval_loss": 0.4282010793685913,
      "eval_runtime": 18.1178,
      "eval_samples_per_second": 3.532,
      "eval_steps_per_second": 0.11,
      "eval_wer": 46.7032967032967,
      "step": 300
    },
    {
      "epoch": 0.33,
      "step": 300,
      "total_flos": 2.36341297152e+17,
      "train_loss": 0.10500287771224975,
      "train_runtime": 1208.0467,
      "train_samples_per_second": 7.947,
      "train_steps_per_second": 0.248
    }
  ],
  "max_steps": 300,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 2.36341297152e+17,
  "trial_name": null,
  "trial_params": null
}