{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 29.891304347826086,
  "global_step": 16500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.91,
      "learning_rate": 5e-05,
      "loss": 7.7856,
      "step": 500
    },
    {
      "epoch": 0.91,
      "eval_loss": 4.400742530822754,
      "eval_runtime": 38.637,
      "eval_samples_per_second": 18.48,
      "eval_steps_per_second": 2.329,
      "eval_wer": 1.397364771151179,
      "step": 500
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.0001,
      "loss": 3.017,
      "step": 1000
    },
    {
      "epoch": 1.81,
      "eval_loss": 4.290840148925781,
      "eval_runtime": 38.4091,
      "eval_samples_per_second": 18.589,
      "eval_steps_per_second": 2.343,
      "eval_wer": 1.397364771151179,
      "step": 1000
    },
    {
      "epoch": 2.72,
      "learning_rate": 9.67866323907455e-05,
      "loss": 3.0991,
      "step": 1500
    },
    {
      "epoch": 2.72,
      "eval_loss": 4.723344326019287,
      "eval_runtime": 38.4734,
      "eval_samples_per_second": 18.558,
      "eval_steps_per_second": 2.339,
      "eval_wer": 1.397364771151179,
      "step": 1500
    },
    {
      "epoch": 3.62,
      "learning_rate": 9.357326478149101e-05,
      "loss": 2.9494,
      "step": 2000
    },
    {
      "epoch": 3.62,
      "eval_loss": 3.185974359512329,
      "eval_runtime": 38.5479,
      "eval_samples_per_second": 18.522,
      "eval_steps_per_second": 2.335,
      "eval_wer": 1.397364771151179,
      "step": 2000
    },
    {
      "epoch": 4.53,
      "learning_rate": 9.03598971722365e-05,
      "loss": 2.9429,
      "step": 2500
    },
    {
      "epoch": 4.53,
      "eval_loss": 3.0841710567474365,
      "eval_runtime": 39.1108,
      "eval_samples_per_second": 18.256,
      "eval_steps_per_second": 2.301,
      "eval_wer": 1.1171983356449375,
      "step": 2500
    },
    {
      "epoch": 5.43,
      "learning_rate": 8.7146529562982e-05,
      "loss": 2.9444,
      "step": 3000
    },
    {
      "epoch": 5.43,
      "eval_loss": 3.103475570678711,
      "eval_runtime": 38.5665,
      "eval_samples_per_second": 18.513,
      "eval_steps_per_second": 2.334,
      "eval_wer": 1.397364771151179,
      "step": 3000
    },
    {
      "epoch": 6.34,
      "learning_rate": 8.393316195372751e-05,
      "loss": 2.9323,
      "step": 3500
    },
    {
      "epoch": 6.34,
      "eval_loss": 3.037775754928589,
      "eval_runtime": 38.5517,
      "eval_samples_per_second": 18.521,
      "eval_steps_per_second": 2.335,
      "eval_wer": 1.1477115117891816,
      "step": 3500
    },
    {
      "epoch": 7.25,
      "learning_rate": 8.071979434447301e-05,
      "loss": 2.8922,
      "step": 4000
    },
    {
      "epoch": 7.25,
      "eval_loss": 2.9985926151275635,
      "eval_runtime": 38.7656,
      "eval_samples_per_second": 18.418,
      "eval_steps_per_second": 2.322,
      "eval_wer": 1.397364771151179,
      "step": 4000
    },
    {
      "epoch": 8.15,
      "learning_rate": 7.750642673521852e-05,
      "loss": 2.8732,
      "step": 4500
    },
    {
      "epoch": 8.15,
      "eval_loss": 2.9726836681365967,
      "eval_runtime": 38.7314,
      "eval_samples_per_second": 18.435,
      "eval_steps_per_second": 2.324,
      "eval_wer": 1.397364771151179,
      "step": 4500
    },
    {
      "epoch": 9.06,
      "learning_rate": 7.429305912596401e-05,
      "loss": 2.8702,
      "step": 5000
    },
    {
      "epoch": 9.06,
      "eval_loss": 3.1187801361083984,
      "eval_runtime": 38.8365,
      "eval_samples_per_second": 18.385,
      "eval_steps_per_second": 2.317,
      "eval_wer": 1.352981969486824,
      "step": 5000
    },
    {
      "epoch": 9.96,
      "learning_rate": 7.107969151670951e-05,
      "loss": 2.8939,
      "step": 5500
    },
    {
      "epoch": 9.96,
      "eval_loss": 3.054856777191162,
      "eval_runtime": 38.7022,
      "eval_samples_per_second": 18.449,
      "eval_steps_per_second": 2.325,
      "eval_wer": 1.0,
      "step": 5500
    },
    {
      "epoch": 10.87,
      "learning_rate": 6.786632390745502e-05,
      "loss": 2.857,
      "step": 6000
    },
    {
      "epoch": 10.87,
      "eval_loss": 3.0151751041412354,
      "eval_runtime": 38.9012,
      "eval_samples_per_second": 18.354,
      "eval_steps_per_second": 2.314,
      "eval_wer": 1.397364771151179,
      "step": 6000
    },
    {
      "epoch": 11.78,
      "learning_rate": 6.465295629820052e-05,
      "loss": 2.9163,
      "step": 6500
    },
    {
      "epoch": 11.78,
      "eval_loss": 3.5377986431121826,
      "eval_runtime": 38.9155,
      "eval_samples_per_second": 18.347,
      "eval_steps_per_second": 2.313,
      "eval_wer": 1.0055478502080444,
      "step": 6500
    },
    {
      "epoch": 12.68,
      "learning_rate": 6.143958868894601e-05,
      "loss": 2.8716,
      "step": 7000
    },
    {
      "epoch": 12.68,
      "eval_loss": 3.126168727874756,
      "eval_runtime": 38.8504,
      "eval_samples_per_second": 18.378,
      "eval_steps_per_second": 2.317,
      "eval_wer": 1.397364771151179,
      "step": 7000
    },
    {
      "epoch": 13.59,
      "learning_rate": 5.822622107969152e-05,
      "loss": 2.831,
      "step": 7500
    },
    {
      "epoch": 13.59,
      "eval_loss": 3.2567176818847656,
      "eval_runtime": 38.8323,
      "eval_samples_per_second": 18.387,
      "eval_steps_per_second": 2.318,
      "eval_wer": 1.397364771151179,
      "step": 7500
    },
    {
      "epoch": 14.49,
      "learning_rate": 5.501285347043702e-05,
      "loss": 2.8157,
      "step": 8000
    },
    {
      "epoch": 14.49,
      "eval_loss": 3.2480556964874268,
      "eval_runtime": 38.834,
      "eval_samples_per_second": 18.386,
      "eval_steps_per_second": 2.318,
      "eval_wer": 1.397364771151179,
      "step": 8000
    },
    {
      "epoch": 15.4,
      "learning_rate": 5.1799485861182514e-05,
      "loss": 2.8195,
      "step": 8500
    },
    {
      "epoch": 15.4,
      "eval_loss": 3.429318904876709,
      "eval_runtime": 39.1065,
      "eval_samples_per_second": 18.258,
      "eval_steps_per_second": 2.301,
      "eval_wer": 1.397364771151179,
      "step": 8500
    },
    {
      "epoch": 16.3,
      "learning_rate": 4.8586118251928024e-05,
      "loss": 2.8144,
      "step": 9000
    },
    {
      "epoch": 16.3,
      "eval_loss": 3.257302761077881,
      "eval_runtime": 38.8234,
      "eval_samples_per_second": 18.391,
      "eval_steps_per_second": 2.318,
      "eval_wer": 1.397364771151179,
      "step": 9000
    },
    {
      "epoch": 17.21,
      "learning_rate": 4.537275064267352e-05,
      "loss": 3.1969,
      "step": 9500
    },
    {
      "epoch": 17.21,
      "eval_loss": 3.610121726989746,
      "eval_runtime": 38.7751,
      "eval_samples_per_second": 18.414,
      "eval_steps_per_second": 2.321,
      "eval_wer": 1.0,
      "step": 9500
    },
    {
      "epoch": 18.12,
      "learning_rate": 4.215938303341902e-05,
      "loss": 3.1897,
      "step": 10000
    },
    {
      "epoch": 18.12,
      "eval_loss": 4.002283573150635,
      "eval_runtime": 38.8786,
      "eval_samples_per_second": 18.365,
      "eval_steps_per_second": 2.315,
      "eval_wer": 1.0,
      "step": 10000
    },
    {
      "epoch": 19.02,
      "learning_rate": 3.8946015424164526e-05,
      "loss": 2.8664,
      "step": 10500
    },
    {
      "epoch": 19.02,
      "eval_loss": 3.204529047012329,
      "eval_runtime": 38.8277,
      "eval_samples_per_second": 18.389,
      "eval_steps_per_second": 2.318,
      "eval_wer": 1.0166435506241331,
      "step": 10500
    },
    {
      "epoch": 19.93,
      "learning_rate": 3.573264781491003e-05,
      "loss": 2.8413,
      "step": 11000
    },
    {
      "epoch": 19.93,
      "eval_loss": 3.223893404006958,
      "eval_runtime": 39.0618,
      "eval_samples_per_second": 18.279,
      "eval_steps_per_second": 2.304,
      "eval_wer": 1.0624133148404993,
      "step": 11000
    },
    {
      "epoch": 20.83,
      "learning_rate": 3.251928020565553e-05,
      "loss": 2.8134,
      "step": 11500
    },
    {
      "epoch": 20.83,
      "eval_loss": 3.3309028148651123,
      "eval_runtime": 38.9169,
      "eval_samples_per_second": 18.347,
      "eval_steps_per_second": 2.313,
      "eval_wer": 1.397364771151179,
      "step": 11500
    },
    {
      "epoch": 21.74,
      "learning_rate": 2.930591259640103e-05,
      "loss": 2.8245,
      "step": 12000
    },
    {
      "epoch": 21.74,
      "eval_loss": 3.363848924636841,
      "eval_runtime": 38.887,
      "eval_samples_per_second": 18.361,
      "eval_steps_per_second": 2.314,
      "eval_wer": 1.397364771151179,
      "step": 12000
    },
    {
      "epoch": 22.64,
      "learning_rate": 2.6092544987146534e-05,
      "loss": 2.8064,
      "step": 12500
    },
    {
      "epoch": 22.64,
      "eval_loss": 3.598132848739624,
      "eval_runtime": 39.0353,
      "eval_samples_per_second": 18.291,
      "eval_steps_per_second": 2.306,
      "eval_wer": 1.397364771151179,
      "step": 12500
    },
    {
      "epoch": 23.55,
      "learning_rate": 2.2879177377892033e-05,
      "loss": 2.7996,
      "step": 13000
    },
    {
      "epoch": 23.55,
      "eval_loss": 3.5139570236206055,
      "eval_runtime": 38.9907,
      "eval_samples_per_second": 18.312,
      "eval_steps_per_second": 2.308,
      "eval_wer": 1.397364771151179,
      "step": 13000
    },
    {
      "epoch": 24.46,
      "learning_rate": 1.9665809768637533e-05,
      "loss": 2.7851,
      "step": 13500
    },
    {
      "epoch": 24.46,
      "eval_loss": 4.437741279602051,
      "eval_runtime": 38.9952,
      "eval_samples_per_second": 18.31,
      "eval_steps_per_second": 2.308,
      "eval_wer": 1.0464632454923717,
      "step": 13500
    },
    {
      "epoch": 25.36,
      "learning_rate": 1.6452442159383032e-05,
      "loss": 2.81,
      "step": 14000
    },
    {
      "epoch": 25.36,
      "eval_loss": 3.646423578262329,
      "eval_runtime": 39.109,
      "eval_samples_per_second": 18.257,
      "eval_steps_per_second": 2.301,
      "eval_wer": 1.389736477115118,
      "step": 14000
    },
    {
      "epoch": 26.27,
      "learning_rate": 1.3239074550128535e-05,
      "loss": 2.7911,
      "step": 14500
    },
    {
      "epoch": 26.27,
      "eval_loss": 3.9372551441192627,
      "eval_runtime": 39.0213,
      "eval_samples_per_second": 18.298,
      "eval_steps_per_second": 2.306,
      "eval_wer": 1.0492371705963939,
      "step": 14500
    },
    {
      "epoch": 27.17,
      "learning_rate": 1.0025706940874038e-05,
      "loss": 2.7698,
      "step": 15000
    },
    {
      "epoch": 27.17,
      "eval_loss": 4.371081829071045,
      "eval_runtime": 38.9023,
      "eval_samples_per_second": 18.354,
      "eval_steps_per_second": 2.313,
      "eval_wer": 1.096393897364771,
      "step": 15000
    },
    {
      "epoch": 28.08,
      "learning_rate": 6.812339331619537e-06,
      "loss": 2.7669,
      "step": 15500
    },
    {
      "epoch": 28.08,
      "eval_loss": 4.853293418884277,
      "eval_runtime": 39.0745,
      "eval_samples_per_second": 18.273,
      "eval_steps_per_second": 2.303,
      "eval_wer": 1.397364771151179,
      "step": 15500
    },
    {
      "epoch": 28.99,
      "learning_rate": 3.598971722365039e-06,
      "loss": 2.7467,
      "step": 16000
    },
    {
      "epoch": 28.99,
      "eval_loss": 4.779541492462158,
      "eval_runtime": 39.0475,
      "eval_samples_per_second": 18.285,
      "eval_steps_per_second": 2.305,
      "eval_wer": 1.384882108183079,
      "step": 16000
    },
    {
      "epoch": 29.89,
      "learning_rate": 3.8560411311053987e-07,
      "loss": 2.7511,
      "step": 16500
    },
    {
      "epoch": 29.89,
      "eval_loss": 5.095989227294922,
      "eval_runtime": 39.0099,
      "eval_samples_per_second": 18.303,
      "eval_steps_per_second": 2.307,
      "eval_wer": 1.397364771151179,
      "step": 16500
    }
  ],
  "max_steps": 16560,
  "num_train_epochs": 30,
  "total_flos": 3.691127303160186e+18,
  "trial_name": null,
  "trial_params": null
}