{
  "best_global_step": 50,
  "best_metric": 94.90034412693939,
  "best_model_checkpoint": "./artifacts/whisper-small-bn/checkpoint-50",
  "epoch": 0.05235602094240838,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005235602094240838,
      "grad_norm": 15.15451717376709,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.8518,
      "step": 5
    },
    {
      "epoch": 0.005235602094240838,
      "eval_loss": 1.8115043640136719,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 31064.0047,
      "eval_samples_per_second": 0.3,
      "eval_steps_per_second": 0.038,
      "eval_wer": 147.3591487262605,
      "step": 5
    },
    {
      "epoch": 0.010471204188481676,
      "grad_norm": 10.38752555847168,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.7685,
      "step": 10
    },
    {
      "epoch": 0.010471204188481676,
      "eval_loss": 1.7016992568969727,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 30036.062,
      "eval_samples_per_second": 0.311,
      "eval_steps_per_second": 0.039,
      "eval_wer": 134.91772664810966,
      "step": 10
    },
    {
      "epoch": 0.015706806282722512,
      "grad_norm": 8.120345115661621,
      "learning_rate": 3e-06,
      "loss": 1.6483,
      "step": 15
    },
    {
      "epoch": 0.015706806282722512,
      "eval_loss": 1.5776597261428833,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 30603.6517,
      "eval_samples_per_second": 0.305,
      "eval_steps_per_second": 0.038,
      "eval_wer": 133.56118530002465,
      "step": 15
    },
    {
      "epoch": 0.020942408376963352,
      "grad_norm": 7.45015811920166,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.532,
      "step": 20
    },
    {
      "epoch": 0.020942408376963352,
      "eval_loss": 1.4560885429382324,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 31279.9657,
      "eval_samples_per_second": 0.298,
      "eval_steps_per_second": 0.037,
      "eval_wer": 122.22613720446778,
      "step": 20
    },
    {
      "epoch": 0.02617801047120419,
      "grad_norm": 7.290828704833984,
      "learning_rate": 5e-06,
      "loss": 1.4186,
      "step": 25
    },
    {
      "epoch": 0.02617801047120419,
      "eval_loss": 1.3617514371871948,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 31425.8283,
      "eval_samples_per_second": 0.297,
      "eval_steps_per_second": 0.037,
      "eval_wer": 104.2305298145473,
      "step": 25
    },
    {
      "epoch": 0.031413612565445025,
      "grad_norm": 7.941519737243652,
      "learning_rate": 6e-06,
      "loss": 1.3173,
      "step": 30
    },
    {
      "epoch": 0.031413612565445025,
      "eval_loss": 1.2517695426940918,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 31322.7629,
      "eval_samples_per_second": 0.298,
      "eval_steps_per_second": 0.037,
      "eval_wer": 121.96070140821911,
      "step": 30
    },
    {
      "epoch": 0.03664921465968586,
      "grad_norm": 7.520599365234375,
      "learning_rate": 7e-06,
      "loss": 1.1809,
      "step": 35
    },
    {
      "epoch": 0.03664921465968586,
      "eval_loss": 1.0982742309570312,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 31224.2913,
      "eval_samples_per_second": 0.299,
      "eval_steps_per_second": 0.037,
      "eval_wer": 115.53151756456785,
      "step": 35
    },
    {
      "epoch": 0.041884816753926704,
      "grad_norm": 6.726326942443848,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.0262,
      "step": 40
    },
    {
      "epoch": 0.041884816753926704,
      "eval_loss": 0.9411609768867493,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 31344.2102,
      "eval_samples_per_second": 0.298,
      "eval_steps_per_second": 0.037,
      "eval_wer": 109.31726624619758,
      "step": 40
    },
    {
      "epoch": 0.04712041884816754,
      "grad_norm": 7.976568698883057,
      "learning_rate": 9e-06,
      "loss": 0.8603,
      "step": 45
    },
    {
      "epoch": 0.04712041884816754,
      "eval_loss": 0.7995896339416504,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 29970.6388,
      "eval_samples_per_second": 0.311,
      "eval_steps_per_second": 0.039,
      "eval_wer": 97.59463490833069,
      "step": 45
    },
    {
      "epoch": 0.05235602094240838,
      "grad_norm": 7.062014579772949,
      "learning_rate": 0.0,
      "loss": 0.7294,
      "step": 50
    },
    {
      "epoch": 0.05235602094240838,
      "eval_loss": 0.6859976053237915,
      "eval_model_preparation_time": 0.0029,
      "eval_runtime": 29101.6197,
      "eval_samples_per_second": 0.32,
      "eval_steps_per_second": 0.04,
      "eval_wer": 94.90034412693939,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.61736640512e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}