{
  "best_global_step": 50,
  "best_metric": 22.202486678507995,
  "best_model_checkpoint": "models/iterative-large/iteration1\\checkpoint-50",
  "epoch": 15.0,
  "eval_steps": 50,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 3.0743765830993652,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 2.1301,
      "step": 10
    },
    {
      "epoch": 1.0,
      "grad_norm": 3.854400634765625,
      "learning_rate": 9.500000000000001e-07,
      "loss": 2.1611,
      "step": 20
    },
    {
      "epoch": 1.5128205128205128,
      "grad_norm": 2.148568630218506,
      "learning_rate": 1.45e-06,
      "loss": 2.0474,
      "step": 30
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.415708065032959,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 2.1874,
      "step": 40
    },
    {
      "epoch": 2.5128205128205128,
      "grad_norm": 1.8306738138198853,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 2.2126,
      "step": 50
    },
    {
      "epoch": 2.5128205128205128,
      "eval_cer": 11.211098612673416,
      "eval_loss": 1.999901294708252,
      "eval_runtime": 46.4677,
      "eval_samples_per_second": 0.387,
      "eval_steps_per_second": 0.194,
      "eval_wer": 22.202486678507995,
      "step": 50
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.926548004150391,
      "learning_rate": 2.95e-06,
      "loss": 2.0341,
      "step": 60
    },
    {
      "epoch": 3.5128205128205128,
      "grad_norm": 1.4946054220199585,
      "learning_rate": 3.45e-06,
      "loss": 2.1137,
      "step": 70
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.967564105987549,
      "learning_rate": 3.95e-06,
      "loss": 2.2081,
      "step": 80
    },
    {
      "epoch": 4.512820512820513,
      "grad_norm": 3.6834723949432373,
      "learning_rate": 4.450000000000001e-06,
      "loss": 2.0843,
      "step": 90
    },
    {
      "epoch": 5.0,
      "grad_norm": 3.584566593170166,
      "learning_rate": 4.95e-06,
      "loss": 2.053,
      "step": 100
    },
    {
      "epoch": 5.0,
      "eval_cer": 12.260967379077616,
      "eval_loss": 2.005992889404297,
      "eval_runtime": 19.8864,
      "eval_samples_per_second": 0.905,
      "eval_steps_per_second": 0.453,
      "eval_wer": 23.44582593250444,
      "step": 100
    },
    {
      "epoch": 5.512820512820513,
      "grad_norm": 4.8058366775512695,
      "learning_rate": 4.975059144291395e-06,
      "loss": 2.1603,
      "step": 110
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.5404953956604,
      "learning_rate": 4.889482536995826e-06,
      "loss": 2.247,
      "step": 120
    },
    {
      "epoch": 6.512820512820513,
      "grad_norm": 3.383509874343872,
      "learning_rate": 4.745068939401539e-06,
      "loss": 2.1578,
      "step": 130
    },
    {
      "epoch": 7.0,
      "grad_norm": 2.6765153408050537,
      "learning_rate": 4.545374293562559e-06,
      "loss": 2.1382,
      "step": 140
    },
    {
      "epoch": 7.512820512820513,
      "grad_norm": 3.2549970149993896,
      "learning_rate": 4.295315744407973e-06,
      "loss": 2.1607,
      "step": 150
    },
    {
      "epoch": 7.512820512820513,
      "eval_cer": 12.185976752905887,
      "eval_loss": 2.0080726146698,
      "eval_runtime": 20.1325,
      "eval_samples_per_second": 0.894,
      "eval_steps_per_second": 0.447,
      "eval_wer": 23.44582593250444,
      "step": 150
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.906156063079834,
      "learning_rate": 4.001050563314711e-06,
      "loss": 2.1257,
      "step": 160
    },
    {
      "epoch": 8.512820512820513,
      "grad_norm": 1.7229502201080322,
      "learning_rate": 3.6698245356514337e-06,
      "loss": 2.0533,
      "step": 170
    },
    {
      "epoch": 9.0,
      "grad_norm": 4.491665840148926,
      "learning_rate": 3.309793545495374e-06,
      "loss": 2.1557,
      "step": 180
    },
    {
      "epoch": 9.512820512820513,
      "grad_norm": 1.68223237991333,
      "learning_rate": 2.929822750698524e-06,
      "loss": 2.1284,
      "step": 190
    },
    {
      "epoch": 10.0,
      "grad_norm": 4.560079097747803,
      "learning_rate": 2.539268293279552e-06,
      "loss": 2.0968,
      "step": 200
    },
    {
      "epoch": 10.0,
      "eval_cer": 12.03599550056243,
      "eval_loss": 2.008697032928467,
      "eval_runtime": 19.9122,
      "eval_samples_per_second": 0.904,
      "eval_steps_per_second": 0.452,
      "eval_wer": 22.912966252220247,
      "step": 200
    },
    {
      "epoch": 10.512820512820513,
      "grad_norm": 3.6882331371307373,
      "learning_rate": 2.1477469201560435e-06,
      "loss": 2.1444,
      "step": 210
    },
    {
      "epoch": 11.0,
      "grad_norm": 6.8315110206604,
      "learning_rate": 1.7648991869192406e-06,
      "loss": 2.1314,
      "step": 220
    },
    {
      "epoch": 11.512820512820513,
      "grad_norm": 3.4026432037353516,
      "learning_rate": 1.400152075360212e-06,
      "loss": 2.1301,
      "step": 230
    },
    {
      "epoch": 12.0,
      "grad_norm": 4.786012649536133,
      "learning_rate": 1.0624868698918045e-06,
      "loss": 2.2472,
      "step": 240
    },
    {
      "epoch": 12.512820512820513,
      "grad_norm": 4.79121732711792,
      "learning_rate": 7.602180085192143e-07,
      "loss": 2.1174,
      "step": 250
    },
    {
      "epoch": 12.512820512820513,
      "eval_cer": 12.03599550056243,
      "eval_loss": 2.0076510906219482,
      "eval_runtime": 20.2912,
      "eval_samples_per_second": 0.887,
      "eval_steps_per_second": 0.444,
      "eval_wer": 22.912966252220247,
      "step": 250
    },
    {
      "epoch": 13.0,
      "grad_norm": 2.767958641052246,
      "learning_rate": 5.007883537822738e-07,
      "loss": 2.0301,
      "step": 260
    },
    {
      "epoch": 13.512820512820513,
      "grad_norm": 3.764622211456299,
      "learning_rate": 2.9058592477826636e-07,
      "loss": 2.0731,
      "step": 270
    },
    {
      "epoch": 14.0,
      "grad_norm": 2.2239279747009277,
      "learning_rate": 1.3478660293113677e-07,
      "loss": 2.031,
      "step": 280
    },
    {
      "epoch": 14.512820512820513,
      "grad_norm": 3.0162758827209473,
      "learning_rate": 3.722668461306533e-08,
      "loss": 2.1404,
      "step": 290
    },
    {
      "epoch": 15.0,
      "grad_norm": 2.83974027633667,
      "learning_rate": 3.0841879584853073e-10,
      "loss": 2.1003,
      "step": 300
    },
    {
      "epoch": 15.0,
      "eval_cer": 12.148481439820022,
      "eval_loss": 2.007880926132202,
      "eval_runtime": 20.3648,
      "eval_samples_per_second": 0.884,
      "eval_steps_per_second": 0.442,
      "eval_wer": 22.735346358792185,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.1397501689856e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}