{
  "best_global_step": 300,
  "best_metric": 22.202486678507995,
  "best_model_checkpoint": "models/iterative-large/iteration2\\checkpoint-300",
  "epoch": 15.0,
  "eval_steps": 50,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 2.996086597442627,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 2.3646,
      "step": 10
    },
    {
      "epoch": 1.0,
      "grad_norm": 3.3487439155578613,
      "learning_rate": 9.500000000000001e-07,
      "loss": 2.4107,
      "step": 20
    },
    {
      "epoch": 1.5128205128205128,
      "grad_norm": 3.2800023555755615,
      "learning_rate": 1.45e-06,
      "loss": 2.521,
      "step": 30
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.7190394401550293,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 2.3518,
      "step": 40
    },
    {
      "epoch": 2.5128205128205128,
      "grad_norm": 4.037679195404053,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 2.544,
      "step": 50
    },
    {
      "epoch": 2.5128205128205128,
      "eval_cer": 12.260967379077616,
      "eval_loss": 2.017274856567383,
      "eval_runtime": 54.2097,
      "eval_samples_per_second": 0.332,
      "eval_steps_per_second": 0.166,
      "eval_wer": 22.735346358792185,
      "step": 50
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.2123286724090576,
      "learning_rate": 2.95e-06,
      "loss": 2.3837,
      "step": 60
    },
    {
      "epoch": 3.5128205128205128,
      "grad_norm": 3.1410415172576904,
      "learning_rate": 3.45e-06,
      "loss": 2.3584,
      "step": 70
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.298803329467773,
      "learning_rate": 3.95e-06,
      "loss": 2.3995,
      "step": 80
    },
    {
      "epoch": 4.512820512820513,
      "grad_norm": 4.144355773925781,
      "learning_rate": 4.450000000000001e-06,
      "loss": 2.3549,
      "step": 90
    },
    {
      "epoch": 5.0,
      "grad_norm": 11.266213417053223,
      "learning_rate": 4.95e-06,
      "loss": 2.4564,
      "step": 100
    },
    {
      "epoch": 5.0,
      "eval_cer": 12.29846269216348,
      "eval_loss": 2.0140507221221924,
      "eval_runtime": 27.888,
      "eval_samples_per_second": 0.645,
      "eval_steps_per_second": 0.323,
      "eval_wer": 22.912966252220247,
      "step": 100
    },
    {
      "epoch": 5.512820512820513,
      "grad_norm": 3.633810520172119,
      "learning_rate": 4.9889049115077e-06,
      "loss": 2.276,
      "step": 110
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.385662078857422,
      "learning_rate": 4.9506779365543054e-06,
      "loss": 2.5345,
      "step": 120
    },
    {
      "epoch": 6.512820512820513,
      "grad_norm": 5.4497785568237305,
      "learning_rate": 4.885600821290692e-06,
      "loss": 2.3369,
      "step": 130
    },
    {
      "epoch": 7.0,
      "grad_norm": 8.95763874053955,
      "learning_rate": 4.794386564209953e-06,
      "loss": 2.4669,
      "step": 140
    },
    {
      "epoch": 7.512820512820513,
      "grad_norm": 2.7476675510406494,
      "learning_rate": 4.6780345278004744e-06,
      "loss": 2.4809,
      "step": 150
    },
    {
      "epoch": 7.512820512820513,
      "eval_cer": 12.185976752905887,
      "eval_loss": 2.0147056579589844,
      "eval_runtime": 28.0093,
      "eval_samples_per_second": 0.643,
      "eval_steps_per_second": 0.321,
      "eval_wer": 22.55772646536412,
      "step": 150
    },
    {
      "epoch": 8.0,
      "grad_norm": 3.588670253753662,
      "learning_rate": 4.537819489321385e-06,
      "loss": 2.4501,
      "step": 160
    },
    {
      "epoch": 8.512820512820513,
      "grad_norm": 3.2149131298065186,
      "learning_rate": 4.3752776740761495e-06,
      "loss": 2.2247,
      "step": 170
    },
    {
      "epoch": 9.0,
      "grad_norm": 7.501545429229736,
      "learning_rate": 4.192189924206652e-06,
      "loss": 2.3179,
      "step": 180
    },
    {
      "epoch": 9.512820512820513,
      "grad_norm": 2.922227144241333,
      "learning_rate": 3.9905621874140396e-06,
      "loss": 2.4516,
      "step": 190
    },
    {
      "epoch": 10.0,
      "grad_norm": 4.930537700653076,
      "learning_rate": 3.772603539375929e-06,
      "loss": 2.3659,
      "step": 200
    },
    {
      "epoch": 10.0,
      "eval_cer": 12.260967379077616,
      "eval_loss": 2.013343334197998,
      "eval_runtime": 28.453,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.316,
      "eval_wer": 22.735346358792185,
      "step": 200
    },
    {
      "epoch": 10.512820512820513,
      "grad_norm": 3.6379880905151367,
      "learning_rate": 3.5407019806510035e-06,
      "loss": 2.2918,
      "step": 210
    },
    {
      "epoch": 11.0,
      "grad_norm": 3.8149631023406982,
      "learning_rate": 3.2973982732451753e-06,
      "loss": 2.3306,
      "step": 220
    },
    {
      "epoch": 11.512820512820513,
      "grad_norm": 3.5519373416900635,
      "learning_rate": 3.045358103491357e-06,
      "loss": 2.5811,
      "step": 230
    },
    {
      "epoch": 12.0,
      "grad_norm": 7.584492206573486,
      "learning_rate": 2.7873428762321667e-06,
      "loss": 2.3152,
      "step": 240
    },
    {
      "epoch": 12.512820512820513,
      "grad_norm": 4.233317852020264,
      "learning_rate": 2.526179460290615e-06,
      "loss": 2.264,
      "step": 250
    },
    {
      "epoch": 12.512820512820513,
      "eval_cer": 12.29846269216348,
      "eval_loss": 2.0075132846832275,
      "eval_runtime": 28.4486,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.316,
      "eval_wer": 22.380106571936057,
      "step": 250
    },
    {
      "epoch": 13.0,
      "grad_norm": 9.289950370788574,
      "learning_rate": 2.2647292167037143e-06,
      "loss": 2.3654,
      "step": 260
    },
    {
      "epoch": 13.512820512820513,
      "grad_norm": 3.141766309738159,
      "learning_rate": 2.0058566490521848e-06,
      "loss": 2.3092,
      "step": 270
    },
    {
      "epoch": 14.0,
      "grad_norm": 6.005232334136963,
      "learning_rate": 1.7523980193597837e-06,
      "loss": 2.3613,
      "step": 280
    },
    {
      "epoch": 14.512820512820513,
      "grad_norm": 3.238589286804199,
      "learning_rate": 1.5071302734130488e-06,
      "loss": 2.2593,
      "step": 290
    },
    {
      "epoch": 15.0,
      "grad_norm": 11.191150665283203,
      "learning_rate": 1.272740615962148e-06,
      "loss": 2.3288,
      "step": 300
    },
    {
      "epoch": 15.0,
      "eval_cer": 12.223472065991752,
      "eval_loss": 2.00502347946167,
      "eval_runtime": 28.2171,
      "eval_samples_per_second": 0.638,
      "eval_steps_per_second": 0.319,
      "eval_wer": 22.202486678507995,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.005
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.1397501689856e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}