{
  "best_global_step": 600,
  "best_metric": 24.187029783000686,
  "best_model_checkpoint": "./JUDIC/checkpoint-600",
  "epoch": 7.0588235294117645,
  "eval_steps": 100,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 4.239101886749268,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.4078,
      "step": 20
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 2.235217809677124,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.4217,
      "step": 40
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 3.711195707321167,
      "learning_rate": 5.9e-06,
      "loss": 0.4252,
      "step": 60
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 4.5320820808410645,
      "learning_rate": 7.9e-06,
      "loss": 0.3812,
      "step": 80
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 3.9703948497772217,
      "learning_rate": 9.9e-06,
      "loss": 0.4223,
      "step": 100
    },
    {
      "epoch": 1.1764705882352942,
      "eval_loss": 0.40899839997291565,
      "eval_runtime": 323.6745,
      "eval_samples_per_second": 2.104,
      "eval_steps_per_second": 0.266,
      "eval_wer": 36.6100851831126,
      "step": 100
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 3.8265273571014404,
      "learning_rate": 9.746666666666668e-06,
      "loss": 0.2872,
      "step": 120
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 4.8625168800354,
      "learning_rate": 9.48e-06,
      "loss": 0.3207,
      "step": 140
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 4.235057830810547,
      "learning_rate": 9.213333333333334e-06,
      "loss": 0.3585,
      "step": 160
    },
    {
      "epoch": 2.1176470588235294,
      "grad_norm": 2.1225504875183105,
      "learning_rate": 8.973333333333334e-06,
      "loss": 0.2658,
      "step": 180
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 8.930822372436523,
      "learning_rate": 8.706666666666667e-06,
      "loss": 0.2576,
      "step": 200
    },
    {
      "epoch": 2.3529411764705883,
      "eval_loss": 0.35160648822784424,
      "eval_runtime": 296.116,
      "eval_samples_per_second": 2.3,
      "eval_steps_per_second": 0.29,
      "eval_wer": 29.938444320089534,
      "step": 200
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 1.942764163017273,
      "learning_rate": 8.44e-06,
      "loss": 0.283,
      "step": 220
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 3.2245473861694336,
      "learning_rate": 8.173333333333334e-06,
      "loss": 0.2443,
      "step": 240
    },
    {
      "epoch": 3.0588235294117645,
      "grad_norm": 3.0929267406463623,
      "learning_rate": 7.906666666666667e-06,
      "loss": 0.2489,
      "step": 260
    },
    {
      "epoch": 3.2941176470588234,
      "grad_norm": 3.477473497390747,
      "learning_rate": 7.640000000000001e-06,
      "loss": 0.2006,
      "step": 280
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 3.163804531097412,
      "learning_rate": 7.373333333333334e-06,
      "loss": 0.1837,
      "step": 300
    },
    {
      "epoch": 3.5294117647058822,
      "eval_loss": 0.30386850237846375,
      "eval_runtime": 329.1958,
      "eval_samples_per_second": 2.069,
      "eval_steps_per_second": 0.261,
      "eval_wer": 31.555058135919918,
      "step": 300
    },
    {
      "epoch": 3.764705882352941,
      "grad_norm": 2.77553391456604,
      "learning_rate": 7.1066666666666675e-06,
      "loss": 0.2342,
      "step": 320
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.944244861602783,
      "learning_rate": 6.8400000000000014e-06,
      "loss": 0.1751,
      "step": 340
    },
    {
      "epoch": 4.235294117647059,
      "grad_norm": 6.829845905303955,
      "learning_rate": 6.573333333333334e-06,
      "loss": 0.1431,
      "step": 360
    },
    {
      "epoch": 4.470588235294118,
      "grad_norm": 3.4126970767974854,
      "learning_rate": 6.3066666666666676e-06,
      "loss": 0.1468,
      "step": 380
    },
    {
      "epoch": 4.705882352941177,
      "grad_norm": 3.5233240127563477,
      "learning_rate": 6.040000000000001e-06,
      "loss": 0.1376,
      "step": 400
    },
    {
      "epoch": 4.705882352941177,
      "eval_loss": 0.27411767840385437,
      "eval_runtime": 307.4101,
      "eval_samples_per_second": 2.215,
      "eval_steps_per_second": 0.28,
      "eval_wer": 25.511409562892496,
      "step": 400
    },
    {
      "epoch": 4.9411764705882355,
      "grad_norm": 2.9697437286376953,
      "learning_rate": 5.7733333333333345e-06,
      "loss": 0.1395,
      "step": 420
    },
    {
      "epoch": 5.176470588235294,
      "grad_norm": 3.495859384536743,
      "learning_rate": 5.506666666666667e-06,
      "loss": 0.1046,
      "step": 440
    },
    {
      "epoch": 5.411764705882353,
      "grad_norm": 4.7592902183532715,
      "learning_rate": 5.240000000000001e-06,
      "loss": 0.1179,
      "step": 460
    },
    {
      "epoch": 5.647058823529412,
      "grad_norm": 7.579683303833008,
      "learning_rate": 4.973333333333334e-06,
      "loss": 0.1361,
      "step": 480
    },
    {
      "epoch": 5.882352941176471,
      "grad_norm": 4.949639797210693,
      "learning_rate": 4.706666666666667e-06,
      "loss": 0.0935,
      "step": 500
    },
    {
      "epoch": 5.882352941176471,
      "eval_loss": 0.2585192918777466,
      "eval_runtime": 321.1171,
      "eval_samples_per_second": 2.121,
      "eval_steps_per_second": 0.268,
      "eval_wer": 26.26997450724367,
      "step": 500
    },
    {
      "epoch": 6.117647058823529,
      "grad_norm": 3.536499500274658,
      "learning_rate": 4.440000000000001e-06,
      "loss": 0.0917,
      "step": 520
    },
    {
      "epoch": 6.352941176470588,
      "grad_norm": 4.393896579742432,
      "learning_rate": 4.173333333333334e-06,
      "loss": 0.0959,
      "step": 540
    },
    {
      "epoch": 6.588235294117647,
      "grad_norm": 6.232944011688232,
      "learning_rate": 3.906666666666667e-06,
      "loss": 0.0844,
      "step": 560
    },
    {
      "epoch": 6.823529411764706,
      "grad_norm": 3.1390299797058105,
      "learning_rate": 3.6400000000000003e-06,
      "loss": 0.0734,
      "step": 580
    },
    {
      "epoch": 7.0588235294117645,
      "grad_norm": 0.599709153175354,
      "learning_rate": 3.3733333333333334e-06,
      "loss": 0.0575,
      "step": 600
    },
    {
      "epoch": 7.0588235294117645,
      "eval_loss": 0.24722784757614136,
      "eval_runtime": 320.2621,
      "eval_samples_per_second": 2.126,
      "eval_steps_per_second": 0.269,
      "eval_wer": 24.187029783000686,
      "step": 600
    }
  ],
  "logging_steps": 20,
  "max_steps": 850,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.540839686144e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}