{
  "best_global_step": 300,
  "best_metric": 56.034321954859166,
  "best_model_checkpoint": "./JUDICM/checkpoint-300",
  "epoch": 7.0588235294117645,
  "eval_steps": 100,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 9.61864185333252,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 2.9117,
      "step": 20
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 8.609731674194336,
      "learning_rate": 3.7e-06,
      "loss": 2.0419,
      "step": 40
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 7.390444278717041,
      "learning_rate": 5.7e-06,
      "loss": 1.5565,
      "step": 60
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 5.834730625152588,
      "learning_rate": 7.7e-06,
      "loss": 0.9792,
      "step": 80
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 5.353567123413086,
      "learning_rate": 9.7e-06,
      "loss": 0.8465,
      "step": 100
    },
    {
      "epoch": 1.1764705882352942,
      "eval_loss": 0.681628942489624,
      "eval_runtime": 544.1503,
      "eval_samples_per_second": 1.251,
      "eval_steps_per_second": 0.158,
      "eval_wer": 74.07821923770442,
      "step": 100
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 4.50194787979126,
      "learning_rate": 9.773333333333335e-06,
      "loss": 0.6137,
      "step": 120
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 4.959797382354736,
      "learning_rate": 9.506666666666667e-06,
      "loss": 0.6733,
      "step": 140
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 5.115293502807617,
      "learning_rate": 9.240000000000001e-06,
      "loss": 0.7026,
      "step": 160
    },
    {
      "epoch": 2.1176470588235294,
      "grad_norm": 3.1620101928710938,
      "learning_rate": 8.973333333333334e-06,
      "loss": 0.5185,
      "step": 180
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 4.74745512008667,
      "learning_rate": 8.706666666666667e-06,
      "loss": 0.4669,
      "step": 200
    },
    {
      "epoch": 2.3529411764705883,
      "eval_loss": 0.47813770174980164,
      "eval_runtime": 481.7363,
      "eval_samples_per_second": 1.414,
      "eval_steps_per_second": 0.179,
      "eval_wer": 61.49350245600945,
      "step": 200
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 3.0848231315612793,
      "learning_rate": 8.44e-06,
      "loss": 0.5316,
      "step": 220
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 3.566438674926758,
      "learning_rate": 8.173333333333334e-06,
      "loss": 0.4629,
      "step": 240
    },
    {
      "epoch": 3.0588235294117645,
      "grad_norm": 2.9106101989746094,
      "learning_rate": 7.906666666666667e-06,
      "loss": 0.4565,
      "step": 260
    },
    {
      "epoch": 3.2941176470588234,
      "grad_norm": 3.077847957611084,
      "learning_rate": 7.640000000000001e-06,
      "loss": 0.345,
      "step": 280
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 3.5237925052642822,
      "learning_rate": 7.373333333333334e-06,
      "loss": 0.3376,
      "step": 300
    },
    {
      "epoch": 3.5294117647058822,
      "eval_loss": 0.367522269487381,
      "eval_runtime": 520.1482,
      "eval_samples_per_second": 1.309,
      "eval_steps_per_second": 0.165,
      "eval_wer": 56.034321954859166,
      "step": 300
    },
    {
      "epoch": 3.764705882352941,
      "grad_norm": 3.1221227645874023,
      "learning_rate": 7.1066666666666675e-06,
      "loss": 0.3879,
      "step": 320
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.873680353164673,
      "learning_rate": 6.8400000000000014e-06,
      "loss": 0.3156,
      "step": 340
    },
    {
      "epoch": 4.235294117647059,
      "grad_norm": 5.0606160163879395,
      "learning_rate": 6.573333333333334e-06,
      "loss": 0.243,
      "step": 360
    },
    {
      "epoch": 4.470588235294118,
      "grad_norm": 2.9561641216278076,
      "learning_rate": 6.3066666666666676e-06,
      "loss": 0.2344,
      "step": 380
    },
    {
      "epoch": 4.705882352941177,
      "grad_norm": 3.9523017406463623,
      "learning_rate": 6.040000000000001e-06,
      "loss": 0.2195,
      "step": 400
    },
    {
      "epoch": 4.705882352941177,
      "eval_loss": 0.2805194854736328,
      "eval_runtime": 475.2131,
      "eval_samples_per_second": 1.433,
      "eval_steps_per_second": 0.181,
      "eval_wer": 58.521420133059756,
      "step": 400
    },
    {
      "epoch": 4.9411764705882355,
      "grad_norm": 3.081202983856201,
      "learning_rate": 5.7733333333333345e-06,
      "loss": 0.2079,
      "step": 420
    },
    {
      "epoch": 5.176470588235294,
      "grad_norm": 3.1263742446899414,
      "learning_rate": 5.506666666666667e-06,
      "loss": 0.1388,
      "step": 440
    },
    {
      "epoch": 5.411764705882353,
      "grad_norm": 3.4922068119049072,
      "learning_rate": 5.240000000000001e-06,
      "loss": 0.1538,
      "step": 460
    },
    {
      "epoch": 5.647058823529412,
      "grad_norm": 5.9641594886779785,
      "learning_rate": 4.973333333333334e-06,
      "loss": 0.1697,
      "step": 480
    },
    {
      "epoch": 5.882352941176471,
      "grad_norm": 4.860027313232422,
      "learning_rate": 4.706666666666667e-06,
      "loss": 0.1284,
      "step": 500
    },
    {
      "epoch": 5.882352941176471,
      "eval_loss": 0.2389652281999588,
      "eval_runtime": 524.8677,
      "eval_samples_per_second": 1.297,
      "eval_steps_per_second": 0.164,
      "eval_wer": 59.46651744077598,
      "step": 500
    },
    {
      "epoch": 6.117647058823529,
      "grad_norm": 4.114612102508545,
      "learning_rate": 4.440000000000001e-06,
      "loss": 0.1121,
      "step": 520
    },
    {
      "epoch": 6.352941176470588,
      "grad_norm": 2.893221855163574,
      "learning_rate": 4.173333333333334e-06,
      "loss": 0.1032,
      "step": 540
    },
    {
      "epoch": 6.588235294117647,
      "grad_norm": 5.563397407531738,
      "learning_rate": 3.906666666666667e-06,
      "loss": 0.0902,
      "step": 560
    },
    {
      "epoch": 6.823529411764706,
      "grad_norm": 3.545964002609253,
      "learning_rate": 3.6400000000000003e-06,
      "loss": 0.0801,
      "step": 580
    },
    {
      "epoch": 7.0588235294117645,
      "grad_norm": 0.6986887454986572,
      "learning_rate": 3.3733333333333334e-06,
      "loss": 0.0558,
      "step": 600
    },
    {
      "epoch": 7.0588235294117645,
      "eval_loss": 0.2277635931968689,
      "eval_runtime": 501.1892,
      "eval_samples_per_second": 1.359,
      "eval_steps_per_second": 0.172,
      "eval_wer": 59.578436858795,
      "step": 600
    }
  ],
  "logging_steps": 20,
  "max_steps": 850,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9595614224384e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}