{
  "best_global_step": 10420,
  "best_metric": 0.046850736209360914,
  "best_model_checkpoint": "checkpoints/checkpoint-10420",
  "epoch": 0.9997313581763058,
  "eval_steps": 2605,
  "global_step": 13025,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03837740338488698,
      "grad_norm": 0.09230651706457138,
      "learning_rate": 4.997989574631836e-05,
      "loss": 0.1488,
      "step": 500
    },
    {
      "epoch": 0.07675480676977396,
      "grad_norm": 0.12535081803798676,
      "learning_rate": 4.9919454234104656e-05,
      "loss": 0.1477,
      "step": 1000
    },
    {
      "epoch": 0.11513221015466094,
      "grad_norm": 0.09996860474348068,
      "learning_rate": 4.9818772983087216e-05,
      "loss": 0.148,
      "step": 1500
    },
    {
      "epoch": 0.15350961353954792,
      "grad_norm": 0.10347171127796173,
      "learning_rate": 4.967801457255871e-05,
      "loss": 0.1479,
      "step": 2000
    },
    {
      "epoch": 0.19188701692443488,
      "grad_norm": 0.10039370507001877,
      "learning_rate": 4.949740629809395e-05,
      "loss": 0.1463,
      "step": 2500
    },
    {
      "epoch": 0.23026442030932187,
      "grad_norm": 0.12594294548034668,
      "learning_rate": 4.8931895320019325e-05,
      "loss": 0.1435,
      "step": 3000
    },
    {
      "epoch": 0.26864182369420886,
      "grad_norm": 0.09491392970085144,
      "learning_rate": 4.862212134330527e-05,
      "loss": 0.1437,
      "step": 3500
    },
    {
      "epoch": 0.30701922707909585,
      "grad_norm": 0.09572149813175201,
      "learning_rate": 4.827420255074112e-05,
      "loss": 0.1429,
      "step": 4000
    },
    {
      "epoch": 0.34539663046398283,
      "grad_norm": 0.09541437774896622,
      "learning_rate": 4.788870075885808e-05,
      "loss": 0.1425,
      "step": 4500
    },
    {
      "epoch": 0.38377403384886977,
      "grad_norm": 0.09861047565937042,
      "learning_rate": 4.7466238472919244e-05,
      "loss": 0.1419,
      "step": 5000
    },
    {
      "epoch": 0.3998925432705223,
      "eval_avg": 0.04867361780589206,
      "eval_cer": 0.03163900244740649,
      "eval_der": 0.041930648121315375,
      "eval_loss": 0.13073518872261047,
      "eval_runtime": 1697.2219,
      "eval_samples_per_second": 30.5,
      "eval_steps_per_second": 0.382,
      "eval_wer": 0.07245120284895432,
      "step": 5210
    },
    {
      "epoch": 0.42215143723375675,
      "grad_norm": 0.11749344319105148,
      "learning_rate": 4.700749788170293e-05,
      "loss": 0.1409,
      "step": 5500
    },
    {
      "epoch": 0.46052884061864374,
      "grad_norm": 0.17759008705615997,
      "learning_rate": 4.651321975590966e-05,
      "loss": 0.1407,
      "step": 6000
    },
    {
      "epoch": 0.49890624400353073,
      "grad_norm": 0.11128637939691544,
      "learning_rate": 4.598420225197139e-05,
      "loss": 0.1403,
      "step": 6500
    },
    {
      "epoch": 0.5372836473884177,
      "grad_norm": 0.10760766267776489,
      "learning_rate": 4.542245865061001e-05,
      "loss": 0.1389,
      "step": 7000
    },
    {
      "epoch": 0.5756610507733047,
      "grad_norm": 0.10907711833715439,
      "learning_rate": 4.482664487724415e-05,
      "loss": 0.1396,
      "step": 7500
    },
    {
      "epoch": 0.5998388149057835,
      "eval_avg": 0.047643108039959726,
      "eval_cer": 0.031145497115735506,
      "eval_der": 0.04097053994011138,
      "eval_loss": 0.12629394233226776,
      "eval_runtime": 1720.873,
      "eval_samples_per_second": 30.081,
      "eval_steps_per_second": 0.377,
      "eval_wer": 0.07081328706403231,
      "step": 7815
    },
    {
      "epoch": 0.6140384541581917,
      "grad_norm": 0.10269790142774582,
      "learning_rate": 4.905050311075738e-05,
      "loss": 0.139,
      "step": 8000
    },
    {
      "epoch": 0.6524158575430786,
      "grad_norm": 0.08717872947454453,
      "learning_rate": 4.8756902702886506e-05,
      "loss": 0.1374,
      "step": 8500
    },
    {
      "epoch": 0.6907932609279657,
      "grad_norm": 0.09375835955142975,
      "learning_rate": 4.842493983528561e-05,
      "loss": 0.1381,
      "step": 9000
    },
    {
      "epoch": 0.7291706643128526,
      "grad_norm": 0.09731540083885193,
      "learning_rate": 4.805515055898372e-05,
      "loss": 0.1365,
      "step": 9500
    },
    {
      "epoch": 0.7675480676977395,
      "grad_norm": 0.13318291306495667,
      "learning_rate": 4.764813200679724e-05,
      "loss": 0.1367,
      "step": 10000
    },
    {
      "epoch": 0.7997850865410446,
      "eval_avg": 0.046850736209360914,
      "eval_cer": 0.030717447084398133,
      "eval_der": 0.040079204095426355,
      "eval_loss": 0.12388349324464798,
      "eval_runtime": 1688.4755,
      "eval_samples_per_second": 30.658,
      "eval_steps_per_second": 0.384,
      "eval_wer": 0.06975555744825825,
      "step": 10420
    },
    {
      "epoch": 0.8059254710826266,
      "grad_norm": 0.09923764318227768,
      "learning_rate": 4.720454142908466e-05,
      "loss": 0.1357,
      "step": 10500
    },
    {
      "epoch": 0.8443028744675135,
      "grad_norm": 0.08920133858919144,
      "learning_rate": 4.672509513242378e-05,
      "loss": 0.1354,
      "step": 11000
    },
    {
      "epoch": 0.8826802778524006,
      "grad_norm": 0.12717817723751068,
      "learning_rate": 4.621056732292549e-05,
      "loss": 0.1349,
      "step": 11500
    },
    {
      "epoch": 0.9210576812372875,
      "grad_norm": 0.0902245044708252,
      "learning_rate": 4.566178885605181e-05,
      "loss": 0.1338,
      "step": 12000
    },
    {
      "epoch": 0.9594350846221744,
      "grad_norm": 0.08335994184017181,
      "learning_rate": 4.507964589495708e-05,
      "loss": 0.1336,
      "step": 12500
    },
    {
      "epoch": 0.9978124880070615,
      "grad_norm": 0.10289563983678818,
      "learning_rate": 4.446507847951861e-05,
      "loss": 0.1336,
      "step": 13000
    },
    {
      "epoch": 0.9997313581763058,
      "eval_avg": 0.04704636570056212,
      "eval_cer": 0.030927283997077136,
      "eval_der": 0.040125760649082305,
      "eval_loss": 0.12336452305316925,
      "eval_runtime": 1693.6943,
      "eval_samples_per_second": 30.563,
      "eval_steps_per_second": 0.383,
      "eval_wer": 0.07008605245552695,
      "step": 13025
    }
  ],
  "logging_steps": 500,
  "max_steps": 39087,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 2605,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.05467973861376e+17,
  "train_batch_size": 80,
  "trial_name": null,
  "trial_params": null
}