{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.0,
  "eval_steps": 1000.0,
  "global_step": 7693,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09099181073703366,
      "grad_norm": 12.070003509521484,
      "learning_rate": 3.17117900723503e-05,
      "loss": 11.3381,
      "step": 100
    },
    {
      "epoch": 0.18198362147406733,
      "grad_norm": 2.9062180519104004,
      "learning_rate": 6.34235801447006e-05,
      "loss": 4.0713,
      "step": 200
    },
    {
      "epoch": 0.272975432211101,
      "grad_norm": 2.260572910308838,
      "learning_rate": 6.45061097702771e-05,
      "loss": 3.2752,
      "step": 300
    },
    {
      "epoch": 0.36396724294813465,
      "grad_norm": 0.8717735409736633,
      "learning_rate": 6.363358021839996e-05,
      "loss": 3.1816,
      "step": 400
    },
    {
      "epoch": 0.4549590536851683,
      "grad_norm": 0.6315112113952637,
      "learning_rate": 6.276105066652282e-05,
      "loss": 3.1526,
      "step": 500
    },
    {
      "epoch": 0.545950864422202,
      "grad_norm": 2.5357179641723633,
      "learning_rate": 6.188852111464567e-05,
      "loss": 2.9969,
      "step": 600
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 1.636403203010559,
      "learning_rate": 6.101599156276853e-05,
      "loss": 1.6074,
      "step": 700
    },
    {
      "epoch": 0.7279344858962693,
      "grad_norm": 1.3069971799850464,
      "learning_rate": 6.014346201089139e-05,
      "loss": 0.9392,
      "step": 800
    },
    {
      "epoch": 0.818926296633303,
      "grad_norm": 1.4789917469024658,
      "learning_rate": 5.927093245901425e-05,
      "loss": 0.7199,
      "step": 900
    },
    {
      "epoch": 0.9099181073703366,
      "grad_norm": 1.1590214967727661,
      "learning_rate": 5.839840290713711e-05,
      "loss": 0.6324,
      "step": 1000
    },
    {
      "epoch": 0.9099181073703366,
      "eval_cer": 0.23809921148149693,
      "eval_loss": 0.5003824830055237,
      "eval_runtime": 16.9015,
      "eval_samples_per_second": 29.583,
      "eval_steps_per_second": 0.947,
      "eval_wer": 0.6082649306538352,
      "step": 1000
    },
    {
      "epoch": 1.0009099181073704,
      "grad_norm": 1.4140914678573608,
      "learning_rate": 5.752587335525997e-05,
      "loss": 0.5548,
      "step": 1100
    },
    {
      "epoch": 1.091901728844404,
      "grad_norm": 2.3637049198150635,
      "learning_rate": 5.6653343803382825e-05,
      "loss": 0.4844,
      "step": 1200
    },
    {
      "epoch": 1.1828935395814377,
      "grad_norm": 1.3760229349136353,
      "learning_rate": 5.578081425150568e-05,
      "loss": 0.466,
      "step": 1300
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 1.446753978729248,
      "learning_rate": 5.490828469962854e-05,
      "loss": 0.4387,
      "step": 1400
    },
    {
      "epoch": 1.364877161055505,
      "grad_norm": 1.2168169021606445,
      "learning_rate": 5.40357551477514e-05,
      "loss": 0.409,
      "step": 1500
    },
    {
      "epoch": 1.4558689717925386,
      "grad_norm": 1.9888843297958374,
      "learning_rate": 5.3163225595874256e-05,
      "loss": 0.3875,
      "step": 1600
    },
    {
      "epoch": 1.5468607825295724,
      "grad_norm": 1.507049560546875,
      "learning_rate": 5.2290696043997114e-05,
      "loss": 0.3851,
      "step": 1700
    },
    {
      "epoch": 1.6378525932666061,
      "grad_norm": 2.9892451763153076,
      "learning_rate": 5.141816649211998e-05,
      "loss": 0.372,
      "step": 1800
    },
    {
      "epoch": 1.7288444040036397,
      "grad_norm": 1.2426109313964844,
      "learning_rate": 5.0545636940242836e-05,
      "loss": 0.3753,
      "step": 1900
    },
    {
      "epoch": 1.8198362147406733,
      "grad_norm": 1.0674372911453247,
      "learning_rate": 4.9673107388365694e-05,
      "loss": 0.3497,
      "step": 2000
    },
    {
      "epoch": 1.8198362147406733,
      "eval_cer": 0.196545537986566,
      "eval_loss": 0.30870166420936584,
      "eval_runtime": 16.7127,
      "eval_samples_per_second": 29.917,
      "eval_steps_per_second": 0.957,
      "eval_wer": 0.4650438720634022,
      "step": 2000
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 1.1968690156936646,
      "learning_rate": 4.880057783648855e-05,
      "loss": 0.3393,
      "step": 2100
    },
    {
      "epoch": 2.001819836214741,
      "grad_norm": 1.350705623626709,
      "learning_rate": 4.792804828461141e-05,
      "loss": 0.3421,
      "step": 2200
    },
    {
      "epoch": 2.092811646951774,
      "grad_norm": 1.293790340423584,
      "learning_rate": 4.705551873273427e-05,
      "loss": 0.3002,
      "step": 2300
    },
    {
      "epoch": 2.183803457688808,
      "grad_norm": 1.174820065498352,
      "learning_rate": 4.618298918085712e-05,
      "loss": 0.3017,
      "step": 2400
    },
    {
      "epoch": 2.2747952684258417,
      "grad_norm": 1.1537854671478271,
      "learning_rate": 4.5310459628979975e-05,
      "loss": 0.2996,
      "step": 2500
    },
    {
      "epoch": 2.3657870791628755,
      "grad_norm": 1.1924264430999756,
      "learning_rate": 4.443793007710283e-05,
      "loss": 0.2893,
      "step": 2600
    },
    {
      "epoch": 2.4567788898999092,
      "grad_norm": 0.8446494936943054,
      "learning_rate": 4.356540052522569e-05,
      "loss": 0.2789,
      "step": 2700
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 1.1881588697433472,
      "learning_rate": 4.2692870973348555e-05,
      "loss": 0.2776,
      "step": 2800
    },
    {
      "epoch": 2.6387625113739763,
      "grad_norm": 1.0319584608078003,
      "learning_rate": 4.182034142147141e-05,
      "loss": 0.2645,
      "step": 2900
    },
    {
      "epoch": 2.72975432211101,
      "grad_norm": 0.9115346670150757,
      "learning_rate": 4.094781186959427e-05,
      "loss": 0.2642,
      "step": 3000
    },
    {
      "epoch": 2.72975432211101,
      "eval_cer": 0.18407109182694315,
      "eval_loss": 0.2636227309703827,
      "eval_runtime": 16.7759,
      "eval_samples_per_second": 29.805,
      "eval_steps_per_second": 0.954,
      "eval_wer": 0.4248514010755732,
      "step": 3000
    },
    {
      "epoch": 2.8207461328480434,
      "grad_norm": 1.4283329248428345,
      "learning_rate": 4.007528231771713e-05,
      "loss": 0.279,
      "step": 3100
    },
    {
      "epoch": 2.911737943585077,
      "grad_norm": 0.9780188798904419,
      "learning_rate": 3.9202752765839986e-05,
      "loss": 0.2656,
      "step": 3200
    },
    {
      "epoch": 3.002729754322111,
      "grad_norm": 0.9775941371917725,
      "learning_rate": 3.8330223213962844e-05,
      "loss": 0.2661,
      "step": 3300
    },
    {
      "epoch": 3.0937215650591448,
      "grad_norm": 1.2078616619110107,
      "learning_rate": 3.74576936620857e-05,
      "loss": 0.2331,
      "step": 3400
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 1.5175094604492188,
      "learning_rate": 3.658516411020856e-05,
      "loss": 0.2413,
      "step": 3500
    },
    {
      "epoch": 3.275705186533212,
      "grad_norm": 1.2120680809020996,
      "learning_rate": 3.571263455833142e-05,
      "loss": 0.2272,
      "step": 3600
    },
    {
      "epoch": 3.3666969972702456,
      "grad_norm": 1.4247957468032837,
      "learning_rate": 3.484010500645428e-05,
      "loss": 0.2309,
      "step": 3700
    },
    {
      "epoch": 3.4576888080072794,
      "grad_norm": 0.9986005425453186,
      "learning_rate": 3.396757545457714e-05,
      "loss": 0.2364,
      "step": 3800
    },
    {
      "epoch": 3.548680618744313,
      "grad_norm": 1.8682105541229248,
      "learning_rate": 3.30950459027e-05,
      "loss": 0.2366,
      "step": 3900
    },
    {
      "epoch": 3.6396724294813465,
      "grad_norm": 1.6900557279586792,
      "learning_rate": 3.2222516350822855e-05,
      "loss": 0.2328,
      "step": 4000
    },
    {
      "epoch": 3.6396724294813465,
      "eval_cer": 0.17885602236221787,
      "eval_loss": 0.24313220381736755,
      "eval_runtime": 16.6038,
      "eval_samples_per_second": 30.114,
      "eval_steps_per_second": 0.964,
      "eval_wer": 0.3959807529012171,
      "step": 4000
    },
    {
      "epoch": 3.7306642402183803,
      "grad_norm": 1.7356419563293457,
      "learning_rate": 3.134998679894571e-05,
      "loss": 0.2165,
      "step": 4100
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 1.163643479347229,
      "learning_rate": 3.0477457247068567e-05,
      "loss": 0.2151,
      "step": 4200
    },
    {
      "epoch": 3.912647861692448,
      "grad_norm": 1.036080241203308,
      "learning_rate": 2.9604927695191425e-05,
      "loss": 0.218,
      "step": 4300
    },
    {
      "epoch": 4.003639672429482,
      "grad_norm": 4.445271015167236,
      "learning_rate": 2.8732398143314286e-05,
      "loss": 0.2095,
      "step": 4400
    },
    {
      "epoch": 4.094631483166515,
      "grad_norm": 1.1208213567733765,
      "learning_rate": 2.7859868591437143e-05,
      "loss": 0.2027,
      "step": 4500
    },
    {
      "epoch": 4.185623293903548,
      "grad_norm": 1.2286118268966675,
      "learning_rate": 2.6987339039559998e-05,
      "loss": 0.2022,
      "step": 4600
    },
    {
      "epoch": 4.276615104640582,
      "grad_norm": 0.8969898819923401,
      "learning_rate": 2.6114809487682856e-05,
      "loss": 0.2038,
      "step": 4700
    },
    {
      "epoch": 4.367606915377616,
      "grad_norm": 0.9858496785163879,
      "learning_rate": 2.5242279935805717e-05,
      "loss": 0.1959,
      "step": 4800
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 0.8040638566017151,
      "learning_rate": 2.4369750383928574e-05,
      "loss": 0.2075,
      "step": 4900
    },
    {
      "epoch": 4.549590536851683,
      "grad_norm": 1.1833239793777466,
      "learning_rate": 2.3497220832051432e-05,
      "loss": 0.1933,
      "step": 5000
    },
    {
      "epoch": 4.549590536851683,
      "eval_cer": 0.17322374734031457,
      "eval_loss": 0.22891011834144592,
      "eval_runtime": 16.6972,
      "eval_samples_per_second": 29.945,
      "eval_steps_per_second": 0.958,
      "eval_wer": 0.3772997452589867,
      "step": 5000
    },
    {
      "epoch": 4.640582347588717,
      "grad_norm": 2.7539749145507812,
      "learning_rate": 2.262469128017429e-05,
      "loss": 0.2037,
      "step": 5100
    },
    {
      "epoch": 4.731574158325751,
      "grad_norm": 1.6865957975387573,
      "learning_rate": 2.1752161728297148e-05,
      "loss": 0.1968,
      "step": 5200
    },
    {
      "epoch": 4.822565969062785,
      "grad_norm": 0.922374427318573,
      "learning_rate": 2.087963217642001e-05,
      "loss": 0.1999,
      "step": 5300
    },
    {
      "epoch": 4.9135577797998184,
      "grad_norm": 0.786503791809082,
      "learning_rate": 2.0007102624542866e-05,
      "loss": 0.1885,
      "step": 5400
    },
    {
      "epoch": 5.004549590536851,
      "grad_norm": 0.8755506277084351,
      "learning_rate": 1.913457307266572e-05,
      "loss": 0.1916,
      "step": 5500
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 1.141218662261963,
      "learning_rate": 1.826204352078858e-05,
      "loss": 0.1806,
      "step": 5600
    },
    {
      "epoch": 5.186533212010919,
      "grad_norm": 1.0689939260482788,
      "learning_rate": 1.7389513968911436e-05,
      "loss": 0.1849,
      "step": 5700
    },
    {
      "epoch": 5.277525022747953,
      "grad_norm": 1.020820140838623,
      "learning_rate": 1.6516984417034297e-05,
      "loss": 0.1699,
      "step": 5800
    },
    {
      "epoch": 5.368516833484986,
      "grad_norm": 1.7188619375228882,
      "learning_rate": 1.5644454865157155e-05,
      "loss": 0.1816,
      "step": 5900
    },
    {
      "epoch": 5.45950864422202,
      "grad_norm": 1.033896565437317,
      "learning_rate": 1.4771925313280013e-05,
      "loss": 0.1783,
      "step": 6000
    },
    {
      "epoch": 5.45950864422202,
      "eval_cer": 0.17109599899870667,
      "eval_loss": 0.22998099029064178,
      "eval_runtime": 16.5438,
      "eval_samples_per_second": 30.223,
      "eval_steps_per_second": 0.967,
      "eval_wer": 0.37277101613359753,
      "step": 6000
    },
    {
      "epoch": 5.550500454959054,
      "grad_norm": 2.6767489910125732,
      "learning_rate": 1.3899395761402872e-05,
      "loss": 0.1741,
      "step": 6100
    },
    {
      "epoch": 5.641492265696087,
      "grad_norm": 1.5569095611572266,
      "learning_rate": 1.3026866209525728e-05,
      "loss": 0.1769,
      "step": 6200
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 0.6999034881591797,
      "learning_rate": 1.2154336657648588e-05,
      "loss": 0.176,
      "step": 6300
    },
    {
      "epoch": 5.823475887170154,
      "grad_norm": 1.3141757249832153,
      "learning_rate": 1.1281807105771445e-05,
      "loss": 0.1763,
      "step": 6400
    },
    {
      "epoch": 5.914467697907188,
      "grad_norm": 1.060334324836731,
      "learning_rate": 1.0409277553894303e-05,
      "loss": 0.1727,
      "step": 6500
    },
    {
      "epoch": 6.005459508644222,
      "grad_norm": 1.0684072971343994,
      "learning_rate": 9.53674800201716e-06,
      "loss": 0.1696,
      "step": 6600
    },
    {
      "epoch": 6.096451319381256,
      "grad_norm": 1.1349165439605713,
      "learning_rate": 8.664218450140019e-06,
      "loss": 0.1613,
      "step": 6700
    },
    {
      "epoch": 6.1874431301182895,
      "grad_norm": 0.6644859313964844,
      "learning_rate": 7.791688898262876e-06,
      "loss": 0.1642,
      "step": 6800
    },
    {
      "epoch": 6.278434940855323,
      "grad_norm": 0.9588508009910583,
      "learning_rate": 6.919159346385735e-06,
      "loss": 0.1679,
      "step": 6900
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 1.6633976697921753,
      "learning_rate": 6.0466297945085934e-06,
      "loss": 0.1617,
      "step": 7000
    },
    {
      "epoch": 6.369426751592357,
      "eval_cer": 0.169969543994326,
      "eval_loss": 0.2233332395553589,
      "eval_runtime": 16.6328,
      "eval_samples_per_second": 30.061,
      "eval_steps_per_second": 0.962,
      "eval_wer": 0.3637135578828191,
      "step": 7000
    },
    {
      "epoch": 6.460418562329391,
      "grad_norm": 0.7526870369911194,
      "learning_rate": 5.174100242631451e-06,
      "loss": 0.1678,
      "step": 7100
    },
    {
      "epoch": 6.551410373066424,
      "grad_norm": 1.6900984048843384,
      "learning_rate": 4.30157069075431e-06,
      "loss": 0.1653,
      "step": 7200
    },
    {
      "epoch": 6.6424021838034575,
      "grad_norm": 0.7345483303070068,
      "learning_rate": 3.429041138877168e-06,
      "loss": 0.1662,
      "step": 7300
    },
    {
      "epoch": 6.733393994540491,
      "grad_norm": 0.7114779949188232,
      "learning_rate": 2.5565115870000256e-06,
      "loss": 0.1579,
      "step": 7400
    },
    {
      "epoch": 6.824385805277525,
      "grad_norm": 0.9554657936096191,
      "learning_rate": 1.683982035122884e-06,
      "loss": 0.1588,
      "step": 7500
    },
    {
      "epoch": 6.915377616014559,
      "grad_norm": 0.8747773170471191,
      "learning_rate": 8.114524832457419e-07,
      "loss": 0.1651,
      "step": 7600
    },
    {
      "epoch": 7.0,
      "step": 7693,
      "total_flos": 5.670215301911177e+19,
      "train_loss": 0.6279906528631082,
      "train_runtime": 12613.1325,
      "train_samples_per_second": 19.504,
      "train_steps_per_second": 0.61
    }
  ],
  "logging_steps": 100,
  "max_steps": 7693,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.670215301911177e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}