{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.0,
  "eval_steps": 1000.0,
  "global_step": 7693,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09099181073703366,
      "grad_norm": 7.5102996826171875,
      "learning_rate": 4.270214798827307e-05,
      "loss": 9.9184,
      "step": 100
    },
    {
      "epoch": 0.18198362147406733,
      "grad_norm": 1.3230904340744019,
      "learning_rate": 4.299387943270356e-05,
      "loss": 3.5315,
      "step": 200
    },
    {
      "epoch": 0.272975432211101,
      "grad_norm": 0.9986037015914917,
      "learning_rate": 4.2420092172157666e-05,
      "loss": 3.2339,
      "step": 300
    },
    {
      "epoch": 0.36396724294813465,
      "grad_norm": 0.46828845143318176,
      "learning_rate": 4.184630491161178e-05,
      "loss": 3.1838,
      "step": 400
    },
    {
      "epoch": 0.4549590536851683,
      "grad_norm": 0.8114163875579834,
      "learning_rate": 4.1272517651065886e-05,
      "loss": 3.1627,
      "step": 500
    },
    {
      "epoch": 0.545950864422202,
      "grad_norm": 0.4831515848636627,
      "learning_rate": 4.069873039051999e-05,
      "loss": 3.1449,
      "step": 600
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 0.8736672401428223,
      "learning_rate": 4.0124943129974106e-05,
      "loss": 2.966,
      "step": 700
    },
    {
      "epoch": 0.7279344858962693,
      "grad_norm": 1.4908201694488525,
      "learning_rate": 3.955115586942821e-05,
      "loss": 1.8526,
      "step": 800
    },
    {
      "epoch": 0.818926296633303,
      "grad_norm": 1.4355599880218506,
      "learning_rate": 3.8977368608882326e-05,
      "loss": 1.1569,
      "step": 900
    },
    {
      "epoch": 0.9099181073703366,
      "grad_norm": 2.112861394882202,
      "learning_rate": 3.840358134833644e-05,
      "loss": 0.9556,
      "step": 1000
    },
    {
      "epoch": 0.9099181073703366,
      "eval_cer": 0.297551003379365,
      "eval_loss": 0.7513839602470398,
      "eval_runtime": 16.7143,
      "eval_samples_per_second": 29.914,
      "eval_steps_per_second": 0.957,
      "eval_wer": 0.7659213133314464,
      "step": 1000
    },
    {
      "epoch": 1.0009099181073704,
      "grad_norm": 1.5618329048156738,
      "learning_rate": 3.7829794087790545e-05,
      "loss": 0.8382,
      "step": 1100
    },
    {
      "epoch": 1.091901728844404,
      "grad_norm": 2.229194164276123,
      "learning_rate": 3.725600682724466e-05,
      "loss": 0.7385,
      "step": 1200
    },
    {
      "epoch": 1.1828935395814377,
      "grad_norm": 1.4764809608459473,
      "learning_rate": 3.6682219566698765e-05,
      "loss": 0.6983,
      "step": 1300
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 3.041120767593384,
      "learning_rate": 3.610843230615287e-05,
      "loss": 0.6448,
      "step": 1400
    },
    {
      "epoch": 1.364877161055505,
      "grad_norm": 1.5701287984848022,
      "learning_rate": 3.5534645045606985e-05,
      "loss": 0.6075,
      "step": 1500
    },
    {
      "epoch": 1.4558689717925386,
      "grad_norm": 1.530661702156067,
      "learning_rate": 3.496085778506109e-05,
      "loss": 0.5734,
      "step": 1600
    },
    {
      "epoch": 1.5468607825295724,
      "grad_norm": 1.5015157461166382,
      "learning_rate": 3.4387070524515204e-05,
      "loss": 0.5511,
      "step": 1700
    },
    {
      "epoch": 1.6378525932666061,
      "grad_norm": 3.4556000232696533,
      "learning_rate": 3.381328326396932e-05,
      "loss": 0.5314,
      "step": 1800
    },
    {
      "epoch": 1.7288444040036397,
      "grad_norm": 1.5478144884109497,
      "learning_rate": 3.3239496003423424e-05,
      "loss": 0.521,
      "step": 1900
    },
    {
      "epoch": 1.8198362147406733,
      "grad_norm": 1.625609040260315,
      "learning_rate": 3.266570874287753e-05,
      "loss": 0.4941,
      "step": 2000
    },
    {
      "epoch": 1.8198362147406733,
      "eval_cer": 0.21740581584546706,
      "eval_loss": 0.3986930549144745,
      "eval_runtime": 16.3992,
      "eval_samples_per_second": 30.489,
      "eval_steps_per_second": 0.976,
      "eval_wer": 0.5471270874610812,
      "step": 2000
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 1.2447913885116577,
      "learning_rate": 3.209192148233164e-05,
      "loss": 0.4767,
      "step": 2100
    },
    {
      "epoch": 2.001819836214741,
      "grad_norm": 1.7874252796173096,
      "learning_rate": 3.151813422178575e-05,
      "loss": 0.4683,
      "step": 2200
    },
    {
      "epoch": 2.092811646951774,
      "grad_norm": 2.041367530822754,
      "learning_rate": 3.0944346961239864e-05,
      "loss": 0.4324,
      "step": 2300
    },
    {
      "epoch": 2.183803457688808,
      "grad_norm": 1.0832343101501465,
      "learning_rate": 3.037055970069397e-05,
      "loss": 0.4308,
      "step": 2400
    },
    {
      "epoch": 2.2747952684258417,
      "grad_norm": 1.5363576412200928,
      "learning_rate": 2.9796772440148083e-05,
      "loss": 0.4154,
      "step": 2500
    },
    {
      "epoch": 2.3657870791628755,
      "grad_norm": 2.582390308380127,
      "learning_rate": 2.9222985179602193e-05,
      "loss": 0.4049,
      "step": 2600
    },
    {
      "epoch": 2.4567788898999092,
      "grad_norm": 1.7013890743255615,
      "learning_rate": 2.86491979190563e-05,
      "loss": 0.3905,
      "step": 2700
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 1.6584831476211548,
      "learning_rate": 2.8075410658510413e-05,
      "loss": 0.389,
      "step": 2800
    },
    {
      "epoch": 2.6387625113739763,
      "grad_norm": 2.1508281230926514,
      "learning_rate": 2.750162339796452e-05,
      "loss": 0.3679,
      "step": 2900
    },
    {
      "epoch": 2.72975432211101,
      "grad_norm": 1.0315163135528564,
      "learning_rate": 2.692783613741863e-05,
      "loss": 0.3694,
      "step": 3000
    },
    {
      "epoch": 2.72975432211101,
      "eval_cer": 0.20046727022403937,
      "eval_loss": 0.3281673192977905,
      "eval_runtime": 16.5376,
      "eval_samples_per_second": 30.234,
      "eval_steps_per_second": 0.967,
      "eval_wer": 0.4874044721200113,
      "step": 3000
    },
    {
      "epoch": 2.8207461328480434,
      "grad_norm": 1.1783397197723389,
      "learning_rate": 2.6354048876872742e-05,
      "loss": 0.3776,
      "step": 3100
    },
    {
      "epoch": 2.911737943585077,
      "grad_norm": 1.5299417972564697,
      "learning_rate": 2.578026161632685e-05,
      "loss": 0.3671,
      "step": 3200
    },
    {
      "epoch": 3.002729754322111,
      "grad_norm": 1.179459810256958,
      "learning_rate": 2.520647435578096e-05,
      "loss": 0.3588,
      "step": 3300
    },
    {
      "epoch": 3.0937215650591448,
      "grad_norm": 2.9286251068115234,
      "learning_rate": 2.463268709523507e-05,
      "loss": 0.3383,
      "step": 3400
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 4.09226131439209,
      "learning_rate": 2.405889983468918e-05,
      "loss": 0.3374,
      "step": 3500
    },
    {
      "epoch": 3.275705186533212,
      "grad_norm": 1.4895093441009521,
      "learning_rate": 2.348511257414329e-05,
      "loss": 0.3231,
      "step": 3600
    },
    {
      "epoch": 3.3666969972702456,
      "grad_norm": 1.2932852506637573,
      "learning_rate": 2.2911325313597398e-05,
      "loss": 0.3286,
      "step": 3700
    },
    {
      "epoch": 3.4576888080072794,
      "grad_norm": 1.4036494493484497,
      "learning_rate": 2.2337538053051508e-05,
      "loss": 0.3326,
      "step": 3800
    },
    {
      "epoch": 3.548680618744313,
      "grad_norm": 2.6920440196990967,
      "learning_rate": 2.1763750792505618e-05,
      "loss": 0.3291,
      "step": 3900
    },
    {
      "epoch": 3.6396724294813465,
      "grad_norm": 2.416705369949341,
      "learning_rate": 2.1189963531959728e-05,
      "loss": 0.3199,
      "step": 4000
    },
    {
      "epoch": 3.6396724294813465,
      "eval_cer": 0.19007885185030665,
      "eval_loss": 0.28464144468307495,
      "eval_runtime": 16.6491,
      "eval_samples_per_second": 30.032,
      "eval_steps_per_second": 0.961,
      "eval_wer": 0.4506085479762242,
      "step": 4000
    },
    {
      "epoch": 3.7306642402183803,
      "grad_norm": 2.419663906097412,
      "learning_rate": 2.0616176271413838e-05,
      "loss": 0.3038,
      "step": 4100
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 1.5200859308242798,
      "learning_rate": 2.0042389010867947e-05,
      "loss": 0.3002,
      "step": 4200
    },
    {
      "epoch": 3.912647861692448,
      "grad_norm": 1.2179443836212158,
      "learning_rate": 1.9468601750322057e-05,
      "loss": 0.302,
      "step": 4300
    },
    {
      "epoch": 4.003639672429482,
      "grad_norm": 1.685207724571228,
      "learning_rate": 1.8894814489776167e-05,
      "loss": 0.2969,
      "step": 4400
    },
    {
      "epoch": 4.094631483166515,
      "grad_norm": 1.147800326347351,
      "learning_rate": 1.8321027229230277e-05,
      "loss": 0.2882,
      "step": 4500
    },
    {
      "epoch": 4.185623293903548,
      "grad_norm": 2.0389134883880615,
      "learning_rate": 1.7747239968684387e-05,
      "loss": 0.2833,
      "step": 4600
    },
    {
      "epoch": 4.276615104640582,
      "grad_norm": 2.9132614135742188,
      "learning_rate": 1.7173452708138497e-05,
      "loss": 0.2912,
      "step": 4700
    },
    {
      "epoch": 4.367606915377616,
      "grad_norm": 1.1282093524932861,
      "learning_rate": 1.6599665447592606e-05,
      "loss": 0.2806,
      "step": 4800
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 0.9727948904037476,
      "learning_rate": 1.6025878187046713e-05,
      "loss": 0.2883,
      "step": 4900
    },
    {
      "epoch": 4.549590536851683,
      "grad_norm": 1.485788106918335,
      "learning_rate": 1.5452090926500826e-05,
      "loss": 0.2805,
      "step": 5000
    },
    {
      "epoch": 4.549590536851683,
      "eval_cer": 0.1855313112770662,
      "eval_loss": 0.2716318368911743,
      "eval_runtime": 16.6008,
      "eval_samples_per_second": 30.119,
      "eval_steps_per_second": 0.964,
      "eval_wer": 0.4254174922162468,
      "step": 5000
    },
    {
      "epoch": 4.640582347588717,
      "grad_norm": 11.880212783813477,
      "learning_rate": 1.4878303665954934e-05,
      "loss": 0.2827,
      "step": 5100
    },
    {
      "epoch": 4.731574158325751,
      "grad_norm": 3.0947282314300537,
      "learning_rate": 1.4304516405409044e-05,
      "loss": 0.2759,
      "step": 5200
    },
    {
      "epoch": 4.822565969062785,
      "grad_norm": 1.0977256298065186,
      "learning_rate": 1.3730729144863152e-05,
      "loss": 0.2785,
      "step": 5300
    },
    {
      "epoch": 4.9135577797998184,
      "grad_norm": 1.035343050956726,
      "learning_rate": 1.3156941884317266e-05,
      "loss": 0.2713,
      "step": 5400
    },
    {
      "epoch": 5.004549590536851,
      "grad_norm": 1.091783881187439,
      "learning_rate": 1.2583154623771374e-05,
      "loss": 0.2768,
      "step": 5500
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 1.1632474660873413,
      "learning_rate": 1.2009367363225484e-05,
      "loss": 0.2633,
      "step": 5600
    },
    {
      "epoch": 5.186533212010919,
      "grad_norm": 1.4272072315216064,
      "learning_rate": 1.1435580102679592e-05,
      "loss": 0.2664,
      "step": 5700
    },
    {
      "epoch": 5.277525022747953,
      "grad_norm": 1.1368160247802734,
      "learning_rate": 1.0861792842133703e-05,
      "loss": 0.2516,
      "step": 5800
    },
    {
      "epoch": 5.368516833484986,
      "grad_norm": 10.24752426147461,
      "learning_rate": 1.0288005581587813e-05,
      "loss": 0.2588,
      "step": 5900
    },
    {
      "epoch": 5.45950864422202,
      "grad_norm": 1.692718505859375,
      "learning_rate": 9.714218321041923e-06,
      "loss": 0.2572,
      "step": 6000
    },
    {
      "epoch": 5.45950864422202,
      "eval_cer": 0.18102549125954356,
      "eval_loss": 0.26223766803741455,
      "eval_runtime": 16.6819,
      "eval_samples_per_second": 29.973,
      "eval_steps_per_second": 0.959,
      "eval_wer": 0.40843475799603735,
      "step": 6000
    },
    {
      "epoch": 5.550500454959054,
      "grad_norm": 2.930900812149048,
      "learning_rate": 9.140431060496031e-06,
      "loss": 0.2559,
      "step": 6100
    },
    {
      "epoch": 5.641492265696087,
      "grad_norm": 1.0715835094451904,
      "learning_rate": 8.566643799950141e-06,
      "loss": 0.2593,
      "step": 6200
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 1.025639533996582,
      "learning_rate": 7.992856539404251e-06,
      "loss": 0.2531,
      "step": 6300
    },
    {
      "epoch": 5.823475887170154,
      "grad_norm": 1.0805244445800781,
      "learning_rate": 7.4190692788583616e-06,
      "loss": 0.2541,
      "step": 6400
    },
    {
      "epoch": 5.914467697907188,
      "grad_norm": 1.0423027276992798,
      "learning_rate": 6.845282018312471e-06,
      "loss": 0.2523,
      "step": 6500
    },
    {
      "epoch": 6.005459508644222,
      "grad_norm": 0.9967121481895447,
      "learning_rate": 6.271494757766581e-06,
      "loss": 0.2494,
      "step": 6600
    },
    {
      "epoch": 6.096451319381256,
      "grad_norm": 1.4251012802124023,
      "learning_rate": 5.69770749722069e-06,
      "loss": 0.2398,
      "step": 6700
    },
    {
      "epoch": 6.1874431301182895,
      "grad_norm": 1.3021987676620483,
      "learning_rate": 5.1239202366748e-06,
      "loss": 0.2451,
      "step": 6800
    },
    {
      "epoch": 6.278434940855323,
      "grad_norm": 0.9879375100135803,
      "learning_rate": 4.55013297612891e-06,
      "loss": 0.2467,
      "step": 6900
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 2.257263660430908,
      "learning_rate": 3.976345715583019e-06,
      "loss": 0.2389,
      "step": 7000
    },
    {
      "epoch": 6.369426751592357,
      "eval_cer": 0.17910634569652467,
      "eval_loss": 0.24834765493869781,
      "eval_runtime": 16.597,
      "eval_samples_per_second": 30.126,
      "eval_steps_per_second": 0.964,
      "eval_wer": 0.4002264364562695,
      "step": 7000
    },
    {
      "epoch": 6.460418562329391,
      "grad_norm": 1.686767578125,
      "learning_rate": 3.4025584550371293e-06,
      "loss": 0.2532,
      "step": 7100
    },
    {
      "epoch": 6.551410373066424,
      "grad_norm": 1.9620840549468994,
      "learning_rate": 2.8287711944912387e-06,
      "loss": 0.2427,
      "step": 7200
    },
    {
      "epoch": 6.6424021838034575,
      "grad_norm": 1.0499871969223022,
      "learning_rate": 2.2549839339453486e-06,
      "loss": 0.2449,
      "step": 7300
    },
    {
      "epoch": 6.733393994540491,
      "grad_norm": 2.9436795711517334,
      "learning_rate": 1.6811966733994584e-06,
      "loss": 0.2428,
      "step": 7400
    },
    {
      "epoch": 6.824385805277525,
      "grad_norm": 1.0118967294692993,
      "learning_rate": 1.1074094128535683e-06,
      "loss": 0.2341,
      "step": 7500
    },
    {
      "epoch": 6.915377616014559,
      "grad_norm": 1.00021493434906,
      "learning_rate": 5.336221523076779e-07,
      "loss": 0.2468,
      "step": 7600
    },
    {
      "epoch": 7.0,
      "step": 7693,
      "total_flos": 5.670215301911177e+19,
      "train_loss": 0.7367133075233617,
      "train_runtime": 12578.5255,
      "train_samples_per_second": 19.558,
      "train_steps_per_second": 0.612
    }
  ],
  "logging_steps": 100,
  "max_steps": 7693,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.670215301911177e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}