{
  "best_global_step": 32400,
  "best_metric": 0.19510757441791923,
  "best_model_checkpoint": "./distil-whisper/checkpoint-32400",
  "epoch": 29.996971316586496,
  "eval_steps": 400,
  "global_step": 42090,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07126313914127917,
      "grad_norm": 681.8389892578125,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 1297.2127,
      "step": 100
    },
    {
      "epoch": 0.14252627828255834,
      "grad_norm": 464.8807067871094,
      "learning_rate": 3.7e-05,
      "loss": 187.2825,
      "step": 200
    },
    {
      "epoch": 0.21378941742383753,
      "grad_norm": 389.95465087890625,
      "learning_rate": 5.6999999999999996e-05,
      "loss": 79.5026,
      "step": 300
    },
    {
      "epoch": 0.2850525565651167,
      "grad_norm": 271.1416931152344,
      "learning_rate": 7.7e-05,
      "loss": 46.3716,
      "step": 400
    },
    {
      "epoch": 0.2850525565651167,
      "eval_loss": 0.16972535848617554,
      "eval_runtime": 171.9129,
      "eval_samples_per_second": 2.908,
      "eval_steps_per_second": 0.366,
      "eval_wer": 0.6097848511641615,
      "step": 400
    },
    {
      "epoch": 0.3563156957063959,
      "grad_norm": 200.9510955810547,
      "learning_rate": 9.7e-05,
      "loss": 32.0457,
      "step": 500
    },
    {
      "epoch": 0.42757883484767506,
      "grad_norm": 148.45339965820312,
      "learning_rate": 9.937176644493719e-05,
      "loss": 24.0656,
      "step": 600
    },
    {
      "epoch": 0.4988419739889542,
      "grad_norm": 204.2472686767578,
      "learning_rate": 9.863266814486328e-05,
      "loss": 19.3495,
      "step": 700
    },
    {
      "epoch": 0.5701051131302334,
      "grad_norm": 113.892578125,
      "learning_rate": 9.789356984478936e-05,
      "loss": 16.3556,
      "step": 800
    },
    {
      "epoch": 0.5701051131302334,
      "eval_loss": 0.13547594845294952,
      "eval_runtime": 162.518,
      "eval_samples_per_second": 3.077,
      "eval_steps_per_second": 0.388,
      "eval_wer": 0.3555850279988211,
      "step": 800
    },
    {
      "epoch": 0.6413682522715125,
      "grad_norm": 103.74024963378906,
      "learning_rate": 9.715447154471545e-05,
      "loss": 14.24,
      "step": 900
    },
    {
      "epoch": 0.7126313914127917,
      "grad_norm": 181.4265594482422,
      "learning_rate": 9.641537324464154e-05,
      "loss": 12.9121,
      "step": 1000
    },
    {
      "epoch": 0.7838945305540709,
      "grad_norm": 116.5692138671875,
      "learning_rate": 9.567627494456764e-05,
      "loss": 11.8352,
      "step": 1100
    },
    {
      "epoch": 0.8551576696953501,
      "grad_norm": 106.76929473876953,
      "learning_rate": 9.493717664449371e-05,
      "loss": 11.9327,
      "step": 1200
    },
    {
      "epoch": 0.8551576696953501,
      "eval_loss": 0.12299185246229172,
      "eval_runtime": 159.9552,
      "eval_samples_per_second": 3.126,
      "eval_steps_per_second": 0.394,
      "eval_wer": 0.30002947244326555,
      "step": 1200
    },
    {
      "epoch": 0.9264208088366293,
      "grad_norm": 148.32879638671875,
      "learning_rate": 9.419807834441981e-05,
      "loss": 10.2177,
      "step": 1300
    },
    {
      "epoch": 0.9976839479779084,
      "grad_norm": 166.2821807861328,
      "learning_rate": 9.34589800443459e-05,
      "loss": 10.1695,
      "step": 1400
    },
    {
      "epoch": 1.068412613575628,
      "grad_norm": 108.40921020507812,
      "learning_rate": 9.2719881744272e-05,
      "loss": 7.8838,
      "step": 1500
    },
    {
      "epoch": 1.1396757527169072,
      "grad_norm": 81.83760833740234,
      "learning_rate": 9.198078344419808e-05,
      "loss": 8.1222,
      "step": 1600
    },
    {
      "epoch": 1.1396757527169072,
      "eval_loss": 0.11962544173002243,
      "eval_runtime": 154.6688,
      "eval_samples_per_second": 3.233,
      "eval_steps_per_second": 0.407,
      "eval_wer": 0.2543471853816681,
      "step": 1600
    },
    {
      "epoch": 1.2109388918581863,
      "grad_norm": 66.1691665649414,
      "learning_rate": 9.124168514412418e-05,
      "loss": 7.4228,
      "step": 1700
    },
    {
      "epoch": 1.2822020309994655,
      "grad_norm": 98.77230072021484,
      "learning_rate": 9.050258684405026e-05,
      "loss": 7.4755,
      "step": 1800
    },
    {
      "epoch": 1.3534651701407447,
      "grad_norm": 94.88159942626953,
      "learning_rate": 8.976348854397635e-05,
      "loss": 7.2464,
      "step": 1900
    },
    {
      "epoch": 1.424728309282024,
      "grad_norm": 78.41780090332031,
      "learning_rate": 8.902439024390244e-05,
      "loss": 6.2775,
      "step": 2000
    },
    {
      "epoch": 1.424728309282024,
      "eval_loss": 0.11653754115104675,
      "eval_runtime": 157.609,
      "eval_samples_per_second": 3.172,
      "eval_steps_per_second": 0.4,
      "eval_wer": 0.26186265841438255,
      "step": 2000
    },
    {
      "epoch": 1.495991448423303,
      "grad_norm": 68.5005874633789,
      "learning_rate": 8.828529194382853e-05,
      "loss": 6.5,
      "step": 2100
    },
    {
      "epoch": 1.5672545875645822,
      "grad_norm": 123.60826873779297,
      "learning_rate": 8.754619364375463e-05,
      "loss": 6.7454,
      "step": 2200
    },
    {
      "epoch": 1.6385177267058615,
      "grad_norm": 60.07472229003906,
      "learning_rate": 8.680709534368072e-05,
      "loss": 6.4664,
      "step": 2300
    },
    {
      "epoch": 1.7097808658471405,
      "grad_norm": 91.09046173095703,
      "learning_rate": 8.60679970436068e-05,
      "loss": 5.6861,
      "step": 2400
    },
    {
      "epoch": 1.7097808658471405,
      "eval_loss": 0.11434157937765121,
      "eval_runtime": 159.2078,
      "eval_samples_per_second": 3.141,
      "eval_steps_per_second": 0.396,
      "eval_wer": 0.23902151488358384,
      "step": 2400
    },
    {
      "epoch": 1.7810440049884197,
      "grad_norm": 53.910728454589844,
      "learning_rate": 8.532889874353289e-05,
      "loss": 5.7159,
      "step": 2500
    },
    {
      "epoch": 1.852307144129699,
      "grad_norm": 47.61832046508789,
      "learning_rate": 8.458980044345899e-05,
      "loss": 5.5957,
      "step": 2600
    },
    {
      "epoch": 1.923570283270978,
      "grad_norm": 71.0946044921875,
      "learning_rate": 8.385070214338508e-05,
      "loss": 5.6279,
      "step": 2700
    },
    {
      "epoch": 1.9948334224122573,
      "grad_norm": 81.29457092285156,
      "learning_rate": 8.311160384331117e-05,
      "loss": 5.238,
      "step": 2800
    },
    {
      "epoch": 1.9948334224122573,
      "eval_loss": 0.11149635910987854,
      "eval_runtime": 159.9177,
      "eval_samples_per_second": 3.127,
      "eval_steps_per_second": 0.394,
      "eval_wer": 0.23460064839375183,
      "step": 2800
    },
    {
      "epoch": 2.065562088009977,
      "grad_norm": 42.97408676147461,
      "learning_rate": 8.237250554323725e-05,
      "loss": 4.1653,
      "step": 2900
    },
    {
      "epoch": 2.136825227151256,
      "grad_norm": 86.63339233398438,
      "learning_rate": 8.163340724316334e-05,
      "loss": 4.1343,
      "step": 3000
    },
    {
      "epoch": 2.208088366292535,
      "grad_norm": 87.8547134399414,
      "learning_rate": 8.089430894308944e-05,
      "loss": 4.69,
      "step": 3100
    },
    {
      "epoch": 2.2793515054338145,
      "grad_norm": 41.078346252441406,
      "learning_rate": 8.015521064301552e-05,
      "loss": 4.5097,
      "step": 3200
    },
    {
      "epoch": 2.2793515054338145,
      "eval_loss": 0.11067745089530945,
      "eval_runtime": 156.5697,
      "eval_samples_per_second": 3.193,
      "eval_steps_per_second": 0.402,
      "eval_wer": 0.2256115531977601,
      "step": 3200
    },
    {
      "epoch": 2.3506146445750935,
      "grad_norm": 56.07571792602539,
      "learning_rate": 7.941611234294162e-05,
      "loss": 4.2569,
      "step": 3300
    },
    {
      "epoch": 2.4218777837163725,
      "grad_norm": 80.53390502929688,
      "learning_rate": 7.86770140428677e-05,
      "loss": 4.3998,
      "step": 3400
    },
    {
      "epoch": 2.493140922857652,
      "grad_norm": 70.70401763916016,
      "learning_rate": 7.79379157427938e-05,
      "loss": 4.1977,
      "step": 3500
    },
    {
      "epoch": 2.564404061998931,
      "grad_norm": 62.703758239746094,
      "learning_rate": 7.719881744271988e-05,
      "loss": 3.9677,
      "step": 3600
    },
    {
      "epoch": 2.564404061998931,
      "eval_loss": 0.10945500433444977,
      "eval_runtime": 159.3631,
      "eval_samples_per_second": 3.137,
      "eval_steps_per_second": 0.395,
      "eval_wer": 0.22620100206307103,
      "step": 3600
    },
    {
      "epoch": 2.63566720114021,
      "grad_norm": 40.595977783203125,
      "learning_rate": 7.645971914264598e-05,
      "loss": 3.9478,
      "step": 3700
    },
    {
      "epoch": 2.7069303402814895,
      "grad_norm": 43.14921188354492,
      "learning_rate": 7.572062084257207e-05,
      "loss": 3.9157,
      "step": 3800
    },
    {
      "epoch": 2.7781934794227685,
      "grad_norm": 53.32358169555664,
      "learning_rate": 7.498152254249816e-05,
      "loss": 3.877,
      "step": 3900
    },
    {
      "epoch": 2.849456618564048,
      "grad_norm": 53.401611328125,
      "learning_rate": 7.424242424242424e-05,
      "loss": 3.8998,
      "step": 4000
    },
    {
      "epoch": 2.849456618564048,
      "eval_loss": 0.1084858775138855,
      "eval_runtime": 161.4334,
      "eval_samples_per_second": 3.097,
      "eval_steps_per_second": 0.39,
      "eval_wer": 0.2300324196875921,
      "step": 4000
    },
    {
      "epoch": 2.920719757705327,
      "grad_norm": 52.240577697753906,
      "learning_rate": 7.350332594235033e-05,
      "loss": 3.7483,
      "step": 4100
    },
    {
      "epoch": 2.991982896846606,
      "grad_norm": 89.89251708984375,
      "learning_rate": 7.276422764227643e-05,
      "loss": 3.8055,
      "step": 4200
    },
    {
      "epoch": 3.0627115624443255,
      "grad_norm": 44.852535247802734,
      "learning_rate": 7.202512934220252e-05,
      "loss": 3.1335,
      "step": 4300
    },
    {
      "epoch": 3.133974701585605,
      "grad_norm": 51.19050598144531,
      "learning_rate": 7.12860310421286e-05,
      "loss": 3.3351,
      "step": 4400
    },
    {
      "epoch": 3.133974701585605,
      "eval_loss": 0.10667955130338669,
      "eval_runtime": 156.6885,
      "eval_samples_per_second": 3.191,
      "eval_steps_per_second": 0.402,
      "eval_wer": 0.21396993810786913,
      "step": 4400
    },
    {
      "epoch": 3.205237840726884,
      "grad_norm": 85.72518157958984,
      "learning_rate": 7.05469327420547e-05,
      "loss": 3.2473,
      "step": 4500
    },
    {
      "epoch": 3.276500979868163,
      "grad_norm": 53.99726867675781,
      "learning_rate": 6.98078344419808e-05,
      "loss": 3.0099,
      "step": 4600
    },
    {
      "epoch": 3.3477641190094425,
      "grad_norm": 61.77521514892578,
      "learning_rate": 6.906873614190688e-05,
      "loss": 3.1004,
      "step": 4700
    },
    {
      "epoch": 3.4190272581507215,
      "grad_norm": 49.96204376220703,
      "learning_rate": 6.832963784183297e-05,
      "loss": 3.1317,
      "step": 4800
    },
    {
      "epoch": 3.4190272581507215,
      "eval_loss": 0.1066509559750557,
      "eval_runtime": 157.6866,
      "eval_samples_per_second": 3.171,
      "eval_steps_per_second": 0.4,
      "eval_wer": 0.21986442676097848,
      "step": 4800
    },
    {
      "epoch": 3.4902903972920005,
      "grad_norm": 60.96116638183594,
      "learning_rate": 6.759053954175906e-05,
      "loss": 3.1707,
      "step": 4900
    },
    {
      "epoch": 3.56155353643328,
      "grad_norm": 62.14186477661133,
      "learning_rate": 6.685144124168514e-05,
      "loss": 3.1594,
      "step": 5000
    },
    {
      "epoch": 3.632816675574559,
      "grad_norm": 35.275325775146484,
      "learning_rate": 6.611234294161123e-05,
      "loss": 3.1857,
      "step": 5100
    },
    {
      "epoch": 3.704079814715838,
      "grad_norm": 54.71980285644531,
      "learning_rate": 6.537324464153732e-05,
      "loss": 2.9814,
      "step": 5200
    },
    {
      "epoch": 3.704079814715838,
      "eval_loss": 0.10459103435277939,
      "eval_runtime": 155.7856,
      "eval_samples_per_second": 3.21,
      "eval_steps_per_second": 0.404,
      "eval_wer": 0.2119068670792809,
      "step": 5200
    },
    {
      "epoch": 3.7753429538571175,
      "grad_norm": 44.45451354980469,
      "learning_rate": 6.463414634146342e-05,
      "loss": 2.9051,
      "step": 5300
    },
    {
      "epoch": 3.8466060929983965,
      "grad_norm": 32.420108795166016,
      "learning_rate": 6.389504804138951e-05,
      "loss": 3.2812,
      "step": 5400
    },
    {
      "epoch": 3.917869232139676,
      "grad_norm": 56.81290817260742,
      "learning_rate": 6.31559497413156e-05,
      "loss": 3.1616,
      "step": 5500
    },
    {
      "epoch": 3.989132371280955,
      "grad_norm": 47.525577545166016,
      "learning_rate": 6.241685144124168e-05,
      "loss": 3.167,
      "step": 5600
    },
    {
      "epoch": 3.989132371280955,
      "eval_loss": 0.103938028216362,
      "eval_runtime": 153.9307,
      "eval_samples_per_second": 3.248,
      "eval_steps_per_second": 0.409,
      "eval_wer": 0.21043324491600354,
      "step": 5600
    },
    {
      "epoch": 4.059861036878675,
      "grad_norm": 41.089988708496094,
      "learning_rate": 6.167775314116778e-05,
      "loss": 2.7796,
      "step": 5700
    },
    {
      "epoch": 4.131124176019954,
      "grad_norm": 28.174148559570312,
      "learning_rate": 6.0938654841093865e-05,
      "loss": 2.4943,
      "step": 5800
    },
    {
      "epoch": 4.202387315161233,
      "grad_norm": 73.63423156738281,
      "learning_rate": 6.0199556541019966e-05,
      "loss": 2.5883,
      "step": 5900
    },
    {
      "epoch": 4.273650454302512,
      "grad_norm": 35.365089416503906,
      "learning_rate": 5.9460458240946046e-05,
      "loss": 2.498,
      "step": 6000
    },
    {
      "epoch": 4.273650454302512,
      "eval_loss": 0.1066313236951828,
      "eval_runtime": 154.5122,
      "eval_samples_per_second": 3.236,
      "eval_steps_per_second": 0.408,
      "eval_wer": 0.2176539935160625,
      "step": 6000
    },
    {
      "epoch": 4.344913593443791,
      "grad_norm": 43.693443298339844,
      "learning_rate": 5.8721359940872134e-05,
      "loss": 2.5415,
      "step": 6100
    },
    {
      "epoch": 4.41617673258507,
      "grad_norm": 55.82042694091797,
      "learning_rate": 5.798226164079823e-05,
      "loss": 2.5464,
      "step": 6200
    },
    {
      "epoch": 4.48743987172635,
      "grad_norm": 37.502227783203125,
      "learning_rate": 5.7243163340724315e-05,
      "loss": 2.534,
      "step": 6300
    },
    {
      "epoch": 4.558703010867629,
      "grad_norm": 36.125301361083984,
      "learning_rate": 5.650406504065041e-05,
      "loss": 2.8372,
      "step": 6400
    },
    {
      "epoch": 4.558703010867629,
      "eval_loss": 0.10223670303821564,
      "eval_runtime": 155.3604,
      "eval_samples_per_second": 3.218,
      "eval_steps_per_second": 0.406,
      "eval_wer": 0.20984379605069262,
      "step": 6400
    },
    {
      "epoch": 4.629966150008908,
      "grad_norm": 48.96213912963867,
      "learning_rate": 5.57649667405765e-05,
      "loss": 2.6121,
      "step": 6500
    },
    {
      "epoch": 4.701229289150187,
      "grad_norm": 59.73322677612305,
      "learning_rate": 5.502586844050259e-05,
      "loss": 2.7291,
      "step": 6600
    },
    {
      "epoch": 4.772492428291466,
      "grad_norm": 120.08707427978516,
      "learning_rate": 5.428677014042868e-05,
      "loss": 2.7742,
      "step": 6700
    },
    {
      "epoch": 4.843755567432745,
      "grad_norm": 46.421714782714844,
      "learning_rate": 5.354767184035477e-05,
      "loss": 2.5573,
      "step": 6800
    },
    {
      "epoch": 4.843755567432745,
      "eval_loss": 0.10277628153562546,
      "eval_runtime": 157.4825,
      "eval_samples_per_second": 3.175,
      "eval_steps_per_second": 0.4,
      "eval_wer": 0.21809608016504567,
      "step": 6800
    },
    {
      "epoch": 4.915018706574025,
      "grad_norm": 50.61531448364258,
      "learning_rate": 5.280857354028086e-05,
      "loss": 2.3244,
      "step": 6900
    },
    {
      "epoch": 4.986281845715304,
      "grad_norm": 39.10790252685547,
      "learning_rate": 5.2069475240206955e-05,
      "loss": 2.5899,
      "step": 7000
    },
    {
      "epoch": 5.057010511313023,
      "grad_norm": 36.1613883972168,
      "learning_rate": 5.133037694013304e-05,
      "loss": 2.2914,
      "step": 7100
    },
    {
      "epoch": 5.128273650454302,
      "grad_norm": 41.908348083496094,
      "learning_rate": 5.059127864005912e-05,
      "loss": 2.3309,
      "step": 7200
    },
    {
      "epoch": 5.128273650454302,
      "eval_loss": 0.10057391971349716,
      "eval_runtime": 155.7627,
      "eval_samples_per_second": 3.21,
      "eval_steps_per_second": 0.404,
      "eval_wer": 0.20910698496905394,
      "step": 7200
    },
    {
      "epoch": 5.199536789595582,
      "grad_norm": 73.74039459228516,
      "learning_rate": 4.9852180339985224e-05,
      "loss": 2.3553,
      "step": 7300
    },
    {
      "epoch": 5.270799928736861,
      "grad_norm": 30.432615280151367,
      "learning_rate": 4.911308203991131e-05,
      "loss": 2.2731,
      "step": 7400
    },
    {
      "epoch": 5.34206306787814,
      "grad_norm": 43.42770767211914,
      "learning_rate": 4.8373983739837406e-05,
      "loss": 2.1843,
      "step": 7500
    },
    {
      "epoch": 5.413326207019419,
      "grad_norm": 36.634033203125,
      "learning_rate": 4.7634885439763486e-05,
      "loss": 2.2589,
      "step": 7600
    },
    {
      "epoch": 5.413326207019419,
      "eval_loss": 0.10152015835046768,
      "eval_runtime": 157.1024,
      "eval_samples_per_second": 3.183,
      "eval_steps_per_second": 0.401,
      "eval_wer": 0.20999115826702033,
      "step": 7600
    },
    {
      "epoch": 5.484589346160698,
      "grad_norm": 44.67591094970703,
      "learning_rate": 4.689578713968958e-05,
      "loss": 2.3519,
      "step": 7700
    },
    {
      "epoch": 5.555852485301978,
      "grad_norm": 34.04768753051758,
      "learning_rate": 4.615668883961567e-05,
      "loss": 1.9867,
      "step": 7800
    },
    {
      "epoch": 5.627115624443257,
      "grad_norm": 29.320768356323242,
      "learning_rate": 4.541759053954176e-05,
      "loss": 2.1691,
      "step": 7900
    },
    {
      "epoch": 5.698378763584536,
      "grad_norm": 35.776451110839844,
      "learning_rate": 4.467849223946785e-05,
      "loss": 2.1409,
      "step": 8000
    },
    {
      "epoch": 5.698378763584536,
      "eval_loss": 0.10244826227426529,
      "eval_runtime": 154.1587,
      "eval_samples_per_second": 3.243,
      "eval_steps_per_second": 0.409,
      "eval_wer": 0.20645446507515472,
      "step": 8000
    },
    {
      "epoch": 5.769641902725815,
      "grad_norm": 48.27608108520508,
      "learning_rate": 4.3939393939393944e-05,
      "loss": 2.1876,
      "step": 8100
    },
    {
      "epoch": 5.840905041867094,
      "grad_norm": 35.93488311767578,
      "learning_rate": 4.320029563932003e-05,
      "loss": 2.2169,
      "step": 8200
    },
    {
      "epoch": 5.912168181008373,
      "grad_norm": 23.94822120666504,
      "learning_rate": 4.2461197339246126e-05,
      "loss": 2.1963,
      "step": 8300
    },
    {
      "epoch": 5.983431320149653,
      "grad_norm": 28.82554817199707,
      "learning_rate": 4.172209903917221e-05,
      "loss": 2.1048,
      "step": 8400
    },
    {
      "epoch": 5.983431320149653,
      "eval_loss": 0.09920582175254822,
      "eval_runtime": 158.0225,
      "eval_samples_per_second": 3.164,
      "eval_steps_per_second": 0.399,
      "eval_wer": 0.2138225758915414,
      "step": 8400
    },
    {
      "epoch": 6.054159985747372,
      "grad_norm": 30.941600799560547,
      "learning_rate": 4.09830007390983e-05,
      "loss": 2.1247,
      "step": 8500
    },
    {
      "epoch": 6.125423124888651,
      "grad_norm": 30.994831085205078,
      "learning_rate": 4.0243902439024395e-05,
      "loss": 1.9593,
      "step": 8600
    },
    {
      "epoch": 6.196686264029931,
      "grad_norm": 34.325279235839844,
      "learning_rate": 3.950480413895048e-05,
      "loss": 1.9629,
      "step": 8700
    },
    {
      "epoch": 6.26794940317121,
      "grad_norm": 24.892839431762695,
      "learning_rate": 3.876570583887657e-05,
      "loss": 1.8826,
      "step": 8800
    },
    {
      "epoch": 6.26794940317121,
      "eval_loss": 0.09866400063037872,
      "eval_runtime": 157.1762,
      "eval_samples_per_second": 3.181,
      "eval_steps_per_second": 0.401,
      "eval_wer": 0.2116121426466254,
      "step": 8800
    },
    {
      "epoch": 6.339212542312489,
      "grad_norm": 30.01650047302246,
      "learning_rate": 3.8026607538802664e-05,
      "loss": 1.9085,
      "step": 8900
    },
    {
      "epoch": 6.410475681453768,
      "grad_norm": 45.52370071411133,
      "learning_rate": 3.728750923872875e-05,
      "loss": 1.9014,
      "step": 9000
    },
    {
      "epoch": 6.481738820595047,
      "grad_norm": 26.402570724487305,
      "learning_rate": 3.6548410938654846e-05,
      "loss": 1.824,
      "step": 9100
    },
    {
      "epoch": 6.553001959736326,
      "grad_norm": 43.61695861816406,
      "learning_rate": 3.580931263858093e-05,
      "loss": 1.8778,
      "step": 9200
    },
    {
      "epoch": 6.553001959736326,
      "eval_loss": 0.09877074509859085,
      "eval_runtime": 155.4125,
      "eval_samples_per_second": 3.217,
      "eval_steps_per_second": 0.405,
      "eval_wer": 0.20733863837312114,
      "step": 9200
    },
    {
      "epoch": 6.624265098877606,
      "grad_norm": 40.971221923828125,
      "learning_rate": 3.507021433850702e-05,
      "loss": 1.9757,
      "step": 9300
    },
    {
      "epoch": 6.695528238018885,
      "grad_norm": 39.76367950439453,
      "learning_rate": 3.4331116038433115e-05,
      "loss": 1.9012,
      "step": 9400
    },
    {
      "epoch": 6.766791377160164,
      "grad_norm": 39.237640380859375,
      "learning_rate": 3.35920177383592e-05,
      "loss": 1.9141,
      "step": 9500
    },
    {
      "epoch": 6.838054516301443,
      "grad_norm": 31.298519134521484,
      "learning_rate": 3.2852919438285297e-05,
      "loss": 2.0199,
      "step": 9600
    },
    {
      "epoch": 6.838054516301443,
      "eval_loss": 0.09809073060750961,
      "eval_runtime": 156.8243,
      "eval_samples_per_second": 3.188,
      "eval_steps_per_second": 0.402,
      "eval_wer": 0.2045387562628942,
      "step": 9600
    },
    {
      "epoch": 6.909317655442722,
      "grad_norm": 59.62895202636719,
      "learning_rate": 3.2113821138211384e-05,
      "loss": 1.9717,
      "step": 9700
    },
    {
      "epoch": 6.980580794584001,
      "grad_norm": 26.735105514526367,
      "learning_rate": 3.137472283813747e-05,
      "loss": 1.9839,
      "step": 9800
    },
    {
      "epoch": 7.051309460181721,
      "grad_norm": 39.49419021606445,
      "learning_rate": 3.063562453806356e-05,
      "loss": 1.7422,
      "step": 9900
    },
    {
      "epoch": 7.122572599323,
      "grad_norm": 36.06403732299805,
      "learning_rate": 2.9896526237989653e-05,
      "loss": 1.7238,
      "step": 10000
    },
    {
      "epoch": 7.122572599323,
      "eval_loss": 0.09965246915817261,
      "eval_runtime": 156.0402,
      "eval_samples_per_second": 3.204,
      "eval_steps_per_second": 0.404,
      "eval_wer": 0.20218096080165046,
      "step": 10000
    },
    {
      "epoch": 7.193835738464279,
      "grad_norm": 34.01579284667969,
      "learning_rate": 2.9157427937915744e-05,
      "loss": 1.8574,
      "step": 10100
    },
    {
      "epoch": 7.265098877605558,
      "grad_norm": 23.857589721679688,
      "learning_rate": 2.8418329637841835e-05,
      "loss": 1.7262,
      "step": 10200
    },
    {
      "epoch": 7.336362016746838,
      "grad_norm": 22.911731719970703,
      "learning_rate": 2.7679231337767926e-05,
      "loss": 1.803,
      "step": 10300
    },
    {
      "epoch": 7.407625155888117,
      "grad_norm": 40.41181564331055,
      "learning_rate": 2.6940133037694017e-05,
      "loss": 1.8087,
      "step": 10400
    },
    {
      "epoch": 7.407625155888117,
      "eval_loss": 0.09829334914684296,
      "eval_runtime": 156.8822,
      "eval_samples_per_second": 3.187,
      "eval_steps_per_second": 0.402,
      "eval_wer": 0.2036545829649278,
      "step": 10400
    },
    {
      "epoch": 7.483876714769286,
      "grad_norm": 34.382659912109375,
      "learning_rate": 2.6201034737620107e-05,
      "loss": 1.8111,
      "step": 10500
    },
    {
      "epoch": 7.555139853910565,
      "grad_norm": 33.14358901977539,
      "learning_rate": 2.5461936437546198e-05,
      "loss": 1.7896,
      "step": 10600
    },
    {
      "epoch": 7.626402993051844,
      "grad_norm": 39.97333526611328,
      "learning_rate": 2.4722838137472286e-05,
      "loss": 1.7157,
      "step": 10700
    },
    {
      "epoch": 7.697666132193123,
      "grad_norm": 33.9200553894043,
      "learning_rate": 2.3983739837398377e-05,
      "loss": 1.7075,
      "step": 10800
    },
    {
      "epoch": 7.697666132193123,
      "eval_loss": 0.09847129136323929,
      "eval_runtime": 148.952,
      "eval_samples_per_second": 3.357,
      "eval_steps_per_second": 0.423,
      "eval_wer": 0.2058650162098438,
      "step": 10800
    },
    {
      "epoch": 7.768929271334402,
      "grad_norm": 22.087848663330078,
      "learning_rate": 2.3244641537324467e-05,
      "loss": 1.6991,
      "step": 10900
    },
    {
      "epoch": 7.840192410475682,
      "grad_norm": 42.86452102661133,
      "learning_rate": 2.2505543237250555e-05,
      "loss": 1.7294,
      "step": 11000
    },
    {
      "epoch": 7.911455549616961,
      "grad_norm": 29.147266387939453,
      "learning_rate": 2.1766444937176646e-05,
      "loss": 1.7738,
      "step": 11100
    },
    {
      "epoch": 7.98271868875824,
      "grad_norm": 38.3438835144043,
      "learning_rate": 2.1027346637102737e-05,
      "loss": 1.7072,
      "step": 11200
    },
    {
      "epoch": 7.98271868875824,
      "eval_loss": 0.09768818318843842,
      "eval_runtime": 146.2719,
      "eval_samples_per_second": 3.418,
      "eval_steps_per_second": 0.431,
      "eval_wer": 0.20615974064249926,
      "step": 11200
    },
    {
      "epoch": 8.054159985747372,
      "grad_norm": 25.453853607177734,
      "learning_rate": 2.0288248337028824e-05,
      "loss": 1.5749,
      "step": 11300
    },
    {
      "epoch": 8.125423124888652,
      "grad_norm": 21.901262283325195,
      "learning_rate": 1.9549150036954915e-05,
      "loss": 1.588,
      "step": 11400
    },
    {
      "epoch": 8.19668626402993,
      "grad_norm": 47.3566780090332,
      "learning_rate": 1.8810051736881006e-05,
      "loss": 1.6455,
      "step": 11500
    },
    {
      "epoch": 8.26794940317121,
      "grad_norm": 31.993080139160156,
      "learning_rate": 1.8070953436807093e-05,
      "loss": 1.5864,
      "step": 11600
    },
    {
      "epoch": 8.26794940317121,
      "eval_loss": 0.09770301729440689,
      "eval_runtime": 145.9667,
      "eval_samples_per_second": 3.425,
      "eval_steps_per_second": 0.432,
      "eval_wer": 0.20660182729148246,
      "step": 11600
    },
    {
      "epoch": 8.339212542312488,
      "grad_norm": 22.330097198486328,
      "learning_rate": 1.7331855136733184e-05,
      "loss": 1.6879,
      "step": 11700
    },
    {
      "epoch": 8.410475681453768,
      "grad_norm": 25.55733299255371,
      "learning_rate": 1.6592756836659275e-05,
      "loss": 1.525,
      "step": 11800
    },
    {
      "epoch": 8.481738820595048,
      "grad_norm": 25.518842697143555,
      "learning_rate": 1.5853658536585366e-05,
      "loss": 1.6049,
      "step": 11900
    },
    {
      "epoch": 8.553001959736326,
      "grad_norm": 23.22325897216797,
      "learning_rate": 1.5114560236511458e-05,
      "loss": 1.6869,
      "step": 12000
    },
    {
      "epoch": 8.553001959736326,
      "eval_loss": 0.0971849113702774,
      "eval_runtime": 146.1299,
      "eval_samples_per_second": 3.422,
      "eval_steps_per_second": 0.431,
      "eval_wer": 0.2080754494547598,
      "step": 12000
    },
    {
      "epoch": 8.624265098877606,
      "grad_norm": 26.13189125061035,
      "learning_rate": 1.4375461936437547e-05,
      "loss": 1.6066,
      "step": 12100
    },
    {
      "epoch": 8.695528238018884,
      "grad_norm": 18.172691345214844,
      "learning_rate": 1.3636363636363637e-05,
      "loss": 1.4317,
      "step": 12200
    },
    {
      "epoch": 8.766791377160164,
      "grad_norm": 28.6180419921875,
      "learning_rate": 1.2897265336289727e-05,
      "loss": 1.6208,
      "step": 12300
    },
    {
      "epoch": 8.838054516301444,
      "grad_norm": 24.86948013305664,
      "learning_rate": 1.2158167036215817e-05,
      "loss": 1.7383,
      "step": 12400
    },
    {
      "epoch": 8.838054516301444,
      "eval_loss": 0.09760728478431702,
      "eval_runtime": 147.3722,
      "eval_samples_per_second": 3.393,
      "eval_steps_per_second": 0.427,
      "eval_wer": 0.20409666961391099,
      "step": 12400
    },
    {
      "epoch": 8.909317655442722,
      "grad_norm": 24.008527755737305,
      "learning_rate": 1.1419068736141907e-05,
      "loss": 1.5721,
      "step": 12500
    },
    {
      "epoch": 8.980580794584002,
      "grad_norm": 19.964536666870117,
      "learning_rate": 1.0679970436067997e-05,
      "loss": 1.5477,
      "step": 12600
    },
    {
      "epoch": 9.051309460181722,
      "grad_norm": 26.542261123657227,
      "learning_rate": 9.940872135994087e-06,
      "loss": 1.4216,
      "step": 12700
    },
    {
      "epoch": 9.122572599323,
      "grad_norm": 16.520477294921875,
      "learning_rate": 9.201773835920177e-06,
      "loss": 1.4336,
      "step": 12800
    },
    {
      "epoch": 9.122572599323,
      "eval_loss": 0.09696762263774872,
      "eval_runtime": 145.4672,
      "eval_samples_per_second": 3.437,
      "eval_steps_per_second": 0.433,
      "eval_wer": 0.2045387562628942,
      "step": 12800
    },
    {
      "epoch": 9.19383573846428,
      "grad_norm": 21.41847801208496,
      "learning_rate": 8.462675535846267e-06,
      "loss": 1.4627,
      "step": 12900
    },
    {
      "epoch": 9.265098877605558,
      "grad_norm": 28.16568946838379,
      "learning_rate": 7.723577235772358e-06,
      "loss": 1.4259,
      "step": 13000
    },
    {
      "epoch": 9.336362016746838,
      "grad_norm": 19.043367385864258,
      "learning_rate": 6.984478935698447e-06,
      "loss": 1.5803,
      "step": 13100
    },
    {
      "epoch": 9.407625155888116,
      "grad_norm": 18.10884666442871,
      "learning_rate": 6.245380635624538e-06,
      "loss": 1.5429,
      "step": 13200
    },
    {
      "epoch": 9.407625155888116,
      "eval_loss": 0.09686141461133957,
      "eval_runtime": 144.8892,
      "eval_samples_per_second": 3.451,
      "eval_steps_per_second": 0.435,
      "eval_wer": 0.20100206307102858,
      "step": 13200
    },
    {
      "epoch": 9.478888295029396,
      "grad_norm": 19.601333618164062,
      "learning_rate": 5.506282335550628e-06,
      "loss": 1.6243,
      "step": 13300
    },
    {
      "epoch": 9.550151434170676,
      "grad_norm": 26.340774536132812,
      "learning_rate": 4.767184035476718e-06,
      "loss": 1.4496,
      "step": 13400
    },
    {
      "epoch": 9.621414573311954,
      "grad_norm": 19.866046905517578,
      "learning_rate": 4.028085735402809e-06,
      "loss": 1.5778,
      "step": 13500
    },
    {
      "epoch": 9.692677712453234,
      "grad_norm": 16.61330223083496,
      "learning_rate": 3.288987435328899e-06,
      "loss": 1.5726,
      "step": 13600
    },
    {
      "epoch": 9.692677712453234,
      "eval_loss": 0.09693081676959991,
      "eval_runtime": 145.8875,
      "eval_samples_per_second": 3.427,
      "eval_steps_per_second": 0.432,
      "eval_wer": 0.20837017388741527,
      "step": 13600
    },
    {
      "epoch": 9.763940851594512,
      "grad_norm": 26.37179183959961,
      "learning_rate": 2.549889135254989e-06,
      "loss": 1.546,
      "step": 13700
    },
    {
      "epoch": 9.835203990735792,
      "grad_norm": 17.592809677124023,
      "learning_rate": 1.8107908351810791e-06,
      "loss": 1.4815,
      "step": 13800
    },
    {
      "epoch": 9.906467129877072,
      "grad_norm": 14.37030029296875,
      "learning_rate": 1.0716925351071693e-06,
      "loss": 1.414,
      "step": 13900
    },
    {
      "epoch": 9.97773026901835,
      "grad_norm": 21.16905975341797,
      "learning_rate": 3.3259423503325944e-07,
      "loss": 1.4709,
      "step": 14000
    },
    {
      "epoch": 9.97773026901835,
      "eval_loss": 0.097068190574646,
      "eval_runtime": 145.6677,
      "eval_samples_per_second": 3.432,
      "eval_steps_per_second": 0.432,
      "eval_wer": 0.20439139404656645,
      "step": 14000
    },
    {
      "epoch": 10.049884197398896,
      "grad_norm": 27.002792358398438,
      "learning_rate": 4.1822620016273397e-05,
      "loss": 1.4666,
      "step": 14100
    },
    {
      "epoch": 10.121147336540174,
      "grad_norm": 27.5255069732666,
      "learning_rate": 4.139437283199863e-05,
      "loss": 1.58,
      "step": 14200
    },
    {
      "epoch": 10.192410475681454,
      "grad_norm": 29.90217399597168,
      "learning_rate": 4.096612564772387e-05,
      "loss": 1.7165,
      "step": 14300
    },
    {
      "epoch": 10.263673614822732,
      "grad_norm": 35.381736755371094,
      "learning_rate": 4.05378784634491e-05,
      "loss": 1.5442,
      "step": 14400
    },
    {
      "epoch": 10.263673614822732,
      "eval_loss": 0.09780226647853851,
      "eval_runtime": 154.9465,
      "eval_samples_per_second": 3.227,
      "eval_steps_per_second": 0.407,
      "eval_wer": 0.20881226053639848,
      "step": 14400
    },
    {
      "epoch": 10.334936753964012,
      "grad_norm": 24.984546661376953,
      "learning_rate": 4.010963127917434e-05,
      "loss": 1.6025,
      "step": 14500
    },
    {
      "epoch": 10.406199893105292,
      "grad_norm": 29.315156936645508,
      "learning_rate": 3.968138409489958e-05,
      "loss": 1.6566,
      "step": 14600
    },
    {
      "epoch": 10.47746303224657,
      "grad_norm": 26.027830123901367,
      "learning_rate": 3.925313691062482e-05,
      "loss": 1.5891,
      "step": 14700
    },
    {
      "epoch": 10.54872617138785,
      "grad_norm": 23.834056854248047,
      "learning_rate": 3.882488972635005e-05,
      "loss": 1.5764,
      "step": 14800
    },
    {
      "epoch": 10.54872617138785,
      "eval_loss": 0.09848986566066742,
      "eval_runtime": 151.1928,
      "eval_samples_per_second": 3.307,
      "eval_steps_per_second": 0.417,
      "eval_wer": 0.215148835838491,
      "step": 14800
    },
    {
      "epoch": 10.619989310529128,
      "grad_norm": 30.214248657226562,
      "learning_rate": 3.839664254207529e-05,
      "loss": 1.6469,
      "step": 14900
    },
    {
      "epoch": 10.691252449670408,
      "grad_norm": 22.957365036010742,
      "learning_rate": 3.796839535780052e-05,
      "loss": 1.609,
      "step": 15000
    },
    {
      "epoch": 10.762515588811688,
      "grad_norm": 24.754613876342773,
      "learning_rate": 3.7540148173525766e-05,
      "loss": 1.6223,
      "step": 15100
    },
    {
      "epoch": 10.833778727952966,
      "grad_norm": 29.267169952392578,
      "learning_rate": 3.7111900989251e-05,
      "loss": 1.6821,
      "step": 15200
    },
    {
      "epoch": 10.833778727952966,
      "eval_loss": 0.09700831025838852,
      "eval_runtime": 149.5595,
      "eval_samples_per_second": 3.343,
      "eval_steps_per_second": 0.421,
      "eval_wer": 0.20660182729148246,
      "step": 15200
    },
    {
      "epoch": 10.905041867094246,
      "grad_norm": 23.508710861206055,
      "learning_rate": 3.668365380497623e-05,
      "loss": 1.6449,
      "step": 15300
    },
    {
      "epoch": 10.976305006235524,
      "grad_norm": 19.81301498413086,
      "learning_rate": 3.625540662070147e-05,
      "loss": 1.6106,
      "step": 15400
    },
    {
      "epoch": 11.047033671833244,
      "grad_norm": 27.057025909423828,
      "learning_rate": 3.58271594364267e-05,
      "loss": 1.6911,
      "step": 15500
    },
    {
      "epoch": 11.118296810974524,
      "grad_norm": 24.53127098083496,
      "learning_rate": 3.539891225215195e-05,
      "loss": 1.6529,
      "step": 15600
    },
    {
      "epoch": 11.118296810974524,
      "eval_loss": 0.09739436209201813,
      "eval_runtime": 150.7387,
      "eval_samples_per_second": 3.317,
      "eval_steps_per_second": 0.418,
      "eval_wer": 0.20822281167108753,
      "step": 15600
    },
    {
      "epoch": 11.189559950115802,
      "grad_norm": 28.257545471191406,
      "learning_rate": 3.497066506787718e-05,
      "loss": 1.5513,
      "step": 15700
    },
    {
      "epoch": 11.260823089257082,
      "grad_norm": 18.52756690979004,
      "learning_rate": 3.454241788360242e-05,
      "loss": 1.4921,
      "step": 15800
    },
    {
      "epoch": 11.33208622839836,
      "grad_norm": 50.12236022949219,
      "learning_rate": 3.411417069932765e-05,
      "loss": 1.5063,
      "step": 15900
    },
    {
      "epoch": 11.40334936753964,
      "grad_norm": 44.198509216308594,
      "learning_rate": 3.368592351505289e-05,
      "loss": 1.5455,
      "step": 16000
    },
    {
      "epoch": 11.40334936753964,
      "eval_loss": 0.09711702167987823,
      "eval_runtime": 147.0043,
      "eval_samples_per_second": 3.401,
      "eval_steps_per_second": 0.429,
      "eval_wer": 0.20571765399351608,
      "step": 16000
    },
    {
      "epoch": 11.47461250668092,
      "grad_norm": 29.74050521850586,
      "learning_rate": 3.325767633077813e-05,
      "loss": 1.6061,
      "step": 16100
    },
    {
      "epoch": 11.545875645822198,
      "grad_norm": 28.548961639404297,
      "learning_rate": 3.282942914650337e-05,
      "loss": 1.5224,
      "step": 16200
    },
    {
      "epoch": 11.617138784963478,
      "grad_norm": 20.90842628479004,
      "learning_rate": 3.24011819622286e-05,
      "loss": 1.491,
      "step": 16300
    },
    {
      "epoch": 11.688401924104756,
      "grad_norm": 32.348304748535156,
      "learning_rate": 3.197293477795384e-05,
      "loss": 1.4845,
      "step": 16400
    },
    {
      "epoch": 11.688401924104756,
      "eval_loss": 0.09728699922561646,
      "eval_runtime": 150.28,
      "eval_samples_per_second": 3.327,
      "eval_steps_per_second": 0.419,
      "eval_wer": 0.21396993810786913,
      "step": 16400
    },
    {
      "epoch": 11.759665063246036,
      "grad_norm": 31.023475646972656,
      "learning_rate": 3.154468759367907e-05,
      "loss": 1.5757,
      "step": 16500
    },
    {
      "epoch": 11.830928202387316,
      "grad_norm": 28.920454025268555,
      "learning_rate": 3.111644040940431e-05,
      "loss": 1.5098,
      "step": 16600
    },
    {
      "epoch": 11.902191341528594,
      "grad_norm": 28.04616928100586,
      "learning_rate": 3.068819322512955e-05,
      "loss": 1.3697,
      "step": 16700
    },
    {
      "epoch": 11.973454480669874,
      "grad_norm": 25.157943725585938,
      "learning_rate": 3.025994604085478e-05,
      "loss": 1.4953,
      "step": 16800
    },
    {
      "epoch": 11.973454480669874,
      "eval_loss": 0.09595508873462677,
      "eval_runtime": 147.9777,
      "eval_samples_per_second": 3.379,
      "eval_steps_per_second": 0.426,
      "eval_wer": 0.20291777188328913,
      "step": 16800
    },
    {
      "epoch": 12.044183146267594,
      "grad_norm": 33.86606216430664,
      "learning_rate": 2.983169885658002e-05,
      "loss": 1.4119,
      "step": 16900
    },
    {
      "epoch": 12.115446285408872,
      "grad_norm": 22.206035614013672,
      "learning_rate": 2.9403451672305256e-05,
      "loss": 1.4145,
      "step": 17000
    },
    {
      "epoch": 12.186709424550152,
      "grad_norm": 30.262418746948242,
      "learning_rate": 2.8975204488030495e-05,
      "loss": 1.4809,
      "step": 17100
    },
    {
      "epoch": 12.25797256369143,
      "grad_norm": 16.270219802856445,
      "learning_rate": 2.8546957303755727e-05,
      "loss": 1.4349,
      "step": 17200
    },
    {
      "epoch": 12.25797256369143,
      "eval_loss": 0.09577883034944534,
      "eval_runtime": 146.7879,
      "eval_samples_per_second": 3.406,
      "eval_steps_per_second": 0.429,
      "eval_wer": 0.20085470085470086,
      "step": 17200
    },
    {
      "epoch": 12.32923570283271,
      "grad_norm": 19.906198501586914,
      "learning_rate": 2.8118710119480966e-05,
      "loss": 1.4291,
      "step": 17300
    },
    {
      "epoch": 12.400498841973988,
      "grad_norm": 29.69014549255371,
      "learning_rate": 2.7690462935206202e-05,
      "loss": 1.3993,
      "step": 17400
    },
    {
      "epoch": 12.471761981115268,
      "grad_norm": 21.653276443481445,
      "learning_rate": 2.726221575093144e-05,
      "loss": 1.4257,
      "step": 17500
    },
    {
      "epoch": 12.543025120256548,
      "grad_norm": 19.317724227905273,
      "learning_rate": 2.6833968566656676e-05,
      "loss": 1.4104,
      "step": 17600
    },
    {
      "epoch": 12.543025120256548,
      "eval_loss": 0.09735482186079025,
      "eval_runtime": 147.616,
      "eval_samples_per_second": 3.387,
      "eval_steps_per_second": 0.427,
      "eval_wer": 0.20247568523430592,
      "step": 17600
    },
    {
      "epoch": 12.614288259397826,
      "grad_norm": 24.637189865112305,
      "learning_rate": 2.640572138238191e-05,
      "loss": 1.343,
      "step": 17700
    },
    {
      "epoch": 12.685551398539106,
      "grad_norm": 24.483217239379883,
      "learning_rate": 2.5977474198107148e-05,
      "loss": 1.4593,
      "step": 17800
    },
    {
      "epoch": 12.756814537680384,
      "grad_norm": 17.71087074279785,
      "learning_rate": 2.5549227013832383e-05,
      "loss": 1.4749,
      "step": 17900
    },
    {
      "epoch": 12.828077676821664,
      "grad_norm": 22.82439613342285,
      "learning_rate": 2.5120979829557622e-05,
      "loss": 1.5073,
      "step": 18000
    },
    {
      "epoch": 12.828077676821664,
      "eval_loss": 0.09532631188631058,
      "eval_runtime": 149.2979,
      "eval_samples_per_second": 3.349,
      "eval_steps_per_second": 0.422,
      "eval_wer": 0.20439139404656645,
      "step": 18000
    },
    {
      "epoch": 12.899340815962944,
      "grad_norm": 23.298507690429688,
      "learning_rate": 2.4692732645282858e-05,
      "loss": 1.4439,
      "step": 18100
    },
    {
      "epoch": 12.970603955104222,
      "grad_norm": 18.416128158569336,
      "learning_rate": 2.4264485461008097e-05,
      "loss": 1.3723,
      "step": 18200
    },
    {
      "epoch": 13.041332620701942,
      "grad_norm": 27.486845016479492,
      "learning_rate": 2.383623827673333e-05,
      "loss": 1.3505,
      "step": 18300
    },
    {
      "epoch": 13.112595759843222,
      "grad_norm": 13.97179889678955,
      "learning_rate": 2.3407991092458568e-05,
      "loss": 1.2488,
      "step": 18400
    },
    {
      "epoch": 13.112595759843222,
      "eval_loss": 0.09486553817987442,
      "eval_runtime": 147.7955,
      "eval_samples_per_second": 3.383,
      "eval_steps_per_second": 0.426,
      "eval_wer": 0.19658119658119658,
      "step": 18400
    },
    {
      "epoch": 13.1838588989845,
      "grad_norm": 22.142789840698242,
      "learning_rate": 2.2979743908183804e-05,
      "loss": 1.3619,
      "step": 18500
    },
    {
      "epoch": 13.25512203812578,
      "grad_norm": 24.32270050048828,
      "learning_rate": 2.255149672390904e-05,
      "loss": 1.3079,
      "step": 18600
    },
    {
      "epoch": 13.326385177267058,
      "grad_norm": 20.621620178222656,
      "learning_rate": 2.2123249539634278e-05,
      "loss": 1.3346,
      "step": 18700
    },
    {
      "epoch": 13.397648316408338,
      "grad_norm": 19.052913665771484,
      "learning_rate": 2.1695002355359514e-05,
      "loss": 1.277,
      "step": 18800
    },
    {
      "epoch": 13.397648316408338,
      "eval_loss": 0.09550650417804718,
      "eval_runtime": 150.9097,
      "eval_samples_per_second": 3.313,
      "eval_steps_per_second": 0.417,
      "eval_wer": 0.20837017388741527,
      "step": 18800
    },
    {
      "epoch": 13.468911455549616,
      "grad_norm": 29.400739669799805,
      "learning_rate": 2.1266755171084753e-05,
      "loss": 1.4286,
      "step": 18900
    },
    {
      "epoch": 13.540174594690896,
      "grad_norm": 21.397193908691406,
      "learning_rate": 2.083850798680999e-05,
      "loss": 1.3856,
      "step": 19000
    },
    {
      "epoch": 13.611437733832176,
      "grad_norm": 15.47054386138916,
      "learning_rate": 2.0410260802535224e-05,
      "loss": 1.285,
      "step": 19100
    },
    {
      "epoch": 13.682700872973454,
      "grad_norm": 17.92173957824707,
      "learning_rate": 1.9982013618260463e-05,
      "loss": 1.2443,
      "step": 19200
    },
    {
      "epoch": 13.682700872973454,
      "eval_loss": 0.09599152952432632,
      "eval_runtime": 147.2957,
      "eval_samples_per_second": 3.395,
      "eval_steps_per_second": 0.428,
      "eval_wer": 0.19952844090775126,
      "step": 19200
    },
    {
      "epoch": 13.753964012114734,
      "grad_norm": 19.764711380004883,
      "learning_rate": 1.95537664339857e-05,
      "loss": 1.3789,
      "step": 19300
    },
    {
      "epoch": 13.825227151256012,
      "grad_norm": 14.71243667602539,
      "learning_rate": 1.9125519249710934e-05,
      "loss": 1.3413,
      "step": 19400
    },
    {
      "epoch": 13.896490290397292,
      "grad_norm": 23.086503982543945,
      "learning_rate": 1.869727206543617e-05,
      "loss": 1.361,
      "step": 19500
    },
    {
      "epoch": 13.967753429538572,
      "grad_norm": 21.994956970214844,
      "learning_rate": 1.8269024881161405e-05,
      "loss": 1.3972,
      "step": 19600
    },
    {
      "epoch": 13.967753429538572,
      "eval_loss": 0.09553589671850204,
      "eval_runtime": 148.0027,
      "eval_samples_per_second": 3.378,
      "eval_steps_per_second": 0.426,
      "eval_wer": 0.2027704096669614,
      "step": 19600
    },
    {
      "epoch": 14.03848209513629,
      "grad_norm": 29.658489227294922,
      "learning_rate": 1.7840777696886644e-05,
      "loss": 1.259,
      "step": 19700
    },
    {
      "epoch": 14.10974523427757,
      "grad_norm": 22.740825653076172,
      "learning_rate": 1.741253051261188e-05,
      "loss": 1.2666,
      "step": 19800
    },
    {
      "epoch": 14.18100837341885,
      "grad_norm": 15.764113426208496,
      "learning_rate": 1.6984283328337116e-05,
      "loss": 1.2416,
      "step": 19900
    },
    {
      "epoch": 14.252271512560128,
      "grad_norm": 16.60357093811035,
      "learning_rate": 1.6556036144062355e-05,
      "loss": 1.2847,
      "step": 20000
    },
    {
      "epoch": 14.252271512560128,
      "eval_loss": 0.09485521167516708,
      "eval_runtime": 148.9312,
      "eval_samples_per_second": 3.357,
      "eval_steps_per_second": 0.423,
      "eval_wer": 0.20335985853227231,
      "step": 20000
    },
    {
      "epoch": 14.323534651701408,
      "grad_norm": 13.188959121704102,
      "learning_rate": 1.612778895978759e-05,
      "loss": 1.2445,
      "step": 20100
    },
    {
      "epoch": 14.394797790842686,
      "grad_norm": 15.217430114746094,
      "learning_rate": 1.5699541775512826e-05,
      "loss": 1.2725,
      "step": 20200
    },
    {
      "epoch": 14.466060929983966,
      "grad_norm": 19.81468963623047,
      "learning_rate": 1.5271294591238065e-05,
      "loss": 1.2663,
      "step": 20300
    },
    {
      "epoch": 14.537324069125244,
      "grad_norm": 16.013675689697266,
      "learning_rate": 1.48430474069633e-05,
      "loss": 1.3107,
      "step": 20400
    },
    {
      "epoch": 14.537324069125244,
      "eval_loss": 0.09505146741867065,
      "eval_runtime": 147.9624,
      "eval_samples_per_second": 3.379,
      "eval_steps_per_second": 0.426,
      "eval_wer": 0.20129678750368404,
      "step": 20400
    },
    {
      "epoch": 14.608587208266524,
      "grad_norm": 26.155323028564453,
      "learning_rate": 1.4414800222688538e-05,
      "loss": 1.3006,
      "step": 20500
    },
    {
      "epoch": 14.679850347407804,
      "grad_norm": 25.04928970336914,
      "learning_rate": 1.3986553038413775e-05,
      "loss": 1.2639,
      "step": 20600
    },
    {
      "epoch": 14.751113486549082,
      "grad_norm": 17.566251754760742,
      "learning_rate": 1.3558305854139009e-05,
      "loss": 1.262,
      "step": 20700
    },
    {
      "epoch": 14.822376625690362,
      "grad_norm": 21.8209228515625,
      "learning_rate": 1.3130058669864246e-05,
      "loss": 1.2232,
      "step": 20800
    },
    {
      "epoch": 14.822376625690362,
      "eval_loss": 0.0947200208902359,
      "eval_runtime": 149.3452,
      "eval_samples_per_second": 3.348,
      "eval_steps_per_second": 0.422,
      "eval_wer": 0.2002652519893899,
      "step": 20800
    },
    {
      "epoch": 14.89363976483164,
      "grad_norm": 19.568490982055664,
      "learning_rate": 1.2701811485589482e-05,
      "loss": 1.2632,
      "step": 20900
    },
    {
      "epoch": 14.96490290397292,
      "grad_norm": 14.820449829101562,
      "learning_rate": 1.2273564301314719e-05,
      "loss": 1.3063,
      "step": 21000
    },
    {
      "epoch": 15.03563156957064,
      "grad_norm": 17.102474212646484,
      "learning_rate": 1.1845317117039956e-05,
      "loss": 1.2273,
      "step": 21100
    },
    {
      "epoch": 15.106894708711918,
      "grad_norm": 16.626834869384766,
      "learning_rate": 1.1417069932765192e-05,
      "loss": 1.2233,
      "step": 21200
    },
    {
      "epoch": 15.106894708711918,
      "eval_loss": 0.09491468966007233,
      "eval_runtime": 148.8446,
      "eval_samples_per_second": 3.359,
      "eval_steps_per_second": 0.423,
      "eval_wer": 0.19849690539345713,
      "step": 21200
    },
    {
      "epoch": 15.178157847853198,
      "grad_norm": 18.95102310180664,
      "learning_rate": 1.098882274849043e-05,
      "loss": 1.1737,
      "step": 21300
    },
    {
      "epoch": 15.249420986994478,
      "grad_norm": 15.553340911865234,
      "learning_rate": 1.0560575564215666e-05,
      "loss": 1.213,
      "step": 21400
    },
    {
      "epoch": 15.320684126135756,
      "grad_norm": 25.440275192260742,
      "learning_rate": 1.0132328379940904e-05,
      "loss": 1.2196,
      "step": 21500
    },
    {
      "epoch": 15.391947265277036,
      "grad_norm": 20.26975440979004,
      "learning_rate": 9.708363667508887e-06,
      "loss": 1.1999,
      "step": 21600
    },
    {
      "epoch": 15.391947265277036,
      "eval_loss": 0.09457062929868698,
      "eval_runtime": 149.2533,
      "eval_samples_per_second": 3.35,
      "eval_steps_per_second": 0.422,
      "eval_wer": 0.20247568523430592,
      "step": 21600
    },
    {
      "epoch": 15.463210404418314,
      "grad_norm": 12.798954010009766,
      "learning_rate": 9.280116483234123e-06,
      "loss": 1.2562,
      "step": 21700
    },
    {
      "epoch": 15.534473543559594,
      "grad_norm": 15.019392967224121,
      "learning_rate": 8.85186929895936e-06,
      "loss": 1.2189,
      "step": 21800
    },
    {
      "epoch": 15.605736682700872,
      "grad_norm": 16.623435974121094,
      "learning_rate": 8.423622114684597e-06,
      "loss": 1.2374,
      "step": 21900
    },
    {
      "epoch": 15.676999821842152,
      "grad_norm": 26.94820213317871,
      "learning_rate": 7.995374930409833e-06,
      "loss": 1.236,
      "step": 22000
    },
    {
      "epoch": 15.676999821842152,
      "eval_loss": 0.09487726539373398,
      "eval_runtime": 150.1052,
      "eval_samples_per_second": 3.331,
      "eval_steps_per_second": 0.42,
      "eval_wer": 0.20291777188328913,
      "step": 22000
    },
    {
      "epoch": 15.748262960983432,
      "grad_norm": 29.318561553955078,
      "learning_rate": 7.567127746135069e-06,
      "loss": 1.2446,
      "step": 22100
    },
    {
      "epoch": 15.81952610012471,
      "grad_norm": 11.961990356445312,
      "learning_rate": 7.1388805618603064e-06,
      "loss": 1.2262,
      "step": 22200
    },
    {
      "epoch": 15.89078923926599,
      "grad_norm": 19.715322494506836,
      "learning_rate": 6.710633377585543e-06,
      "loss": 1.1431,
      "step": 22300
    },
    {
      "epoch": 15.962052378407268,
      "grad_norm": 16.766855239868164,
      "learning_rate": 6.282386193310779e-06,
      "loss": 1.2252,
      "step": 22400
    },
    {
      "epoch": 15.962052378407268,
      "eval_loss": 0.09453491866588593,
      "eval_runtime": 148.1933,
      "eval_samples_per_second": 3.374,
      "eval_steps_per_second": 0.425,
      "eval_wer": 0.19938107869142352,
      "step": 22400
    },
    {
      "epoch": 16.032781044004988,
      "grad_norm": 24.855350494384766,
      "learning_rate": 5.854139009036016e-06,
      "loss": 1.0984,
      "step": 22500
    },
    {
      "epoch": 16.104044183146268,
      "grad_norm": 16.3688907623291,
      "learning_rate": 5.425891824761253e-06,
      "loss": 1.2072,
      "step": 22600
    },
    {
      "epoch": 16.175307322287548,
      "grad_norm": 21.17823600769043,
      "learning_rate": 4.997644640486489e-06,
      "loss": 1.156,
      "step": 22700
    },
    {
      "epoch": 16.246570461428824,
      "grad_norm": 13.77876091003418,
      "learning_rate": 4.569397456211726e-06,
      "loss": 1.2094,
      "step": 22800
    },
    {
      "epoch": 16.246570461428824,
      "eval_loss": 0.094144806265831,
      "eval_runtime": 148.3747,
      "eval_samples_per_second": 3.37,
      "eval_steps_per_second": 0.425,
      "eval_wer": 0.2049808429118774,
      "step": 22800
    },
    {
      "epoch": 16.317833600570104,
      "grad_norm": 15.153692245483398,
      "learning_rate": 4.141150271936962e-06,
      "loss": 1.184,
      "step": 22900
    },
    {
      "epoch": 16.389096739711384,
      "grad_norm": 17.136568069458008,
      "learning_rate": 3.7129030876621984e-06,
      "loss": 1.18,
      "step": 23000
    },
    {
      "epoch": 16.460359878852664,
      "grad_norm": 17.46211051940918,
      "learning_rate": 3.2846559033874353e-06,
      "loss": 1.173,
      "step": 23100
    },
    {
      "epoch": 16.531623017993944,
      "grad_norm": 13.692911148071289,
      "learning_rate": 2.8564087191126717e-06,
      "loss": 1.2505,
      "step": 23200
    },
    {
      "epoch": 16.531623017993944,
      "eval_loss": 0.0941072553396225,
      "eval_runtime": 147.9642,
      "eval_samples_per_second": 3.379,
      "eval_steps_per_second": 0.426,
      "eval_wer": 0.2002652519893899,
      "step": 23200
    },
    {
      "epoch": 16.60288615713522,
      "grad_norm": 12.050436019897461,
      "learning_rate": 2.4281615348379086e-06,
      "loss": 1.1627,
      "step": 23300
    },
    {
      "epoch": 16.6741492962765,
      "grad_norm": 13.23355484008789,
      "learning_rate": 1.999914350563145e-06,
      "loss": 1.1094,
      "step": 23400
    },
    {
      "epoch": 16.74541243541778,
      "grad_norm": 11.773246765136719,
      "learning_rate": 1.5716671662883815e-06,
      "loss": 1.1767,
      "step": 23500
    },
    {
      "epoch": 16.81667557455906,
      "grad_norm": 11.992323875427246,
      "learning_rate": 1.1434199820136182e-06,
      "loss": 1.1193,
      "step": 23600
    },
    {
      "epoch": 16.81667557455906,
      "eval_loss": 0.09421209245920181,
      "eval_runtime": 146.8439,
      "eval_samples_per_second": 3.405,
      "eval_steps_per_second": 0.429,
      "eval_wer": 0.19908635425876806,
      "step": 23600
    },
    {
      "epoch": 16.88793871370034,
      "grad_norm": 13.370680809020996,
      "learning_rate": 7.151727977388549e-07,
      "loss": 1.1688,
      "step": 23700
    },
    {
      "epoch": 16.959201852841616,
      "grad_norm": 11.178988456726074,
      "learning_rate": 2.869256134640915e-07,
      "loss": 1.1531,
      "step": 23800
    },
    {
      "epoch": 17.034918938179228,
      "grad_norm": 21.082555770874023,
      "learning_rate": 3.236731742588576e-05,
      "loss": 1.1705,
      "step": 23900
    },
    {
      "epoch": 17.106182077320504,
      "grad_norm": 11.843573570251465,
      "learning_rate": 3.208098336948662e-05,
      "loss": 1.1992,
      "step": 24000
    },
    {
      "epoch": 17.106182077320504,
      "eval_loss": 0.09458199143409729,
      "eval_runtime": 165.5872,
      "eval_samples_per_second": 3.02,
      "eval_steps_per_second": 0.38,
      "eval_wer": 0.20203359858532272,
      "step": 24000
    },
    {
      "epoch": 17.177445216461784,
      "grad_norm": 23.818212509155273,
      "learning_rate": 3.179175704989154e-05,
      "loss": 1.2774,
      "step": 24100
    },
    {
      "epoch": 17.248708355603064,
      "grad_norm": 13.351399421691895,
      "learning_rate": 3.150253073029646e-05,
      "loss": 1.2139,
      "step": 24200
    },
    {
      "epoch": 17.319971494744344,
      "grad_norm": 34.073760986328125,
      "learning_rate": 3.121330441070138e-05,
      "loss": 1.2695,
      "step": 24300
    },
    {
      "epoch": 17.391234633885624,
      "grad_norm": 28.087905883789062,
      "learning_rate": 3.092407809110629e-05,
      "loss": 1.2794,
      "step": 24400
    },
    {
      "epoch": 17.391234633885624,
      "eval_loss": 0.09535403549671173,
      "eval_runtime": 164.8794,
      "eval_samples_per_second": 3.033,
      "eval_steps_per_second": 0.382,
      "eval_wer": 0.21175950486295314,
      "step": 24400
    },
    {
      "epoch": 17.4624977730269,
      "grad_norm": 13.820633888244629,
      "learning_rate": 3.063485177151121e-05,
      "loss": 1.2422,
      "step": 24500
    },
    {
      "epoch": 17.53376091216818,
      "grad_norm": 23.503931045532227,
      "learning_rate": 3.0345625451916128e-05,
      "loss": 1.2676,
      "step": 24600
    },
    {
      "epoch": 17.60502405130946,
      "grad_norm": 25.35702133178711,
      "learning_rate": 3.0056399132321044e-05,
      "loss": 1.2898,
      "step": 24700
    },
    {
      "epoch": 17.67628719045074,
      "grad_norm": 22.99468231201172,
      "learning_rate": 2.9767172812725957e-05,
      "loss": 1.2362,
      "step": 24800
    },
    {
      "epoch": 17.67628719045074,
      "eval_loss": 0.0947684794664383,
      "eval_runtime": 160.2628,
      "eval_samples_per_second": 3.12,
      "eval_steps_per_second": 0.393,
      "eval_wer": 0.20247568523430592,
      "step": 24800
    },
    {
| "epoch": 17.74755032959202, | |
| "grad_norm": 18.09113311767578, | |
| "learning_rate": 2.9477946493130876e-05, | |
| "loss": 1.2968, | |
| "step": 24900 | |
| }, | |
| { | |
| "epoch": 17.818813468733296, | |
| "grad_norm": 19.54793930053711, | |
| "learning_rate": 2.9188720173535792e-05, | |
| "loss": 1.2264, | |
| "step": 25000 | |
| }, | |
| { | |
| "epoch": 17.890076607874576, | |
| "grad_norm": 20.854331970214844, | |
| "learning_rate": 2.8899493853940712e-05, | |
| "loss": 1.2457, | |
| "step": 25100 | |
| }, | |
| { | |
| "epoch": 17.961339747015856, | |
| "grad_norm": 19.649433135986328, | |
| "learning_rate": 2.8610267534345625e-05, | |
| "loss": 1.3528, | |
| "step": 25200 | |
| }, | |
| { | |
| "epoch": 17.961339747015856, | |
| "eval_loss": 0.09558839350938797, | |
| "eval_runtime": 162.932, | |
| "eval_samples_per_second": 3.069, | |
| "eval_steps_per_second": 0.387, | |
| "eval_wer": 0.20704391394046567, | |
| "step": 25200 | |
| }, | |
| { | |
| "epoch": 18.032068412613576, | |
| "grad_norm": 12.970821380615234, | |
| "learning_rate": 2.8321041214750544e-05, | |
| "loss": 1.3198, | |
| "step": 25300 | |
| }, | |
| { | |
| "epoch": 18.103331551754856, | |
| "grad_norm": 17.95279884338379, | |
| "learning_rate": 2.803181489515546e-05, | |
| "loss": 1.2166, | |
| "step": 25400 | |
| }, | |
| { | |
| "epoch": 18.174594690896132, | |
| "grad_norm": 15.008156776428223, | |
| "learning_rate": 2.774258857556038e-05, | |
| "loss": 1.1703, | |
| "step": 25500 | |
| }, | |
| { | |
| "epoch": 18.245857830037412, | |
| "grad_norm": 28.749645233154297, | |
| "learning_rate": 2.7453362255965293e-05, | |
| "loss": 1.1863, | |
| "step": 25600 | |
| }, | |
| { | |
| "epoch": 18.245857830037412, | |
| "eval_loss": 0.09352152794599533, | |
| "eval_runtime": 161.799, | |
| "eval_samples_per_second": 3.09, | |
| "eval_steps_per_second": 0.389, | |
| "eval_wer": 0.2036545829649278, | |
| "step": 25600 | |
| }, | |
| { | |
| "epoch": 18.317120969178692, | |
| "grad_norm": 25.244779586791992, | |
| "learning_rate": 2.7164135936370212e-05, | |
| "loss": 1.1881, | |
| "step": 25700 | |
| }, | |
| { | |
| "epoch": 18.388384108319972, | |
| "grad_norm": 28.115192413330078, | |
| "learning_rate": 2.687490961677513e-05, | |
| "loss": 1.1862, | |
| "step": 25800 | |
| }, | |
| { | |
| "epoch": 18.459647247461252, | |
| "grad_norm": 14.245773315429688, | |
| "learning_rate": 2.6585683297180048e-05, | |
| "loss": 1.2374, | |
| "step": 25900 | |
| }, | |
| { | |
| "epoch": 18.530910386602528, | |
| "grad_norm": 17.683853149414062, | |
| "learning_rate": 2.629645697758496e-05, | |
| "loss": 1.2936, | |
| "step": 26000 | |
| }, | |
| { | |
| "epoch": 18.530910386602528, | |
| "eval_loss": 0.09395604580640793, | |
| "eval_runtime": 159.852, | |
| "eval_samples_per_second": 3.128, | |
| "eval_steps_per_second": 0.394, | |
| "eval_wer": 0.2032124963159446, | |
| "step": 26000 | |
| }, | |
| { | |
| "epoch": 18.602173525743808, | |
| "grad_norm": 23.95792579650879, | |
| "learning_rate": 2.6007230657989877e-05, | |
| "loss": 1.2801, | |
| "step": 26100 | |
| }, | |
| { | |
| "epoch": 18.673436664885088, | |
| "grad_norm": 33.534027099609375, | |
| "learning_rate": 2.5718004338394796e-05, | |
| "loss": 1.1837, | |
| "step": 26200 | |
| }, | |
| { | |
| "epoch": 18.744699804026368, | |
| "grad_norm": 25.70989227294922, | |
| "learning_rate": 2.542877801879971e-05, | |
| "loss": 1.2212, | |
| "step": 26300 | |
| }, | |
| { | |
| "epoch": 18.815962943167648, | |
| "grad_norm": 19.731300354003906, | |
| "learning_rate": 2.513955169920463e-05, | |
| "loss": 1.2434, | |
| "step": 26400 | |
| }, | |
| { | |
| "epoch": 18.815962943167648, | |
| "eval_loss": 0.09383614361286163, | |
| "eval_runtime": 162.1968, | |
| "eval_samples_per_second": 3.083, | |
| "eval_steps_per_second": 0.388, | |
| "eval_wer": 0.20291777188328913, | |
| "step": 26400 | |
| }, | |
| { | |
| "epoch": 18.887226082308924, | |
| "grad_norm": 21.168506622314453, | |
| "learning_rate": 2.4850325379609545e-05, | |
| "loss": 1.2603, | |
| "step": 26500 | |
| }, | |
| { | |
| "epoch": 18.958489221450204, | |
| "grad_norm": 22.403703689575195, | |
| "learning_rate": 2.456109906001446e-05, | |
| "loss": 1.2183, | |
| "step": 26600 | |
| }, | |
| { | |
| "epoch": 19.029217887047924, | |
| "grad_norm": 18.090112686157227, | |
| "learning_rate": 2.427187274041938e-05, | |
| "loss": 1.1921, | |
| "step": 26700 | |
| }, | |
| { | |
| "epoch": 19.100481026189204, | |
| "grad_norm": 17.219758987426758, | |
| "learning_rate": 2.3982646420824293e-05, | |
| "loss": 1.1254, | |
| "step": 26800 | |
| }, | |
| { | |
| "epoch": 19.100481026189204, | |
| "eval_loss": 0.09326955676078796, | |
| "eval_runtime": 160.0801, | |
| "eval_samples_per_second": 3.123, | |
| "eval_steps_per_second": 0.394, | |
| "eval_wer": 0.20262304745063367, | |
| "step": 26800 | |
| }, | |
| { | |
| "epoch": 19.171744165330484, | |
| "grad_norm": 12.749887466430664, | |
| "learning_rate": 2.3696312364425163e-05, | |
| "loss": 1.1345, | |
| "step": 26900 | |
| }, | |
| { | |
| "epoch": 19.24300730447176, | |
| "grad_norm": 31.437406539916992, | |
| "learning_rate": 2.3407086044830083e-05, | |
| "loss": 1.1849, | |
| "step": 27000 | |
| }, | |
| { | |
| "epoch": 19.31427044361304, | |
| "grad_norm": 12.56886100769043, | |
| "learning_rate": 2.3117859725234995e-05, | |
| "loss": 1.2, | |
| "step": 27100 | |
| }, | |
| { | |
| "epoch": 19.38553358275432, | |
| "grad_norm": 15.81522274017334, | |
| "learning_rate": 2.2828633405639915e-05, | |
| "loss": 1.2345, | |
| "step": 27200 | |
| }, | |
| { | |
| "epoch": 19.38553358275432, | |
| "eval_loss": 0.0934244766831398, | |
| "eval_runtime": 158.6427, | |
| "eval_samples_per_second": 3.152, | |
| "eval_steps_per_second": 0.397, | |
| "eval_wer": 0.20085470085470086, | |
| "step": 27200 | |
| }, | |
| { | |
| "epoch": 19.4567967218956, | |
| "grad_norm": 22.67652130126953, | |
| "learning_rate": 2.253940708604483e-05, | |
| "loss": 1.1491, | |
| "step": 27300 | |
| }, | |
| { | |
| "epoch": 19.52805986103688, | |
| "grad_norm": 25.45644760131836, | |
| "learning_rate": 2.225018076644975e-05, | |
| "loss": 1.2354, | |
| "step": 27400 | |
| }, | |
| { | |
| "epoch": 19.599323000178156, | |
| "grad_norm": 17.67236328125, | |
| "learning_rate": 2.1960954446854663e-05, | |
| "loss": 1.1475, | |
| "step": 27500 | |
| }, | |
| { | |
| "epoch": 19.670586139319436, | |
| "grad_norm": 16.663982391357422, | |
| "learning_rate": 2.1671728127259583e-05, | |
| "loss": 1.2177, | |
| "step": 27600 | |
| }, | |
| { | |
| "epoch": 19.670586139319436, | |
| "eval_loss": 0.09379982203245163, | |
| "eval_runtime": 158.8505, | |
| "eval_samples_per_second": 3.148, | |
| "eval_steps_per_second": 0.397, | |
| "eval_wer": 0.2036545829649278, | |
| "step": 27600 | |
| }, | |
| { | |
| "epoch": 19.741849278460716, | |
| "grad_norm": 17.305145263671875, | |
| "learning_rate": 2.13825018076645e-05, | |
| "loss": 1.1699, | |
| "step": 27700 | |
| }, | |
| { | |
| "epoch": 19.813112417601996, | |
| "grad_norm": 15.652444839477539, | |
| "learning_rate": 2.1093275488069415e-05, | |
| "loss": 1.1853, | |
| "step": 27800 | |
| }, | |
| { | |
| "epoch": 19.884375556743276, | |
| "grad_norm": 12.307770729064941, | |
| "learning_rate": 2.080404916847433e-05, | |
| "loss": 1.1209, | |
| "step": 27900 | |
| }, | |
| { | |
| "epoch": 19.955638695884552, | |
| "grad_norm": 16.08258056640625, | |
| "learning_rate": 2.0514822848879248e-05, | |
| "loss": 1.1479, | |
| "step": 28000 | |
| }, | |
| { | |
| "epoch": 19.955638695884552, | |
| "eval_loss": 0.09380526840686798, | |
| "eval_runtime": 158.3203, | |
| "eval_samples_per_second": 3.158, | |
| "eval_steps_per_second": 0.398, | |
| "eval_wer": 0.20070733863837312, | |
| "step": 28000 | |
| }, | |
| { | |
| "epoch": 20.026367361482272, | |
| "grad_norm": 13.121270179748535, | |
| "learning_rate": 2.0225596529284167e-05, | |
| "loss": 1.1111, | |
| "step": 28100 | |
| }, | |
| { | |
| "epoch": 20.097630500623552, | |
| "grad_norm": 26.73927116394043, | |
| "learning_rate": 1.993637020968908e-05, | |
| "loss": 1.1596, | |
| "step": 28200 | |
| }, | |
| { | |
| "epoch": 20.168893639764832, | |
| "grad_norm": 14.311580657958984, | |
| "learning_rate": 1.9647143890094e-05, | |
| "loss": 1.1404, | |
| "step": 28300 | |
| }, | |
| { | |
| "epoch": 20.240156778906112, | |
| "grad_norm": 17.167621612548828, | |
| "learning_rate": 1.9357917570498916e-05, | |
| "loss": 1.1077, | |
| "step": 28400 | |
| }, | |
| { | |
| "epoch": 20.240156778906112, | |
| "eval_loss": 0.09331627190113068, | |
| "eval_runtime": 158.3839, | |
| "eval_samples_per_second": 3.157, | |
| "eval_steps_per_second": 0.398, | |
| "eval_wer": 0.19952844090775126, | |
| "step": 28400 | |
| }, | |
| { | |
| "epoch": 20.31141991804739, | |
| "grad_norm": 26.617481231689453, | |
| "learning_rate": 1.9068691250903832e-05, | |
| "loss": 1.1133, | |
| "step": 28500 | |
| }, | |
| { | |
| "epoch": 20.382683057188668, | |
| "grad_norm": 15.19737434387207, | |
| "learning_rate": 1.8779464931308748e-05, | |
| "loss": 1.1067, | |
| "step": 28600 | |
| }, | |
| { | |
| "epoch": 20.453946196329948, | |
| "grad_norm": 16.4631404876709, | |
| "learning_rate": 1.8490238611713668e-05, | |
| "loss": 1.0911, | |
| "step": 28700 | |
| }, | |
| { | |
| "epoch": 20.525209335471228, | |
| "grad_norm": 16.38656997680664, | |
| "learning_rate": 1.8201012292118584e-05, | |
| "loss": 1.1615, | |
| "step": 28800 | |
| }, | |
| { | |
| "epoch": 20.525209335471228, | |
| "eval_loss": 0.09307432919740677, | |
| "eval_runtime": 160.2363, | |
| "eval_samples_per_second": 3.12, | |
| "eval_steps_per_second": 0.393, | |
| "eval_wer": 0.20247568523430592, | |
| "step": 28800 | |
| }, | |
| { | |
| "epoch": 20.596472474612508, | |
| "grad_norm": 12.079431533813477, | |
| "learning_rate": 1.79117859725235e-05, | |
| "loss": 1.1665, | |
| "step": 28900 | |
| }, | |
| { | |
| "epoch": 20.667735613753784, | |
| "grad_norm": 20.58789825439453, | |
| "learning_rate": 1.7622559652928416e-05, | |
| "loss": 1.1225, | |
| "step": 29000 | |
| }, | |
| { | |
| "epoch": 20.738998752895064, | |
| "grad_norm": 20.81654930114746, | |
| "learning_rate": 1.7333333333333336e-05, | |
| "loss": 1.0583, | |
| "step": 29100 | |
| }, | |
| { | |
| "epoch": 20.810261892036344, | |
| "grad_norm": 19.925395965576172, | |
| "learning_rate": 1.7044107013738252e-05, | |
| "loss": 1.0642, | |
| "step": 29200 | |
| }, | |
| { | |
| "epoch": 20.810261892036344, | |
| "eval_loss": 0.09396182000637054, | |
| "eval_runtime": 160.8477, | |
| "eval_samples_per_second": 3.109, | |
| "eval_steps_per_second": 0.392, | |
| "eval_wer": 0.2045387562628942, | |
| "step": 29200 | |
| }, | |
| { | |
| "epoch": 20.881525031177624, | |
| "grad_norm": 15.387258529663086, | |
| "learning_rate": 1.6754880694143168e-05, | |
| "loss": 1.1447, | |
| "step": 29300 | |
| }, | |
| { | |
| "epoch": 20.952788170318904, | |
| "grad_norm": 15.967385292053223, | |
| "learning_rate": 1.6465654374548084e-05, | |
| "loss": 1.1769, | |
| "step": 29400 | |
| }, | |
| { | |
| "epoch": 21.023516835916624, | |
| "grad_norm": 14.936897277832031, | |
| "learning_rate": 1.6176428054953004e-05, | |
| "loss": 1.0766, | |
| "step": 29500 | |
| }, | |
| { | |
| "epoch": 21.0947799750579, | |
| "grad_norm": 15.853533744812012, | |
| "learning_rate": 1.5887201735357916e-05, | |
| "loss": 1.0922, | |
| "step": 29600 | |
| }, | |
| { | |
| "epoch": 21.0947799750579, | |
| "eval_loss": 0.09347377717494965, | |
| "eval_runtime": 159.5968, | |
| "eval_samples_per_second": 3.133, | |
| "eval_steps_per_second": 0.395, | |
| "eval_wer": 0.20114942528735633, | |
| "step": 29600 | |
| }, | |
| { | |
| "epoch": 21.16604311419918, | |
| "grad_norm": 20.242115020751953, | |
| "learning_rate": 1.5597975415762836e-05, | |
| "loss": 1.1478, | |
| "step": 29700 | |
| }, | |
| { | |
| "epoch": 21.23730625334046, | |
| "grad_norm": 19.621402740478516, | |
| "learning_rate": 1.5308749096167752e-05, | |
| "loss": 1.0792, | |
| "step": 29800 | |
| }, | |
| { | |
| "epoch": 21.30856939248174, | |
| "grad_norm": 14.971497535705566, | |
| "learning_rate": 1.501952277657267e-05, | |
| "loss": 1.0581, | |
| "step": 29900 | |
| }, | |
| { | |
| "epoch": 21.379832531623016, | |
| "grad_norm": 13.164278030395508, | |
| "learning_rate": 1.4730296456977585e-05, | |
| "loss": 1.0885, | |
| "step": 30000 | |
| }, | |
| { | |
| "epoch": 21.379832531623016, | |
| "eval_loss": 0.0928725004196167, | |
| "eval_runtime": 161.3647, | |
| "eval_samples_per_second": 3.099, | |
| "eval_steps_per_second": 0.39, | |
| "eval_wer": 0.20100206307102858, | |
| "step": 30000 | |
| }, | |
| { | |
| "epoch": 21.451095670764296, | |
| "grad_norm": 13.703166961669922, | |
| "learning_rate": 1.4441070137382504e-05, | |
| "loss": 1.0712, | |
| "step": 30100 | |
| }, | |
| { | |
| "epoch": 21.522358809905576, | |
| "grad_norm": 11.881406784057617, | |
| "learning_rate": 1.4151843817787419e-05, | |
| "loss": 1.0534, | |
| "step": 30200 | |
| }, | |
| { | |
| "epoch": 21.593621949046856, | |
| "grad_norm": 17.952503204345703, | |
| "learning_rate": 1.3862617498192338e-05, | |
| "loss": 1.0741, | |
| "step": 30300 | |
| }, | |
| { | |
| "epoch": 21.664885088188136, | |
| "grad_norm": 14.229839324951172, | |
| "learning_rate": 1.3573391178597253e-05, | |
| "loss": 1.107, | |
| "step": 30400 | |
| }, | |
| { | |
| "epoch": 21.664885088188136, | |
| "eval_loss": 0.09303069859743118, | |
| "eval_runtime": 157.8883, | |
| "eval_samples_per_second": 3.167, | |
| "eval_steps_per_second": 0.399, | |
| "eval_wer": 0.1987916298261126, | |
| "step": 30400 | |
| }, | |
| { | |
| "epoch": 21.736148227329412, | |
| "grad_norm": 17.410789489746094, | |
| "learning_rate": 1.328416485900217e-05, | |
| "loss": 1.0532, | |
| "step": 30500 | |
| }, | |
| { | |
| "epoch": 21.807411366470692, | |
| "grad_norm": 19.12940788269043, | |
| "learning_rate": 1.2994938539407087e-05, | |
| "loss": 1.0863, | |
| "step": 30600 | |
| }, | |
| { | |
| "epoch": 21.878674505611972, | |
| "grad_norm": 10.864148139953613, | |
| "learning_rate": 1.2705712219812004e-05, | |
| "loss": 1.0953, | |
| "step": 30700 | |
| }, | |
| { | |
| "epoch": 21.949937644753252, | |
| "grad_norm": 15.553997993469238, | |
| "learning_rate": 1.241648590021692e-05, | |
| "loss": 1.0449, | |
| "step": 30800 | |
| }, | |
| { | |
| "epoch": 21.949937644753252, | |
| "eval_loss": 0.09309829026460648, | |
| "eval_runtime": 158.4381, | |
| "eval_samples_per_second": 3.156, | |
| "eval_steps_per_second": 0.398, | |
| "eval_wer": 0.2001178897730622, | |
| "step": 30800 | |
| }, | |
| { | |
| "epoch": 22.020666310350972, | |
| "grad_norm": 16.839780807495117, | |
| "learning_rate": 1.2130151843817789e-05, | |
| "loss": 1.0535, | |
| "step": 30900 | |
| }, | |
| { | |
| "epoch": 22.091929449492252, | |
| "grad_norm": 14.113849639892578, | |
| "learning_rate": 1.1840925524222705e-05, | |
| "loss": 1.0341, | |
| "step": 31000 | |
| }, | |
| { | |
| "epoch": 22.163192588633528, | |
| "grad_norm": 16.825448989868164, | |
| "learning_rate": 1.1551699204627623e-05, | |
| "loss": 1.0091, | |
| "step": 31100 | |
| }, | |
| { | |
| "epoch": 22.234455727774808, | |
| "grad_norm": 17.561140060424805, | |
| "learning_rate": 1.1262472885032537e-05, | |
| "loss": 1.033, | |
| "step": 31200 | |
| }, | |
| { | |
| "epoch": 22.234455727774808, | |
| "eval_loss": 0.09307766705751419, | |
| "eval_runtime": 159.9295, | |
| "eval_samples_per_second": 3.126, | |
| "eval_steps_per_second": 0.394, | |
| "eval_wer": 0.20483348069554966, | |
| "step": 31200 | |
| }, | |
| { | |
| "epoch": 22.305718866916088, | |
| "grad_norm": 11.815247535705566, | |
| "learning_rate": 1.0973246565437455e-05, | |
| "loss": 1.0777, | |
| "step": 31300 | |
| }, | |
| { | |
| "epoch": 22.376982006057368, | |
| "grad_norm": 14.315788269042969, | |
| "learning_rate": 1.0684020245842371e-05, | |
| "loss": 1.0391, | |
| "step": 31400 | |
| }, | |
| { | |
| "epoch": 22.448245145198648, | |
| "grad_norm": 20.650876998901367, | |
| "learning_rate": 1.0394793926247289e-05, | |
| "loss": 1.0338, | |
| "step": 31500 | |
| }, | |
| { | |
| "epoch": 22.519508284339924, | |
| "grad_norm": 11.829896926879883, | |
| "learning_rate": 1.0105567606652205e-05, | |
| "loss": 1.057, | |
| "step": 31600 | |
| }, | |
| { | |
| "epoch": 22.519508284339924, | |
| "eval_loss": 0.09323979169130325, | |
| "eval_runtime": 157.3931, | |
| "eval_samples_per_second": 3.177, | |
| "eval_steps_per_second": 0.4, | |
| "eval_wer": 0.1987916298261126, | |
| "step": 31600 | |
| }, | |
| { | |
| "epoch": 22.590771423481204, | |
| "grad_norm": 16.371854782104492, | |
| "learning_rate": 9.816341287057123e-06, | |
| "loss": 1.0875, | |
| "step": 31700 | |
| }, | |
| { | |
| "epoch": 22.662034562622484, | |
| "grad_norm": 12.373185157775879, | |
| "learning_rate": 9.527114967462039e-06, | |
| "loss": 1.0525, | |
| "step": 31800 | |
| }, | |
| { | |
| "epoch": 22.733297701763764, | |
| "grad_norm": 16.901241302490234, | |
| "learning_rate": 9.237888647866955e-06, | |
| "loss": 0.9976, | |
| "step": 31900 | |
| }, | |
| { | |
| "epoch": 22.80456084090504, | |
| "grad_norm": 11.734404563903809, | |
| "learning_rate": 8.948662328271873e-06, | |
| "loss": 1.0248, | |
| "step": 32000 | |
| }, | |
| { | |
| "epoch": 22.80456084090504, | |
| "eval_loss": 0.09294673800468445, | |
| "eval_runtime": 159.1061, | |
| "eval_samples_per_second": 3.143, | |
| "eval_steps_per_second": 0.396, | |
| "eval_wer": 0.201886236368995, | |
| "step": 32000 | |
| }, | |
| { | |
| "epoch": 22.87582398004632, | |
| "grad_norm": 13.594120025634766, | |
| "learning_rate": 8.65943600867679e-06, | |
| "loss": 1.0777, | |
| "step": 32100 | |
| }, | |
| { | |
| "epoch": 22.9470871191876, | |
| "grad_norm": 15.224822044372559, | |
| "learning_rate": 8.370209689081707e-06, | |
| "loss": 1.0544, | |
| "step": 32200 | |
| }, | |
| { | |
| "epoch": 23.01781578478532, | |
| "grad_norm": 11.079718589782715, | |
| "learning_rate": 8.080983369486623e-06, | |
| "loss": 1.0563, | |
| "step": 32300 | |
| }, | |
| { | |
| "epoch": 23.0890789239266, | |
| "grad_norm": 12.417219161987305, | |
| "learning_rate": 7.791757049891541e-06, | |
| "loss": 0.9784, | |
| "step": 32400 | |
| }, | |
| { | |
| "epoch": 23.0890789239266, | |
| "eval_loss": 0.0926588624715805, | |
| "eval_runtime": 159.1992, | |
| "eval_samples_per_second": 3.141, | |
| "eval_steps_per_second": 0.396, | |
| "eval_wer": 0.19510757441791923, | |
| "step": 32400 | |
| }, | |
| { | |
| "epoch": 23.16034206306788, | |
| "grad_norm": 15.528765678405762, | |
| "learning_rate": 7.502530730296457e-06, | |
| "loss": 0.9855, | |
| "step": 32500 | |
| }, | |
| { | |
| "epoch": 23.231605202209156, | |
| "grad_norm": 11.74361801147461, | |
| "learning_rate": 7.213304410701374e-06, | |
| "loss": 1.0193, | |
| "step": 32600 | |
| }, | |
| { | |
| "epoch": 23.302868341350436, | |
| "grad_norm": 11.150557518005371, | |
| "learning_rate": 6.924078091106291e-06, | |
| "loss": 0.9878, | |
| "step": 32700 | |
| }, | |
| { | |
| "epoch": 23.374131480491716, | |
| "grad_norm": 11.871788024902344, | |
| "learning_rate": 6.634851771511208e-06, | |
| "loss": 1.0443, | |
| "step": 32800 | |
| }, | |
| { | |
| "epoch": 23.374131480491716, | |
| "eval_loss": 0.09271341562271118, | |
| "eval_runtime": 157.7057, | |
| "eval_samples_per_second": 3.17, | |
| "eval_steps_per_second": 0.399, | |
| "eval_wer": 0.19952844090775126, | |
| "step": 32800 | |
| }, | |
| { | |
| "epoch": 23.445394619632996, | |
| "grad_norm": 17.588510513305664, | |
| "learning_rate": 6.3456254519161245e-06, | |
| "loss": 1.019, | |
| "step": 32900 | |
| }, | |
| { | |
| "epoch": 23.516657758774272, | |
| "grad_norm": 13.247255325317383, | |
| "learning_rate": 6.0563991323210416e-06, | |
| "loss": 1.0711, | |
| "step": 33000 | |
| }, | |
| { | |
| "epoch": 23.587920897915552, | |
| "grad_norm": 10.2926025390625, | |
| "learning_rate": 5.7671728127259586e-06, | |
| "loss": 1.0257, | |
| "step": 33100 | |
| }, | |
| { | |
| "epoch": 23.659184037056832, | |
| "grad_norm": 13.390459060668945, | |
| "learning_rate": 5.4779464931308756e-06, | |
| "loss": 0.9972, | |
| "step": 33200 | |
| }, | |
| { | |
| "epoch": 23.659184037056832, | |
| "eval_loss": 0.09225763380527496, | |
| "eval_runtime": 158.7123, | |
| "eval_samples_per_second": 3.15, | |
| "eval_steps_per_second": 0.397, | |
| "eval_wer": 0.19952844090775126, | |
| "step": 33200 | |
| }, | |
| { | |
| "epoch": 23.730447176198112, | |
| "grad_norm": 18.851301193237305, | |
| "learning_rate": 5.188720173535792e-06, | |
| "loss": 1.0099, | |
| "step": 33300 | |
| }, | |
| { | |
| "epoch": 23.801710315339392, | |
| "grad_norm": 12.088775634765625, | |
| "learning_rate": 4.899493853940709e-06, | |
| "loss": 0.9931, | |
| "step": 33400 | |
| }, | |
| { | |
| "epoch": 23.872973454480668, | |
| "grad_norm": 12.231010437011719, | |
| "learning_rate": 4.610267534345626e-06, | |
| "loss": 1.0545, | |
| "step": 33500 | |
| }, | |
| { | |
| "epoch": 23.944236593621948, | |
| "grad_norm": 12.138983726501465, | |
| "learning_rate": 4.323933477946494e-06, | |
| "loss": 1.0527, | |
| "step": 33600 | |
| }, | |
| { | |
| "epoch": 23.944236593621948, | |
| "eval_loss": 0.09295401722192764, | |
| "eval_runtime": 158.8474, | |
| "eval_samples_per_second": 3.148, | |
| "eval_steps_per_second": 0.397, | |
| "eval_wer": 0.19643383436486886, | |
| "step": 33600 | |
| }, | |
| { | |
| "epoch": 24.014965259219668, | |
| "grad_norm": 12.55706787109375, | |
| "learning_rate": 4.03470715835141e-06, | |
| "loss": 0.9904, | |
| "step": 33700 | |
| }, | |
| { | |
| "epoch": 24.086228398360948, | |
| "grad_norm": 15.65674114227295, | |
| "learning_rate": 3.745480838756327e-06, | |
| "loss": 1.0292, | |
| "step": 33800 | |
| }, | |
| { | |
| "epoch": 24.157491537502228, | |
| "grad_norm": 12.829319953918457, | |
| "learning_rate": 3.4562545191612434e-06, | |
| "loss": 0.9981, | |
| "step": 33900 | |
| }, | |
| { | |
| "epoch": 24.228754676643508, | |
| "grad_norm": 10.557638168334961, | |
| "learning_rate": 3.1670281995661605e-06, | |
| "loss": 0.9927, | |
| "step": 34000 | |
| }, | |
| { | |
| "epoch": 24.228754676643508, | |
| "eval_loss": 0.09272577613592148, | |
| "eval_runtime": 160.295, | |
| "eval_samples_per_second": 3.119, | |
| "eval_steps_per_second": 0.393, | |
| "eval_wer": 0.19790745652814618, | |
| "step": 34000 | |
| }, | |
| { | |
| "epoch": 24.300017815784784, | |
| "grad_norm": 11.129683494567871, | |
| "learning_rate": 2.8778018799710775e-06, | |
| "loss": 0.9646, | |
| "step": 34100 | |
| }, | |
| { | |
| "epoch": 24.371280954926064, | |
| "grad_norm": 11.022905349731445, | |
| "learning_rate": 2.588575560375994e-06, | |
| "loss": 0.9873, | |
| "step": 34200 | |
| }, | |
| { | |
| "epoch": 24.442544094067344, | |
| "grad_norm": 9.839102745056152, | |
| "learning_rate": 2.299349240780911e-06, | |
| "loss": 0.9995, | |
| "step": 34300 | |
| }, | |
| { | |
| "epoch": 24.513807233208624, | |
| "grad_norm": 10.704620361328125, | |
| "learning_rate": 2.010122921185828e-06, | |
| "loss": 0.9504, | |
| "step": 34400 | |
| }, | |
| { | |
| "epoch": 24.513807233208624, | |
| "eval_loss": 0.09273621439933777, | |
| "eval_runtime": 158.2643, | |
| "eval_samples_per_second": 3.159, | |
| "eval_steps_per_second": 0.398, | |
| "eval_wer": 0.19599174771588565, | |
| "step": 34400 | |
| }, | |
| { | |
| "epoch": 24.585070372349904, | |
| "grad_norm": 15.199273109436035, | |
| "learning_rate": 1.7208966015907449e-06, | |
| "loss": 0.99, | |
| "step": 34500 | |
| }, | |
| { | |
| "epoch": 24.65633351149118, | |
| "grad_norm": 8.751911163330078, | |
| "learning_rate": 1.4316702819956617e-06, | |
| "loss": 0.9746, | |
| "step": 34600 | |
| }, | |
| { | |
| "epoch": 24.72759665063246, | |
| "grad_norm": 11.815445899963379, | |
| "learning_rate": 1.1424439624005785e-06, | |
| "loss": 1.0184, | |
| "step": 34700 | |
| }, | |
| { | |
| "epoch": 24.79885978977374, | |
| "grad_norm": 12.593855857849121, | |
| "learning_rate": 8.532176428054954e-07, | |
| "loss": 1.0567, | |
| "step": 34800 | |
| }, | |
| { | |
| "epoch": 24.79885978977374, | |
| "eval_loss": 0.09248282015323639, | |
| "eval_runtime": 161.1515, | |
| "eval_samples_per_second": 3.103, | |
| "eval_steps_per_second": 0.391, | |
| "eval_wer": 0.19864426760978485, | |
| "step": 34800 | |
| }, | |
| { | |
| "epoch": 24.875111348654908, | |
| "grad_norm": 10.561070442199707, | |
| "learning_rate": 5.639913232104122e-07, | |
| "loss": 0.9481, | |
| "step": 34900 | |
| }, | |
| { | |
| "epoch": 24.946374487796188, | |
| "grad_norm": 11.20162296295166, | |
| "learning_rate": 2.74765003615329e-07, | |
| "loss": 0.9673, | |
| "step": 35000 | |
| }, | |
| { | |
| "epoch": 25.01781578478532, | |
| "grad_norm": 15.058631896972656, | |
| "learning_rate": 1.6855013224332772e-05, | |
| "loss": 0.9582, | |
| "step": 35100 | |
| }, | |
| { | |
| "epoch": 25.0890789239266, | |
| "grad_norm": 11.464241027832031, | |
| "learning_rate": 1.6614570810290935e-05, | |
| "loss": 1.0316, | |
| "step": 35200 | |
| }, | |
| { | |
| "epoch": 25.0890789239266, | |
| "eval_loss": 0.09259089827537537, | |
| "eval_runtime": 149.1152, | |
| "eval_samples_per_second": 3.353, | |
| "eval_steps_per_second": 0.422, | |
| "eval_wer": 0.19834954317712938, | |
| "step": 35200 | |
| }, | |
| { | |
| "epoch": 25.16034206306788, | |
| "grad_norm": 23.382781982421875, | |
| "learning_rate": 1.63741283962491e-05, | |
| "loss": 0.9861, | |
| "step": 35300 | |
| }, | |
| { | |
| "epoch": 25.231605202209156, | |
| "grad_norm": 16.841032028198242, | |
| "learning_rate": 1.6133685982207263e-05, | |
| "loss": 1.0558, | |
| "step": 35400 | |
| }, | |
| { | |
| "epoch": 25.302868341350436, | |
| "grad_norm": 15.60053825378418, | |
| "learning_rate": 1.5893243568165425e-05, | |
| "loss": 1.0135, | |
| "step": 35500 | |
| }, | |
| { | |
| "epoch": 25.374131480491716, | |
| "grad_norm": 14.612176895141602, | |
| "learning_rate": 1.5655205578264005e-05, | |
| "loss": 0.9926, | |
| "step": 35600 | |
| }, | |
| { | |
| "epoch": 25.374131480491716, | |
| "eval_loss": 0.09281676262617111, | |
| "eval_runtime": 146.8753, | |
| "eval_samples_per_second": 3.404, | |
| "eval_steps_per_second": 0.429, | |
| "eval_wer": 0.19820218096080164, | |
| "step": 35600 | |
| }, | |
| { | |
| "epoch": 25.445394619632996, | |
| "grad_norm": 21.735061645507812, | |
| "learning_rate": 1.541476316422217e-05, | |
| "loss": 1.0212, | |
| "step": 35700 | |
| }, | |
| { | |
| "epoch": 25.516657758774272, | |
| "grad_norm": 15.282707214355469, | |
| "learning_rate": 1.5174320750180331e-05, | |
| "loss": 1.0118, | |
| "step": 35800 | |
| }, | |
| { | |
| "epoch": 25.587920897915552, | |
| "grad_norm": 25.81006622314453, | |
| "learning_rate": 1.4933878336138495e-05, | |
| "loss": 1.0535, | |
| "step": 35900 | |
| }, | |
| { | |
| "epoch": 25.659184037056832, | |
| "grad_norm": 12.21265697479248, | |
| "learning_rate": 1.4693435922096658e-05, | |
| "loss": 1.0646, | |
| "step": 36000 | |
| }, | |
| { | |
| "epoch": 25.659184037056832, | |
| "eval_loss": 0.0927177220582962, | |
| "eval_runtime": 148.3711, | |
| "eval_samples_per_second": 3.37, | |
| "eval_steps_per_second": 0.425, | |
| "eval_wer": 0.2005599764220454, | |
| "step": 36000 | |
| }, | |
| { | |
| "epoch": 25.730447176198112, | |
| "grad_norm": 13.201019287109375, | |
| "learning_rate": 1.4452993508054822e-05, | |
| "loss": 1.0161, | |
| "step": 36100 | |
| }, | |
| { | |
| "epoch": 25.801710315339392, | |
| "grad_norm": 9.418228149414062, | |
| "learning_rate": 1.4212551094012986e-05, | |
| "loss": 1.0435, | |
| "step": 36200 | |
| }, | |
| { | |
| "epoch": 25.872973454480668, | |
| "grad_norm": 19.827404022216797, | |
| "learning_rate": 1.3972108679971146e-05, | |
| "loss": 1.0984, | |
| "step": 36300 | |
| }, | |
| { | |
| "epoch": 25.944236593621948, | |
| "grad_norm": 18.476207733154297, | |
| "learning_rate": 1.373166626592931e-05, | |
| "loss": 1.0316, | |
| "step": 36400 | |
| }, | |
| { | |
| "epoch": 25.944236593621948, | |
| "eval_loss": 0.09291358292102814, | |
| "eval_runtime": 147.4091, | |
| "eval_samples_per_second": 3.392, | |
| "eval_steps_per_second": 0.427, | |
| "eval_wer": 0.20380194518125552, | |
| "step": 36400 | |
| }, | |
| { | |
| "epoch": 26.014965259219668, | |
| "grad_norm": 11.769281387329102, | |
| "learning_rate": 1.3491223851887474e-05, | |
| "loss": 1.0278, | |
| "step": 36500 | |
| }, | |
| { | |
| "epoch": 26.086228398360948, | |
| "grad_norm": 11.849235534667969, | |
| "learning_rate": 1.3250781437845635e-05, | |
| "loss": 0.9957, | |
| "step": 36600 | |
| }, | |
| { | |
| "epoch": 26.157491537502228, | |
| "grad_norm": 15.007696151733398, | |
| "learning_rate": 1.3010339023803799e-05, | |
| "loss": 1.0203, | |
| "step": 36700 | |
| }, | |
| { | |
| "epoch": 26.228754676643508, | |
| "grad_norm": 15.471383094787598, | |
| "learning_rate": 1.2769896609761963e-05, | |
| "loss": 1.0315, | |
| "step": 36800 | |
| }, | |
| { | |
| "epoch": 26.228754676643508, | |
| "eval_loss": 0.09275855869054794, | |
| "eval_runtime": 147.5238, | |
| "eval_samples_per_second": 3.389, | |
| "eval_steps_per_second": 0.427, | |
| "eval_wer": 0.20218096080165046, | |
| "step": 36800 | |
| }, | |
| { | |
| "epoch": 26.300017815784784, | |
| "grad_norm": 17.358564376831055, | |
| "learning_rate": 1.2529454195720125e-05, | |
| "loss": 1.0202, | |
| "step": 36900 | |
| }, | |
| { | |
| "epoch": 26.371280954926064, | |
| "grad_norm": 15.642189025878906, | |
| "learning_rate": 1.228901178167829e-05, | |
| "loss": 1.0203, | |
| "step": 37000 | |
| }, | |
| { | |
| "epoch": 26.442544094067344, | |
| "grad_norm": 10.802166938781738, | |
| "learning_rate": 1.2048569367636452e-05, | |
| "loss": 1.0047, | |
| "step": 37100 | |
| }, | |
| { | |
| "epoch": 26.513807233208624, | |
| "grad_norm": 15.042948722839355, | |
| "learning_rate": 1.1808126953594614e-05, | |
| "loss": 1.0131, | |
| "step": 37200 | |
| }, | |
| { | |
| "epoch": 26.513807233208624, | |
| "eval_loss": 0.0926903486251831, | |
| "eval_runtime": 147.4732, | |
| "eval_samples_per_second": 3.39, | |
| "eval_steps_per_second": 0.427, | |
| "eval_wer": 0.20350722074860006, | |
| "step": 37200 | |
| }, | |
| { | |
| "epoch": 26.585070372349904, | |
| "grad_norm": 17.35483169555664, | |
| "learning_rate": 1.1567684539552778e-05, | |
| "loss": 1.0161, | |
| "step": 37300 | |
| }, | |
| { | |
| "epoch": 26.65633351149118, | |
| "grad_norm": 15.87969970703125, | |
| "learning_rate": 1.132724212551094e-05, | |
| "loss": 1.0339, | |
| "step": 37400 | |
| }, | |
| { | |
| "epoch": 26.72759665063246, | |
| "grad_norm": 12.541560173034668, | |
| "learning_rate": 1.1086799711469103e-05, | |
| "loss": 0.9692, | |
| "step": 37500 | |
| }, | |
| { | |
| "epoch": 26.79885978977374, | |
| "grad_norm": 13.724421501159668, | |
| "learning_rate": 1.0848761721567685e-05, | |
| "loss": 0.9659, | |
| "step": 37600 | |
| }, | |
| { | |
| "epoch": 26.79885978977374, | |
| "eval_loss": 0.09249202907085419, | |
| "eval_runtime": 146.0218, | |
| "eval_samples_per_second": 3.424, | |
| "eval_steps_per_second": 0.431, | |
| "eval_wer": 0.19997052755673445, | |
| "step": 37600 | |
| }, | |
| { | |
| "epoch": 26.87012292891502, | |
| "grad_norm": 9.979654312133789, | |
| "learning_rate": 1.0608319307525847e-05, | |
| "loss": 1.0082, | |
| "step": 37700 | |
| }, | |
| { | |
| "epoch": 26.941386068056296, | |
| "grad_norm": 21.3048095703125, | |
| "learning_rate": 1.0367876893484011e-05, | |
| "loss": 0.9767, | |
| "step": 37800 | |
| }, | |
| { | |
| "epoch": 27.012114733654016, | |
| "grad_norm": 13.462841987609863, | |
| "learning_rate": 1.0127434479442175e-05, | |
| "loss": 1.0252, | |
| "step": 37900 | |
| }, | |
| { | |
| "epoch": 27.083377872795296, | |
| "grad_norm": 10.681748390197754, | |
| "learning_rate": 9.886992065400337e-06, | |
| "loss": 1.0056, | |
| "step": 38000 | |
| }, | |
| { | |
| "epoch": 27.083377872795296, | |
| "eval_loss": 0.09220070391893387, | |
| "eval_runtime": 148.5209, | |
| "eval_samples_per_second": 3.367, | |
| "eval_steps_per_second": 0.424, | |
| "eval_wer": 0.19923371647509577, | |
| "step": 38000 | |
| }, | |
| { | |
| "epoch": 27.154641011936576, | |
| "grad_norm": 13.021913528442383, | |
| "learning_rate": 9.6465496513585e-06, | |
| "loss": 0.9757, | |
| "step": 38100 | |
| }, | |
| { | |
| "epoch": 27.225904151077856, | |
| "grad_norm": 16.946664810180664, | |
| "learning_rate": 9.406107237316664e-06, | |
| "loss": 1.0595, | |
| "step": 38200 | |
| }, | |
| { | |
| "epoch": 27.297167290219136, | |
| "grad_norm": 13.715904235839844, | |
| "learning_rate": 9.165664823274826e-06, | |
| "loss": 0.9515, | |
| "step": 38300 | |
| }, | |
| { | |
| "epoch": 27.368430429360412, | |
| "grad_norm": 12.851482391357422, | |
| "learning_rate": 8.925222409232988e-06, | |
| "loss": 1.007, | |
| "step": 38400 | |
| }, | |
| { | |
| "epoch": 27.368430429360412, | |
| "eval_loss": 0.09224120527505875, | |
| "eval_runtime": 146.7128, | |
| "eval_samples_per_second": 3.408, | |
| "eval_steps_per_second": 0.429, | |
| "eval_wer": 0.19967580312407898, | |
| "step": 38400 | |
| }, | |
| { | |
| "epoch": 27.439693568501692, | |
| "grad_norm": 14.637819290161133, | |
| "learning_rate": 8.684779995191152e-06, | |
| "loss": 0.9863, | |
| "step": 38500 | |
| }, | |
| { | |
| "epoch": 27.510956707642972, | |
| "grad_norm": 11.744438171386719, | |
| "learning_rate": 8.444337581149316e-06, | |
| "loss": 0.9084, | |
| "step": 38600 | |
| }, | |
| { | |
| "epoch": 27.582219846784252, | |
| "grad_norm": 12.19479751586914, | |
| "learning_rate": 8.203895167107479e-06, | |
| "loss": 1.0345, | |
| "step": 38700 | |
| }, | |
| { | |
| "epoch": 27.65348298592553, | |
| "grad_norm": 18.433185577392578, | |
| "learning_rate": 7.963452753065641e-06, | |
| "loss": 0.9602, | |
| "step": 38800 | |
| }, | |
| { | |
| "epoch": 27.65348298592553, | |
| "eval_loss": 0.09226806461811066, | |
| "eval_runtime": 147.8247, | |
| "eval_samples_per_second": 3.382, | |
| "eval_steps_per_second": 0.426, | |
| "eval_wer": 0.20173887415266725, | |
| "step": 38800 | |
| }, | |
| { | |
| "epoch": 27.724746125066808, | |
| "grad_norm": 9.473343849182129, | |
| "learning_rate": 7.723010339023805e-06, | |
| "loss": 0.9717, | |
| "step": 38900 | |
| }, | |
| { | |
| "epoch": 27.796009264208088, | |
| "grad_norm": 12.731324195861816, | |
| "learning_rate": 7.482567924981967e-06, | |
| "loss": 0.9844, | |
| "step": 39000 | |
| }, | |
| { | |
| "epoch": 27.867272403349368, | |
| "grad_norm": 23.304418563842773, | |
| "learning_rate": 7.24212551094013e-06, | |
| "loss": 0.9744, | |
| "step": 39100 | |
| }, | |
| { | |
| "epoch": 27.938535542490648, | |
| "grad_norm": 11.320829391479492, | |
| "learning_rate": 7.001683096898293e-06, | |
| "loss": 0.9353, | |
| "step": 39200 | |
| }, | |
| { | |
| "epoch": 27.938535542490648, | |
| "eval_loss": 0.09226758033037186, | |
| "eval_runtime": 146.6121, | |
| "eval_samples_per_second": 3.41, | |
| "eval_steps_per_second": 0.43, | |
| "eval_wer": 0.1989389920424403, | |
| "step": 39200 | |
| }, | |
| { | |
| "epoch": 28.009264208088368, | |
| "grad_norm": 12.030290603637695, | |
| "learning_rate": 6.761240682856457e-06, | |
| "loss": 1.0218, | |
| "step": 39300 | |
| }, | |
| { | |
| "epoch": 28.080527347229644, | |
| "grad_norm": 12.404072761535645, | |
| "learning_rate": 6.520798268814619e-06, | |
| "loss": 0.9249, | |
| "step": 39400 | |
| }, | |
| { | |
| "epoch": 28.151790486370924, | |
| "grad_norm": 9.461714744567871, | |
| "learning_rate": 6.280355854772782e-06, | |
| "loss": 0.9299, | |
| "step": 39500 | |
| }, | |
| { | |
| "epoch": 28.223053625512204, | |
| "grad_norm": 14.217653274536133, | |
| "learning_rate": 6.039913440730945e-06, | |
| "loss": 0.951, | |
| "step": 39600 | |
| }, | |
| { | |
| "epoch": 28.223053625512204, | |
| "eval_loss": 0.09204767644405365, | |
| "eval_runtime": 147.0572, | |
| "eval_samples_per_second": 3.4, | |
| "eval_steps_per_second": 0.428, | |
| "eval_wer": 0.19834954317712938, | |
| "step": 39600 | |
| }, | |
| { | |
| "epoch": 28.294316764653484, | |
| "grad_norm": 12.441349029541016, | |
| "learning_rate": 5.799471026689108e-06, | |
| "loss": 0.9283, | |
| "step": 39700 | |
| }, | |
| { | |
| "epoch": 28.365579903794764, | |
| "grad_norm": 16.366159439086914, | |
| "learning_rate": 5.56143303678769e-06, | |
| "loss": 1.0031, | |
| "step": 39800 | |
| }, | |
| { | |
| "epoch": 28.43684304293604, | |
| "grad_norm": 11.885308265686035, | |
| "learning_rate": 5.320990622745853e-06, | |
| "loss": 0.964, | |
| "step": 39900 | |
| }, | |
| { | |
| "epoch": 28.50810618207732, | |
| "grad_norm": 9.691365242004395, | |
| "learning_rate": 5.080548208704015e-06, | |
| "loss": 0.9675, | |
| "step": 40000 | |
| }, | |
| { | |
| "epoch": 28.50810618207732, | |
| "eval_loss": 0.09217335283756256, | |
| "eval_runtime": 145.8792, | |
| "eval_samples_per_second": 3.427, | |
| "eval_steps_per_second": 0.432, | |
| "eval_wer": 0.19687592101385204, | |
| "step": 40000 | |
| }, | |
| { | |
| "epoch": 28.5793693212186, | |
| "grad_norm": 11.783367156982422, | |
| "learning_rate": 4.840105794662179e-06, | |
| "loss": 0.9698, | |
| "step": 40100 | |
| }, | |
| { | |
| "epoch": 28.65063246035988, | |
| "grad_norm": 12.037036895751953, | |
| "learning_rate": 4.5996633806203414e-06, | |
| "loss": 1.0061, | |
| "step": 40200 | |
| }, | |
| { | |
| "epoch": 28.72189559950116, | |
| "grad_norm": 9.481605529785156, | |
| "learning_rate": 4.359220966578505e-06, | |
| "loss": 0.9748, | |
| "step": 40300 | |
| }, | |
| { | |
| "epoch": 28.793158738642436, | |
| "grad_norm": 10.295605659484863, | |
| "learning_rate": 4.118778552536667e-06, | |
| "loss": 0.9398, | |
| "step": 40400 | |
| }, | |
| { | |
| "epoch": 28.793158738642436, | |
| "eval_loss": 0.0921778753399849, | |
| "eval_runtime": 145.4752, | |
| "eval_samples_per_second": 3.437, | |
| "eval_steps_per_second": 0.433, | |
| "eval_wer": 0.19982316534040673, | |
| "step": 40400 | |
| }, | |
| { | |
| "epoch": 28.864421877783716, | |
| "grad_norm": 16.17976188659668, | |
| "learning_rate": 3.878336138494831e-06, | |
| "loss": 0.9462, | |
| "step": 40500 | |
| }, | |
| { | |
| "epoch": 28.935685016924996, | |
| "grad_norm": 12.729133605957031, | |
| "learning_rate": 3.637893724452994e-06, | |
| "loss": 0.9777, | |
| "step": 40600 | |
| }, | |
| { | |
| "epoch": 29.006413682522716, | |
| "grad_norm": 15.731132507324219, | |
| "learning_rate": 3.3974513104111564e-06, | |
| "loss": 0.9736, | |
| "step": 40700 | |
| }, | |
| { | |
| "epoch": 29.077676821663996, | |
| "grad_norm": 9.655588150024414, | |
| "learning_rate": 3.15700889636932e-06, | |
| "loss": 0.9533, | |
| "step": 40800 | |
| }, | |
| { | |
| "epoch": 29.077676821663996, | |
| "eval_loss": 0.09240180999040604, | |
| "eval_runtime": 145.3169, | |
| "eval_samples_per_second": 3.441, | |
| "eval_steps_per_second": 0.434, | |
| "eval_wer": 0.1976127320954907, | |
| "step": 40800 | |
| }, | |
| { | |
| "epoch": 29.148939960805272, | |
| "grad_norm": 12.47839641571045, | |
| "learning_rate": 2.9165664823274828e-06, | |
| "loss": 0.9257, | |
| "step": 40900 | |
| }, | |
| { | |
| "epoch": 29.220203099946552, | |
| "grad_norm": 9.78573989868164, | |
| "learning_rate": 2.676124068285646e-06, | |
| "loss": 0.9376, | |
| "step": 41000 | |
| }, | |
| { | |
| "epoch": 29.291466239087832, | |
| "grad_norm": 10.32386589050293, | |
| "learning_rate": 2.4356816542438087e-06, | |
| "loss": 0.9428, | |
| "step": 41100 | |
| }, | |
| { | |
| "epoch": 29.362729378229112, | |
| "grad_norm": 12.5963773727417, | |
| "learning_rate": 2.195239240201972e-06, | |
| "loss": 0.9519, | |
| "step": 41200 | |
| }, | |
| { | |
| "epoch": 29.362729378229112, | |
| "eval_loss": 0.09224098175764084, | |
| "eval_runtime": 145.4228, | |
| "eval_samples_per_second": 3.438, | |
| "eval_steps_per_second": 0.433, | |
| "eval_wer": 0.19687592101385204, | |
| "step": 41200 | |
| }, | |
| { | |
| "epoch": 29.433992517370392, | |
| "grad_norm": 9.201010704040527, | |
| "learning_rate": 1.954796826160135e-06, | |
| "loss": 0.9134, | |
| "step": 41300 | |
| }, | |
| { | |
| "epoch": 29.505255656511668, | |
| "grad_norm": 12.38512134552002, | |
| "learning_rate": 1.7143544121182977e-06, | |
| "loss": 0.9591, | |
| "step": 41400 | |
| }, | |
| { | |
| "epoch": 29.576518795652948, | |
| "grad_norm": 12.809625625610352, | |
| "learning_rate": 1.4739119980764607e-06, | |
| "loss": 0.8946, | |
| "step": 41500 | |
| }, | |
| { | |
| "epoch": 29.647781934794228, | |
| "grad_norm": 11.387064933776855, | |
| "learning_rate": 1.2334695840346237e-06, | |
| "loss": 0.9297, | |
| "step": 41600 | |
| }, | |
| { | |
| "epoch": 29.647781934794228, | |
| "eval_loss": 0.09202717989683151, | |
| "eval_runtime": 145.5856, | |
| "eval_samples_per_second": 3.434, | |
| "eval_steps_per_second": 0.433, | |
| "eval_wer": 0.19820218096080164, | |
| "step": 41600 | |
| }, | |
| { | |
| "epoch": 29.719045073935508, | |
| "grad_norm": 11.282709121704102, | |
| "learning_rate": 9.930271699927866e-07, | |
| "loss": 0.9995, | |
| "step": 41700 | |
| }, | |
| { | |
| "epoch": 29.790308213076784, | |
| "grad_norm": 10.81017780303955, | |
| "learning_rate": 7.525847559509498e-07, | |
| "loss": 0.9876, | |
| "step": 41800 | |
| }, | |
| { | |
| "epoch": 29.861571352218064, | |
| "grad_norm": 11.903881072998047, | |
| "learning_rate": 5.121423419091128e-07, | |
| "loss": 0.9524, | |
| "step": 41900 | |
| }, | |
| { | |
| "epoch": 29.932834491359344, | |
| "grad_norm": 11.278658866882324, | |
| "learning_rate": 2.7410435200769414e-07, | |
| "loss": 0.9491, | |
| "step": 42000 | |
| }, | |
| { | |
| "epoch": 29.932834491359344, | |
| "eval_loss": 0.09202782809734344, | |
| "eval_runtime": 145.311, | |
| "eval_samples_per_second": 3.441, | |
| "eval_steps_per_second": 0.434, | |
| "eval_wer": 0.19908635425876806, | |
| "step": 42000 | |
| } | |
| ], | |
| "logging_steps": 100, | |
| "max_steps": 42090, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 30, | |
| "save_steps": 400, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 5.472222473355264e+19, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
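The object above is the full serialized trainer state for this run: `log_history` holds one entry per 100 training steps (`loss`, `grad_norm`, `learning_rate`) plus one entry per evaluation (`eval_loss`, `eval_wer`, runtime and throughput), and the trailing fields record the schedule (`logging_steps` 100, `save_steps` 400, `max_steps` 42090, 30 epochs, per-device batch size 8). As a minimal sketch, assuming the file is saved under the Trainer's usual name `trainer_state.json`, the evaluation-WER curve can be read back like this:

```python
import json

# Illustrative only, not part of the checkpoint: load the saved trainer state
# and pull out the evaluation entries. Training-loss entries carry "loss";
# evaluation entries additionally carry "eval_*" keys such as "eval_wer".
with open("trainer_state.json") as f:  # assumed local path to the file shown above
    state = json.load(f)

evals = [entry for entry in state["log_history"] if "eval_wer" in entry]

# (step, WER) pairs in logging order, then the step with the lowest WER.
curve = [(entry["step"], entry["eval_wer"]) for entry in evals]
best_step, best_wer = min(curve, key=lambda point: point[1])
print(f"{len(curve)} eval points; lowest eval_wer {best_wer:.4f} at step {best_step}")
```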