{
  "best_global_step": 3000,
  "best_metric": 5.500143170754987,
  "best_model_checkpoint": "./SALAMA_C3/checkpoint-3000",
  "epoch": 5.025125628140704,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03350083752093802,
      "grad_norm": 11.757207870483398,
      "learning_rate": 6.333333333333334e-07,
      "loss": 0.5112,
      "step": 20
    },
    {
      "epoch": 0.06700167504187604,
      "grad_norm": 10.197953224182129,
      "learning_rate": 1.3e-06,
      "loss": 0.5283,
      "step": 40
    },
    {
      "epoch": 0.10050251256281408,
      "grad_norm": 12.811074256896973,
      "learning_rate": 1.9666666666666668e-06,
      "loss": 0.474,
      "step": 60
    },
    {
      "epoch": 0.13400335008375208,
      "grad_norm": 8.559342384338379,
      "learning_rate": 2.6e-06,
      "loss": 0.424,
      "step": 80
    },
    {
      "epoch": 0.16750418760469013,
      "grad_norm": 6.9629225730896,
      "learning_rate": 3.266666666666667e-06,
      "loss": 0.4465,
      "step": 100
    },
    {
      "epoch": 0.20100502512562815,
      "grad_norm": 7.833058834075928,
      "learning_rate": 3.9333333333333335e-06,
      "loss": 0.3764,
      "step": 120
    },
    {
      "epoch": 0.23450586264656617,
      "grad_norm": 6.882424831390381,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.3705,
      "step": 140
    },
    {
      "epoch": 0.26800670016750416,
      "grad_norm": 5.4183244705200195,
      "learning_rate": 5.2666666666666665e-06,
      "loss": 0.4116,
      "step": 160
    },
    {
      "epoch": 0.3015075376884422,
      "grad_norm": 6.103787899017334,
      "learning_rate": 5.933333333333335e-06,
      "loss": 0.3903,
      "step": 180
    },
    {
      "epoch": 0.33500837520938026,
      "grad_norm": 6.084160804748535,
      "learning_rate": 6.600000000000001e-06,
      "loss": 0.3794,
      "step": 200
    },
    {
      "epoch": 0.3685092127303183,
      "grad_norm": 4.531406879425049,
      "learning_rate": 7.266666666666668e-06,
      "loss": 0.3904,
      "step": 220
    },
    {
      "epoch": 0.4020100502512563,
      "grad_norm": 6.702854156494141,
      "learning_rate": 7.933333333333334e-06,
      "loss": 0.3672,
      "step": 240
    },
    {
      "epoch": 0.4355108877721943,
      "grad_norm": 6.386377811431885,
      "learning_rate": 8.6e-06,
      "loss": 0.3379,
      "step": 260
    },
    {
      "epoch": 0.46901172529313234,
      "grad_norm": 5.33281135559082,
      "learning_rate": 9.266666666666667e-06,
      "loss": 0.4102,
      "step": 280
    },
    {
      "epoch": 0.5025125628140703,
      "grad_norm": 6.031435012817383,
      "learning_rate": 9.933333333333334e-06,
      "loss": 0.3815,
      "step": 300
    },
    {
      "epoch": 0.5360134003350083,
      "grad_norm": 4.8638176918029785,
      "learning_rate": 9.968253968253969e-06,
      "loss": 0.358,
      "step": 320
    },
    {
      "epoch": 0.5695142378559463,
      "grad_norm": 5.352113723754883,
      "learning_rate": 9.932980599647268e-06,
      "loss": 0.3578,
      "step": 340
    },
    {
      "epoch": 0.6030150753768844,
      "grad_norm": 5.560739040374756,
      "learning_rate": 9.897707231040565e-06,
      "loss": 0.3795,
      "step": 360
    },
    {
      "epoch": 0.6365159128978225,
      "grad_norm": 6.186940670013428,
      "learning_rate": 9.862433862433864e-06,
      "loss": 0.3852,
      "step": 380
    },
    {
      "epoch": 0.6700167504187605,
      "grad_norm": 5.37507438659668,
      "learning_rate": 9.827160493827161e-06,
      "loss": 0.3761,
      "step": 400
    },
    {
      "epoch": 0.7035175879396985,
      "grad_norm": 6.373449802398682,
      "learning_rate": 9.79188712522046e-06,
      "loss": 0.3674,
      "step": 420
    },
    {
      "epoch": 0.7370184254606366,
      "grad_norm": 5.702625274658203,
      "learning_rate": 9.756613756613757e-06,
      "loss": 0.4013,
      "step": 440
    },
    {
      "epoch": 0.7705192629815746,
      "grad_norm": 5.5675153732299805,
      "learning_rate": 9.721340388007056e-06,
      "loss": 0.3495,
      "step": 460
    },
    {
      "epoch": 0.8040201005025126,
      "grad_norm": 6.296374320983887,
      "learning_rate": 9.686067019400353e-06,
      "loss": 0.3704,
      "step": 480
    },
    {
      "epoch": 0.8375209380234506,
      "grad_norm": 4.830463886260986,
      "learning_rate": 9.650793650793652e-06,
      "loss": 0.36,
      "step": 500
    },
    {
      "epoch": 0.8375209380234506,
      "eval_loss": 0.27764827013015747,
      "eval_runtime": 1752.3826,
      "eval_samples_per_second": 2.725,
      "eval_steps_per_second": 0.341,
      "eval_wer": 19.59291781998664,
      "step": 500
    },
    {
      "epoch": 0.8710217755443886,
      "grad_norm": 4.863058090209961,
      "learning_rate": 9.61552028218695e-06,
      "loss": 0.3874,
      "step": 520
    },
    {
      "epoch": 0.9045226130653267,
      "grad_norm": 6.121027946472168,
      "learning_rate": 9.580246913580248e-06,
      "loss": 0.382,
      "step": 540
    },
    {
      "epoch": 0.9380234505862647,
      "grad_norm": 4.538082599639893,
      "learning_rate": 9.544973544973546e-06,
      "loss": 0.3611,
      "step": 560
    },
    {
      "epoch": 0.9715242881072027,
      "grad_norm": 4.8772382736206055,
      "learning_rate": 9.509700176366844e-06,
      "loss": 0.3584,
      "step": 580
    },
    {
      "epoch": 1.0050251256281406,
      "grad_norm": 4.039212703704834,
      "learning_rate": 9.474426807760142e-06,
      "loss": 0.3701,
      "step": 600
    },
    {
      "epoch": 1.0385259631490786,
      "grad_norm": 4.787687301635742,
      "learning_rate": 9.43915343915344e-06,
      "loss": 0.2058,
      "step": 620
    },
    {
      "epoch": 1.0720268006700167,
      "grad_norm": 4.503021717071533,
      "learning_rate": 9.403880070546738e-06,
      "loss": 0.2455,
      "step": 640
    },
    {
      "epoch": 1.1055276381909547,
      "grad_norm": 6.663857936859131,
      "learning_rate": 9.368606701940036e-06,
      "loss": 0.2179,
      "step": 660
    },
    {
      "epoch": 1.1390284757118927,
      "grad_norm": 4.486196041107178,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.2196,
      "step": 680
    },
    {
      "epoch": 1.1725293132328307,
      "grad_norm": 4.667060852050781,
      "learning_rate": 9.298059964726633e-06,
      "loss": 0.2183,
      "step": 700
    },
    {
      "epoch": 1.2060301507537687,
      "grad_norm": 5.608316898345947,
      "learning_rate": 9.26278659611993e-06,
      "loss": 0.2161,
      "step": 720
    },
    {
      "epoch": 1.2395309882747068,
      "grad_norm": 4.2184271812438965,
      "learning_rate": 9.227513227513229e-06,
      "loss": 0.2382,
      "step": 740
    },
    {
      "epoch": 1.2730318257956448,
      "grad_norm": 3.871945381164551,
      "learning_rate": 9.192239858906526e-06,
      "loss": 0.2214,
      "step": 760
    },
    {
      "epoch": 1.3065326633165828,
      "grad_norm": 3.730222225189209,
      "learning_rate": 9.156966490299825e-06,
      "loss": 0.2213,
      "step": 780
    },
    {
      "epoch": 1.3400335008375208,
      "grad_norm": 4.740777015686035,
      "learning_rate": 9.121693121693122e-06,
      "loss": 0.2503,
      "step": 800
    },
    {
      "epoch": 1.3735343383584588,
      "grad_norm": 4.123469352722168,
      "learning_rate": 9.086419753086421e-06,
      "loss": 0.2443,
      "step": 820
    },
    {
      "epoch": 1.4070351758793969,
      "grad_norm": 3.988917827606201,
      "learning_rate": 9.051146384479718e-06,
      "loss": 0.241,
      "step": 840
    },
    {
      "epoch": 1.4405360134003349,
      "grad_norm": 5.4730305671691895,
      "learning_rate": 9.015873015873017e-06,
      "loss": 0.2163,
      "step": 860
    },
    {
      "epoch": 1.474036850921273,
      "grad_norm": 5.020652770996094,
      "learning_rate": 8.980599647266314e-06,
      "loss": 0.2387,
      "step": 880
    },
    {
      "epoch": 1.507537688442211,
      "grad_norm": 4.231486797332764,
      "learning_rate": 8.945326278659613e-06,
      "loss": 0.2523,
      "step": 900
    },
    {
      "epoch": 1.541038525963149,
      "grad_norm": 6.197975158691406,
      "learning_rate": 8.910052910052912e-06,
      "loss": 0.2255,
      "step": 920
    },
    {
      "epoch": 1.574539363484087,
      "grad_norm": 5.489210605621338,
      "learning_rate": 8.874779541446209e-06,
      "loss": 0.2397,
      "step": 940
    },
    {
      "epoch": 1.608040201005025,
      "grad_norm": 4.32737398147583,
      "learning_rate": 8.839506172839508e-06,
      "loss": 0.2298,
      "step": 960
    },
    {
      "epoch": 1.641541038525963,
      "grad_norm": 4.504214763641357,
      "learning_rate": 8.804232804232805e-06,
      "loss": 0.2301,
      "step": 980
    },
    {
      "epoch": 1.675041876046901,
      "grad_norm": 3.9694759845733643,
      "learning_rate": 8.768959435626104e-06,
      "loss": 0.2338,
      "step": 1000
    },
    {
      "epoch": 1.675041876046901,
      "eval_loss": 0.18838582932949066,
      "eval_runtime": 1770.3449,
      "eval_samples_per_second": 2.698,
      "eval_steps_per_second": 0.337,
      "eval_wer": 14.071299035983584,
      "step": 1000
    },
    {
      "epoch": 1.708542713567839,
      "grad_norm": 4.972934722900391,
      "learning_rate": 8.7336860670194e-06,
      "loss": 0.2388,
      "step": 1020
    },
    {
      "epoch": 1.742043551088777,
      "grad_norm": 5.178994655609131,
      "learning_rate": 8.6984126984127e-06,
      "loss": 0.2531,
      "step": 1040
    },
    {
      "epoch": 1.775544388609715,
      "grad_norm": 3.560372829437256,
      "learning_rate": 8.663139329805997e-06,
      "loss": 0.2187,
      "step": 1060
    },
    {
      "epoch": 1.809045226130653,
      "grad_norm": 4.647324562072754,
      "learning_rate": 8.627865961199296e-06,
      "loss": 0.2413,
      "step": 1080
    },
    {
      "epoch": 1.8425460636515911,
      "grad_norm": 3.6784422397613525,
      "learning_rate": 8.592592592592593e-06,
      "loss": 0.2242,
      "step": 1100
    },
    {
      "epoch": 1.8760469011725294,
      "grad_norm": 5.928018093109131,
      "learning_rate": 8.557319223985891e-06,
      "loss": 0.2406,
      "step": 1120
    },
    {
      "epoch": 1.9095477386934674,
      "grad_norm": 6.358487606048584,
      "learning_rate": 8.52204585537919e-06,
      "loss": 0.2482,
      "step": 1140
    },
    {
      "epoch": 1.9430485762144054,
      "grad_norm": 4.409506797790527,
      "learning_rate": 8.486772486772487e-06,
      "loss": 0.2415,
      "step": 1160
    },
    {
      "epoch": 1.9765494137353434,
      "grad_norm": 5.169639587402344,
      "learning_rate": 8.451499118165786e-06,
      "loss": 0.222,
      "step": 1180
    },
    {
      "epoch": 2.0100502512562812,
      "grad_norm": 2.8841328620910645,
      "learning_rate": 8.416225749559083e-06,
      "loss": 0.2109,
      "step": 1200
    },
    {
      "epoch": 2.0435510887772192,
      "grad_norm": 3.835498332977295,
      "learning_rate": 8.380952380952382e-06,
      "loss": 0.1284,
      "step": 1220
    },
    {
      "epoch": 2.0770519262981573,
      "grad_norm": 2.584859848022461,
      "learning_rate": 8.34567901234568e-06,
      "loss": 0.1059,
      "step": 1240
    },
    {
      "epoch": 2.1105527638190953,
      "grad_norm": 2.797682285308838,
      "learning_rate": 8.310405643738978e-06,
      "loss": 0.0967,
      "step": 1260
    },
    {
      "epoch": 2.1440536013400333,
      "grad_norm": 3.2937309741973877,
      "learning_rate": 8.275132275132275e-06,
      "loss": 0.1267,
      "step": 1280
    },
    {
      "epoch": 2.1775544388609713,
      "grad_norm": 3.344325065612793,
      "learning_rate": 8.239858906525574e-06,
      "loss": 0.1354,
      "step": 1300
    },
    {
      "epoch": 2.2110552763819094,
      "grad_norm": 2.955522060394287,
      "learning_rate": 8.204585537918873e-06,
      "loss": 0.104,
      "step": 1320
    },
    {
      "epoch": 2.2445561139028474,
      "grad_norm": 5.180908203125,
      "learning_rate": 8.16931216931217e-06,
      "loss": 0.1229,
      "step": 1340
    },
    {
      "epoch": 2.2780569514237854,
      "grad_norm": 3.639417886734009,
      "learning_rate": 8.134038800705469e-06,
      "loss": 0.104,
      "step": 1360
    },
    {
      "epoch": 2.3115577889447234,
      "grad_norm": 3.7380902767181396,
      "learning_rate": 8.098765432098766e-06,
      "loss": 0.1141,
      "step": 1380
    },
    {
      "epoch": 2.3450586264656614,
      "grad_norm": 4.100318908691406,
      "learning_rate": 8.063492063492065e-06,
      "loss": 0.1123,
      "step": 1400
    },
    {
      "epoch": 2.3785594639865995,
      "grad_norm": 2.8147048950195312,
      "learning_rate": 8.028218694885362e-06,
      "loss": 0.1039,
      "step": 1420
    },
    {
      "epoch": 2.4120603015075375,
      "grad_norm": 3.7376410961151123,
      "learning_rate": 7.99294532627866e-06,
      "loss": 0.1179,
      "step": 1440
    },
    {
      "epoch": 2.4455611390284755,
      "grad_norm": 4.320065975189209,
      "learning_rate": 7.957671957671958e-06,
      "loss": 0.1167,
      "step": 1460
    },
    {
      "epoch": 2.4790619765494135,
      "grad_norm": 4.415127277374268,
      "learning_rate": 7.922398589065257e-06,
      "loss": 0.125,
      "step": 1480
    },
    {
      "epoch": 2.5125628140703515,
      "grad_norm": 2.631763219833374,
      "learning_rate": 7.887125220458554e-06,
      "loss": 0.1188,
      "step": 1500
    },
    {
      "epoch": 2.5125628140703515,
      "eval_loss": 0.13635103404521942,
      "eval_runtime": 1806.1099,
      "eval_samples_per_second": 2.644,
      "eval_steps_per_second": 0.331,
      "eval_wer": 11.713753937195762,
      "step": 1500
    },
    {
      "epoch": 2.5460636515912896,
      "grad_norm": 3.608773946762085,
      "learning_rate": 7.851851851851853e-06,
      "loss": 0.1222,
      "step": 1520
    },
    {
      "epoch": 2.5795644891122276,
      "grad_norm": 3.4494569301605225,
      "learning_rate": 7.816578483245151e-06,
      "loss": 0.1087,
      "step": 1540
    },
    {
      "epoch": 2.6130653266331656,
      "grad_norm": 3.6112048625946045,
      "learning_rate": 7.781305114638449e-06,
      "loss": 0.1097,
      "step": 1560
    },
    {
      "epoch": 2.6465661641541036,
      "grad_norm": 3.1978707313537598,
      "learning_rate": 7.746031746031747e-06,
      "loss": 0.1101,
      "step": 1580
    },
    {
      "epoch": 2.6800670016750416,
      "grad_norm": 3.6376733779907227,
      "learning_rate": 7.710758377425045e-06,
      "loss": 0.1056,
      "step": 1600
    },
    {
      "epoch": 2.7135678391959797,
      "grad_norm": 3.754915237426758,
      "learning_rate": 7.675485008818343e-06,
      "loss": 0.1103,
      "step": 1620
    },
    {
      "epoch": 2.7470686767169177,
      "grad_norm": 3.329240322113037,
      "learning_rate": 7.64021164021164e-06,
      "loss": 0.1193,
      "step": 1640
    },
    {
      "epoch": 2.7805695142378557,
      "grad_norm": 3.9300169944763184,
      "learning_rate": 7.604938271604939e-06,
      "loss": 0.1232,
      "step": 1660
    },
    {
      "epoch": 2.8140703517587937,
      "grad_norm": 4.290626049041748,
      "learning_rate": 7.569664902998237e-06,
      "loss": 0.1374,
      "step": 1680
    },
    {
      "epoch": 2.8475711892797317,
      "grad_norm": 3.8987998962402344,
      "learning_rate": 7.534391534391535e-06,
      "loss": 0.1062,
      "step": 1700
    },
    {
      "epoch": 2.8810720268006698,
      "grad_norm": 4.234960079193115,
      "learning_rate": 7.499118165784833e-06,
      "loss": 0.111,
      "step": 1720
    },
    {
      "epoch": 2.914572864321608,
      "grad_norm": 3.103458881378174,
      "learning_rate": 7.463844797178131e-06,
      "loss": 0.1076,
      "step": 1740
    },
    {
      "epoch": 2.948073701842546,
      "grad_norm": 3.270204782485962,
      "learning_rate": 7.428571428571429e-06,
      "loss": 0.1224,
      "step": 1760
    },
    {
      "epoch": 2.981574539363484,
      "grad_norm": 4.261337757110596,
      "learning_rate": 7.393298059964727e-06,
      "loss": 0.1224,
      "step": 1780
    },
    {
      "epoch": 3.0150753768844223,
      "grad_norm": 1.9531301259994507,
      "learning_rate": 7.358024691358025e-06,
      "loss": 0.0913,
      "step": 1800
    },
    {
      "epoch": 3.0485762144053603,
      "grad_norm": 1.866215467453003,
      "learning_rate": 7.322751322751324e-06,
      "loss": 0.0466,
      "step": 1820
    },
    {
      "epoch": 3.0820770519262983,
      "grad_norm": 3.9456610679626465,
      "learning_rate": 7.287477954144622e-06,
      "loss": 0.0451,
      "step": 1840
    },
    {
      "epoch": 3.1155778894472363,
      "grad_norm": 1.9518849849700928,
      "learning_rate": 7.25220458553792e-06,
      "loss": 0.0414,
      "step": 1860
    },
    {
      "epoch": 3.1490787269681744,
      "grad_norm": 2.232792854309082,
      "learning_rate": 7.216931216931218e-06,
      "loss": 0.0502,
      "step": 1880
    },
    {
      "epoch": 3.1825795644891124,
      "grad_norm": 2.074127674102783,
      "learning_rate": 7.181657848324516e-06,
      "loss": 0.057,
      "step": 1900
    },
    {
      "epoch": 3.2160804020100504,
      "grad_norm": 3.5036733150482178,
      "learning_rate": 7.146384479717814e-06,
      "loss": 0.0515,
      "step": 1920
    },
    {
      "epoch": 3.2495812395309884,
      "grad_norm": 2.324014186859131,
      "learning_rate": 7.111111111111112e-06,
      "loss": 0.0454,
      "step": 1940
    },
    {
      "epoch": 3.2830820770519265,
      "grad_norm": 2.710326671600342,
      "learning_rate": 7.07583774250441e-06,
      "loss": 0.0473,
      "step": 1960
    },
    {
      "epoch": 3.3165829145728645,
      "grad_norm": 2.8943896293640137,
      "learning_rate": 7.040564373897708e-06,
      "loss": 0.046,
      "step": 1980
    },
    {
      "epoch": 3.3500837520938025,
      "grad_norm": 2.319986581802368,
      "learning_rate": 7.005291005291006e-06,
      "loss": 0.0449,
      "step": 2000
    },
    {
      "epoch": 3.3500837520938025,
      "eval_loss": 0.10934468358755112,
      "eval_runtime": 1762.4506,
      "eval_samples_per_second": 2.71,
      "eval_steps_per_second": 0.339,
      "eval_wer": 7.172854824854443,
      "step": 2000
    },
    {
      "epoch": 3.3835845896147405,
      "grad_norm": 3.317129135131836,
      "learning_rate": 6.9700176366843046e-06,
      "loss": 0.0645,
      "step": 2020
    },
    {
      "epoch": 3.4170854271356785,
      "grad_norm": 1.9533768892288208,
      "learning_rate": 6.9347442680776025e-06,
      "loss": 0.0432,
      "step": 2040
    },
    {
      "epoch": 3.4505862646566166,
      "grad_norm": 2.0508453845977783,
      "learning_rate": 6.8994708994709005e-06,
      "loss": 0.0521,
      "step": 2060
    },
    {
      "epoch": 3.4840871021775546,
      "grad_norm": 2.163236141204834,
      "learning_rate": 6.8641975308641985e-06,
      "loss": 0.0529,
      "step": 2080
    },
    {
      "epoch": 3.5175879396984926,
      "grad_norm": 2.7154581546783447,
      "learning_rate": 6.8289241622574965e-06,
      "loss": 0.0452,
      "step": 2100
    },
    {
      "epoch": 3.5510887772194306,
      "grad_norm": 3.0822432041168213,
      "learning_rate": 6.7936507936507944e-06,
      "loss": 0.0546,
      "step": 2120
    },
    {
      "epoch": 3.5845896147403686,
      "grad_norm": 4.19010591506958,
      "learning_rate": 6.758377425044092e-06,
      "loss": 0.0529,
      "step": 2140
    },
    {
      "epoch": 3.6180904522613067,
      "grad_norm": 2.9883594512939453,
      "learning_rate": 6.72310405643739e-06,
      "loss": 0.0503,
      "step": 2160
    },
    {
      "epoch": 3.6515912897822447,
      "grad_norm": 2.3664371967315674,
      "learning_rate": 6.687830687830688e-06,
      "loss": 0.0498,
      "step": 2180
    },
    {
      "epoch": 3.6850921273031827,
      "grad_norm": 2.0549991130828857,
      "learning_rate": 6.652557319223986e-06,
      "loss": 0.051,
      "step": 2200
    },
    {
      "epoch": 3.7185929648241207,
      "grad_norm": 2.5339038372039795,
      "learning_rate": 6.617283950617285e-06,
      "loss": 0.0568,
      "step": 2220
    },
    {
      "epoch": 3.7520938023450587,
      "grad_norm": 1.9988099336624146,
      "learning_rate": 6.582010582010583e-06,
      "loss": 0.051,
      "step": 2240
    },
    {
      "epoch": 3.7855946398659968,
      "grad_norm": 2.5243782997131348,
      "learning_rate": 6.546737213403881e-06,
      "loss": 0.056,
      "step": 2260
    },
    {
      "epoch": 3.819095477386935,
      "grad_norm": 3.157158136367798,
      "learning_rate": 6.511463844797179e-06,
      "loss": 0.0497,
      "step": 2280
    },
    {
      "epoch": 3.852596314907873,
      "grad_norm": 1.9286202192306519,
      "learning_rate": 6.476190476190477e-06,
      "loss": 0.0426,
      "step": 2300
    },
    {
      "epoch": 3.886097152428811,
      "grad_norm": 3.808802604675293,
      "learning_rate": 6.440917107583775e-06,
      "loss": 0.0499,
      "step": 2320
    },
    {
      "epoch": 3.919597989949749,
      "grad_norm": 2.506671667098999,
      "learning_rate": 6.405643738977073e-06,
      "loss": 0.052,
      "step": 2340
    },
    {
      "epoch": 3.953098827470687,
      "grad_norm": 2.9451920986175537,
      "learning_rate": 6.370370370370371e-06,
      "loss": 0.0552,
      "step": 2360
    },
    {
      "epoch": 3.986599664991625,
      "grad_norm": 2.592744827270508,
      "learning_rate": 6.335097001763669e-06,
      "loss": 0.0527,
      "step": 2380
    },
    {
      "epoch": 4.0201005025125625,
      "grad_norm": 1.8891575336456299,
      "learning_rate": 6.299823633156967e-06,
      "loss": 0.0289,
      "step": 2400
    },
    {
      "epoch": 4.0536013400335005,
      "grad_norm": 1.8053243160247803,
      "learning_rate": 6.264550264550266e-06,
      "loss": 0.0192,
      "step": 2420
    },
    {
      "epoch": 4.0871021775544385,
      "grad_norm": 2.0084407329559326,
      "learning_rate": 6.229276895943564e-06,
      "loss": 0.0242,
      "step": 2440
    },
    {
      "epoch": 4.1206030150753765,
      "grad_norm": 1.5919119119644165,
      "learning_rate": 6.194003527336862e-06,
      "loss": 0.0211,
      "step": 2460
    },
    {
      "epoch": 4.1541038525963145,
      "grad_norm": 1.9214613437652588,
      "learning_rate": 6.15873015873016e-06,
      "loss": 0.0233,
      "step": 2480
    },
    {
      "epoch": 4.187604690117253,
      "grad_norm": 1.2652311325073242,
      "learning_rate": 6.123456790123458e-06,
      "loss": 0.0199,
      "step": 2500
    },
    {
      "epoch": 4.187604690117253,
      "eval_loss": 0.0981329157948494,
      "eval_runtime": 1779.0213,
      "eval_samples_per_second": 2.685,
      "eval_steps_per_second": 0.336,
      "eval_wer": 6.707549871146321,
      "step": 2500
    },
    {
      "epoch": 4.221105527638191,
      "grad_norm": 2.026528835296631,
      "learning_rate": 6.088183421516756e-06,
      "loss": 0.0217,
      "step": 2520
    },
    {
      "epoch": 4.254606365159129,
      "grad_norm": 1.596919059753418,
      "learning_rate": 6.052910052910054e-06,
      "loss": 0.0167,
      "step": 2540
    },
    {
      "epoch": 4.288107202680067,
      "grad_norm": 2.9445090293884277,
      "learning_rate": 6.017636684303352e-06,
      "loss": 0.0225,
      "step": 2560
    },
    {
      "epoch": 4.321608040201005,
      "grad_norm": 2.4160282611846924,
      "learning_rate": 5.9823633156966496e-06,
      "loss": 0.0253,
      "step": 2580
    },
    {
      "epoch": 4.355108877721943,
      "grad_norm": 1.461127758026123,
      "learning_rate": 5.9470899470899475e-06,
      "loss": 0.0197,
      "step": 2600
    },
    {
      "epoch": 4.388609715242881,
      "grad_norm": 2.7892863750457764,
      "learning_rate": 5.911816578483246e-06,
      "loss": 0.022,
      "step": 2620
    },
    {
      "epoch": 4.422110552763819,
      "grad_norm": 1.651208758354187,
      "learning_rate": 5.876543209876544e-06,
      "loss": 0.0215,
      "step": 2640
    },
    {
      "epoch": 4.455611390284757,
      "grad_norm": 2.2500391006469727,
      "learning_rate": 5.841269841269842e-06,
      "loss": 0.0247,
      "step": 2660
    },
    {
      "epoch": 4.489112227805695,
      "grad_norm": 4.447635173797607,
      "learning_rate": 5.80599647266314e-06,
      "loss": 0.0263,
      "step": 2680
    },
    {
      "epoch": 4.522613065326633,
      "grad_norm": 0.8300407528877258,
      "learning_rate": 5.770723104056438e-06,
      "loss": 0.0209,
      "step": 2700
    },
    {
      "epoch": 4.556113902847571,
      "grad_norm": 1.6874111890792847,
      "learning_rate": 5.735449735449736e-06,
      "loss": 0.0195,
      "step": 2720
    },
    {
      "epoch": 4.589614740368509,
      "grad_norm": 2.4045815467834473,
      "learning_rate": 5.700176366843034e-06,
      "loss": 0.0224,
      "step": 2740
    },
    {
      "epoch": 4.623115577889447,
      "grad_norm": 2.3160908222198486,
      "learning_rate": 5.664902998236332e-06,
      "loss": 0.0179,
      "step": 2760
    },
    {
      "epoch": 4.656616415410385,
      "grad_norm": 1.6684287786483765,
      "learning_rate": 5.62962962962963e-06,
      "loss": 0.0238,
      "step": 2780
    },
    {
      "epoch": 4.690117252931323,
      "grad_norm": 1.973906397819519,
      "learning_rate": 5.594356261022928e-06,
      "loss": 0.0226,
      "step": 2800
    },
    {
      "epoch": 4.723618090452261,
      "grad_norm": 2.270906686782837,
      "learning_rate": 5.559082892416227e-06,
      "loss": 0.0232,
      "step": 2820
    },
    {
      "epoch": 4.757118927973199,
      "grad_norm": 1.8875011205673218,
      "learning_rate": 5.523809523809525e-06,
      "loss": 0.0218,
      "step": 2840
    },
    {
      "epoch": 4.790619765494137,
      "grad_norm": 1.1312583684921265,
      "learning_rate": 5.488536155202823e-06,
      "loss": 0.0212,
      "step": 2860
    },
    {
      "epoch": 4.824120603015075,
      "grad_norm": 0.864783525466919,
      "learning_rate": 5.453262786596121e-06,
      "loss": 0.023,
      "step": 2880
    },
    {
      "epoch": 4.857621440536013,
      "grad_norm": 1.2935965061187744,
      "learning_rate": 5.417989417989419e-06,
      "loss": 0.019,
      "step": 2900
    },
    {
      "epoch": 4.891122278056951,
      "grad_norm": 2.4576382637023926,
      "learning_rate": 5.382716049382717e-06,
      "loss": 0.0193,
      "step": 2920
    },
    {
      "epoch": 4.924623115577889,
      "grad_norm": 2.71472430229187,
      "learning_rate": 5.347442680776015e-06,
      "loss": 0.0253,
      "step": 2940
    },
    {
      "epoch": 4.958123953098827,
      "grad_norm": 2.84940505027771,
      "learning_rate": 5.312169312169313e-06,
      "loss": 0.0218,
      "step": 2960
    },
    {
      "epoch": 4.991624790619765,
      "grad_norm": 1.8483999967575073,
      "learning_rate": 5.276895943562611e-06,
      "loss": 0.0226,
      "step": 2980
    },
    {
      "epoch": 5.025125628140704,
      "grad_norm": 0.6126876473426819,
      "learning_rate": 5.241622574955909e-06,
      "loss": 0.0101,
      "step": 3000
    },
    {
      "epoch": 5.025125628140704,
      "eval_loss": 0.09390027821063995,
      "eval_runtime": 1767.4512,
      "eval_samples_per_second": 2.702,
      "eval_steps_per_second": 0.338,
      "eval_wer": 5.500143170754987,
      "step": 3000
    }
  ],
  "logging_steps": 20,
  "max_steps": 5970,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.76998696497152e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}