{
  "best_global_step": 9500,
  "best_metric": 16.040464106107944,
  "best_model_checkpoint": "/content/drive/MyDrive/models/whisper_small_ru_model_trainer_3ep/checkpoint-9500",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 9894,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007580351728320194,
      "grad_norm": 10.993572235107422,
      "learning_rate": 9.975742874469376e-06,
      "loss": 0.4792,
      "step": 25
    },
    {
      "epoch": 0.015160703456640388,
      "grad_norm": 7.92294979095459,
      "learning_rate": 9.950475035374976e-06,
      "loss": 0.27,
      "step": 50
    },
    {
      "epoch": 0.02274105518496058,
      "grad_norm": 11.214887619018555,
      "learning_rate": 9.925207196280575e-06,
      "loss": 0.3111,
      "step": 75
    },
    {
      "epoch": 0.030321406913280776,
      "grad_norm": 6.483564376831055,
      "learning_rate": 9.899939357186174e-06,
      "loss": 0.2659,
      "step": 100
    },
    {
      "epoch": 0.03790175864160097,
      "grad_norm": 13.091401100158691,
      "learning_rate": 9.874671518091774e-06,
      "loss": 0.2524,
      "step": 125
    },
    {
      "epoch": 0.04548211036992116,
      "grad_norm": 9.941374778747559,
      "learning_rate": 9.849403678997373e-06,
      "loss": 0.2601,
      "step": 150
    },
    {
      "epoch": 0.05306246209824136,
      "grad_norm": 10.436762809753418,
      "learning_rate": 9.824135839902972e-06,
      "loss": 0.2364,
      "step": 175
    },
    {
      "epoch": 0.06064281382656155,
      "grad_norm": 6.529853343963623,
      "learning_rate": 9.798868000808572e-06,
      "loss": 0.2406,
      "step": 200
    },
    {
      "epoch": 0.06822316555488174,
      "grad_norm": 6.7477641105651855,
      "learning_rate": 9.77360016171417e-06,
      "loss": 0.2628,
      "step": 225
    },
    {
      "epoch": 0.07580351728320193,
      "grad_norm": 8.057855606079102,
      "learning_rate": 9.74833232261977e-06,
      "loss": 0.2343,
      "step": 250
    },
    {
      "epoch": 0.08338386901152213,
      "grad_norm": 8.777417182922363,
      "learning_rate": 9.72306448352537e-06,
      "loss": 0.2544,
      "step": 275
    },
    {
      "epoch": 0.09096422073984232,
      "grad_norm": 9.590123176574707,
      "learning_rate": 9.697796644430968e-06,
      "loss": 0.2533,
      "step": 300
    },
    {
      "epoch": 0.09854457246816252,
      "grad_norm": 12.065073013305664,
      "learning_rate": 9.672528805336569e-06,
      "loss": 0.248,
      "step": 325
    },
    {
      "epoch": 0.10612492419648271,
      "grad_norm": 6.656917095184326,
      "learning_rate": 9.647260966242168e-06,
      "loss": 0.245,
      "step": 350
    },
    {
      "epoch": 0.11370527592480291,
      "grad_norm": 13.028727531433105,
      "learning_rate": 9.621993127147768e-06,
      "loss": 0.2245,
      "step": 375
    },
    {
      "epoch": 0.1212856276531231,
      "grad_norm": 9.074626922607422,
      "learning_rate": 9.596725288053367e-06,
      "loss": 0.2464,
      "step": 400
    },
    {
      "epoch": 0.12886597938144329,
      "grad_norm": 7.7365217208862305,
      "learning_rate": 9.571457448958967e-06,
      "loss": 0.2233,
      "step": 425
    },
    {
      "epoch": 0.13644633110976348,
      "grad_norm": 7.931459426879883,
      "learning_rate": 9.546189609864566e-06,
      "loss": 0.2522,
      "step": 450
    },
    {
      "epoch": 0.14402668283808367,
      "grad_norm": 10.054927825927734,
      "learning_rate": 9.520921770770164e-06,
      "loss": 0.2288,
      "step": 475
    },
    {
      "epoch": 0.15160703456640387,
      "grad_norm": 11.330881118774414,
      "learning_rate": 9.495653931675763e-06,
      "loss": 0.2206,
      "step": 500
    },
    {
      "epoch": 0.15160703456640387,
      "eval_cer": 5.496337965229563,
      "eval_loss": 0.26026931405067444,
      "eval_runtime": 4183.6066,
      "eval_samples_per_second": 2.439,
      "eval_ser": 69.43055963932177,
      "eval_steps_per_second": 0.61,
      "eval_wer": 21.266884240601154,
      "step": 500
    },
    {
      "epoch": 0.15918738629472406,
      "grad_norm": 22.062847137451172,
      "learning_rate": 9.470386092581364e-06,
      "loss": 0.2404,
      "step": 525
    },
    {
      "epoch": 0.16676773802304426,
      "grad_norm": 6.779280662536621,
      "learning_rate": 9.445118253486962e-06,
      "loss": 0.2299,
      "step": 550
    },
    {
      "epoch": 0.17434808975136445,
      "grad_norm": 10.592273712158203,
      "learning_rate": 9.419850414392561e-06,
      "loss": 0.2064,
      "step": 575
    },
    {
      "epoch": 0.18192844147968465,
      "grad_norm": 5.647468566894531,
      "learning_rate": 9.394582575298161e-06,
      "loss": 0.1878,
      "step": 600
    },
    {
      "epoch": 0.18950879320800484,
      "grad_norm": 12.663495063781738,
      "learning_rate": 9.36931473620376e-06,
      "loss": 0.2302,
      "step": 625
    },
    {
      "epoch": 0.19708914493632504,
      "grad_norm": 11.353273391723633,
      "learning_rate": 9.34404689710936e-06,
      "loss": 0.263,
      "step": 650
    },
    {
      "epoch": 0.20466949666464523,
      "grad_norm": 10.60062026977539,
      "learning_rate": 9.318779058014959e-06,
      "loss": 0.2276,
      "step": 675
    },
    {
      "epoch": 0.21224984839296543,
      "grad_norm": 6.700550556182861,
      "learning_rate": 9.29351121892056e-06,
      "loss": 0.2314,
      "step": 700
    },
    {
      "epoch": 0.21983020012128562,
      "grad_norm": 11.847448348999023,
      "learning_rate": 9.268243379826158e-06,
      "loss": 0.2282,
      "step": 725
    },
    {
      "epoch": 0.22741055184960582,
      "grad_norm": 10.603355407714844,
      "learning_rate": 9.242975540731759e-06,
      "loss": 0.1968,
      "step": 750
    },
    {
      "epoch": 0.234990903577926,
      "grad_norm": 10.278948783874512,
      "learning_rate": 9.217707701637356e-06,
      "loss": 0.2209,
      "step": 775
    },
    {
      "epoch": 0.2425712553062462,
      "grad_norm": 7.834460735321045,
      "learning_rate": 9.192439862542956e-06,
      "loss": 0.189,
      "step": 800
    },
    {
      "epoch": 0.2501516070345664,
      "grad_norm": 7.9238810539245605,
      "learning_rate": 9.167172023448555e-06,
      "loss": 0.1758,
      "step": 825
    },
    {
      "epoch": 0.25773195876288657,
      "grad_norm": 16.181570053100586,
      "learning_rate": 9.141904184354155e-06,
      "loss": 0.1999,
      "step": 850
    },
    {
      "epoch": 0.2653123104912068,
      "grad_norm": 5.385849475860596,
      "learning_rate": 9.116636345259754e-06,
      "loss": 0.2353,
      "step": 875
    },
    {
      "epoch": 0.27289266221952696,
      "grad_norm": 14.86568832397461,
      "learning_rate": 9.091368506165354e-06,
      "loss": 0.2039,
      "step": 900
    },
    {
      "epoch": 0.2804730139478472,
      "grad_norm": 8.38940715789795,
      "learning_rate": 9.066100667070953e-06,
      "loss": 0.2048,
      "step": 925
    },
    {
      "epoch": 0.28805336567616735,
      "grad_norm": 6.71006965637207,
      "learning_rate": 9.040832827976552e-06,
      "loss": 0.2016,
      "step": 950
    },
    {
      "epoch": 0.2956337174044876,
      "grad_norm": 9.024951934814453,
      "learning_rate": 9.015564988882152e-06,
      "loss": 0.2272,
      "step": 975
    },
    {
      "epoch": 0.30321406913280774,
      "grad_norm": 6.729151725769043,
      "learning_rate": 8.99029714978775e-06,
      "loss": 0.22,
      "step": 1000
    },
    {
      "epoch": 0.30321406913280774,
      "eval_cer": 5.382320373005102,
      "eval_loss": 0.24674250185489655,
      "eval_runtime": 4121.9591,
      "eval_samples_per_second": 2.475,
      "eval_ser": 67.35273939037538,
      "eval_steps_per_second": 0.619,
      "eval_wer": 20.297092948978527,
      "step": 1000
    },
    {
      "epoch": 0.31079442086112796,
      "grad_norm": 9.8275785446167,
      "learning_rate": 8.965029310693351e-06,
      "loss": 0.2513,
      "step": 1025
    },
    {
      "epoch": 0.31837477258944813,
      "grad_norm": 8.723328590393066,
      "learning_rate": 8.93976147159895e-06,
      "loss": 0.2095,
      "step": 1050
    },
    {
      "epoch": 0.32595512431776835,
      "grad_norm": 10.17667007446289,
      "learning_rate": 8.91449363250455e-06,
      "loss": 0.1945,
      "step": 1075
    },
    {
      "epoch": 0.3335354760460885,
      "grad_norm": 9.78806209564209,
      "learning_rate": 8.889225793410147e-06,
      "loss": 0.2357,
      "step": 1100
    },
    {
      "epoch": 0.34111582777440874,
      "grad_norm": 7.773096084594727,
      "learning_rate": 8.863957954315748e-06,
      "loss": 0.1901,
      "step": 1125
    },
    {
      "epoch": 0.3486961795027289,
      "grad_norm": 5.588106155395508,
      "learning_rate": 8.838690115221346e-06,
      "loss": 0.2156,
      "step": 1150
    },
    {
      "epoch": 0.35627653123104913,
      "grad_norm": 9.3190336227417,
      "learning_rate": 8.813422276126947e-06,
      "loss": 0.2014,
      "step": 1175
    },
    {
      "epoch": 0.3638568829593693,
      "grad_norm": 7.115675926208496,
      "learning_rate": 8.788154437032545e-06,
      "loss": 0.2073,
      "step": 1200
    },
    {
      "epoch": 0.3714372346876895,
      "grad_norm": 8.060104370117188,
      "learning_rate": 8.762886597938146e-06,
      "loss": 0.1789,
      "step": 1225
    },
    {
      "epoch": 0.3790175864160097,
      "grad_norm": 8.937747955322266,
      "learning_rate": 8.737618758843744e-06,
      "loss": 0.2115,
      "step": 1250
    },
    {
      "epoch": 0.3865979381443299,
      "grad_norm": 10.151251792907715,
      "learning_rate": 8.712350919749343e-06,
      "loss": 0.216,
      "step": 1275
    },
    {
      "epoch": 0.3941782898726501,
      "grad_norm": 6.023889064788818,
      "learning_rate": 8.687083080654943e-06,
      "loss": 0.1836,
      "step": 1300
    },
    {
      "epoch": 0.4017586416009703,
      "grad_norm": 9.286721229553223,
      "learning_rate": 8.661815241560542e-06,
      "loss": 0.1799,
      "step": 1325
    },
    {
      "epoch": 0.40933899332929047,
      "grad_norm": 10.163074493408203,
      "learning_rate": 8.636547402466143e-06,
      "loss": 0.183,
      "step": 1350
    },
    {
      "epoch": 0.4169193450576107,
      "grad_norm": 11.878681182861328,
      "learning_rate": 8.611279563371741e-06,
      "loss": 0.2121,
      "step": 1375
    },
    {
      "epoch": 0.42449969678593086,
      "grad_norm": 10.150252342224121,
      "learning_rate": 8.58601172427734e-06,
      "loss": 0.1837,
      "step": 1400
    },
    {
      "epoch": 0.4320800485142511,
      "grad_norm": 6.533999443054199,
      "learning_rate": 8.560743885182939e-06,
      "loss": 0.219,
      "step": 1425
    },
    {
      "epoch": 0.43966040024257125,
      "grad_norm": 10.426810264587402,
      "learning_rate": 8.535476046088539e-06,
      "loss": 0.2285,
      "step": 1450
    },
    {
      "epoch": 0.44724075197089147,
      "grad_norm": 10.712567329406738,
      "learning_rate": 8.510208206994138e-06,
      "loss": 0.1837,
      "step": 1475
    },
    {
      "epoch": 0.45482110369921164,
      "grad_norm": 8.469382286071777,
      "learning_rate": 8.484940367899738e-06,
      "loss": 0.1901,
      "step": 1500
    },
    {
      "epoch": 0.45482110369921164,
      "eval_cer": 5.116005507016847,
      "eval_loss": 0.23773027956485748,
      "eval_runtime": 4147.5807,
      "eval_samples_per_second": 2.46,
      "eval_ser": 66.17661472116045,
      "eval_steps_per_second": 0.615,
      "eval_wer": 19.564232703452923,
      "step": 1500
    },
    {
      "epoch": 0.46240145542753186,
      "grad_norm": 11.634538650512695,
      "learning_rate": 8.459672528805337e-06,
      "loss": 0.1774,
      "step": 1525
    },
    {
      "epoch": 0.469981807155852,
      "grad_norm": 9.295145034790039,
      "learning_rate": 8.434404689710937e-06,
      "loss": 0.2293,
      "step": 1550
    },
    {
      "epoch": 0.47756215888417225,
      "grad_norm": 9.195737838745117,
      "learning_rate": 8.409136850616536e-06,
      "loss": 0.195,
      "step": 1575
    },
    {
      "epoch": 0.4851425106124924,
      "grad_norm": 7.1239776611328125,
      "learning_rate": 8.383869011522136e-06,
      "loss": 0.1902,
      "step": 1600
    },
    {
      "epoch": 0.49272286234081264,
      "grad_norm": 8.784886360168457,
      "learning_rate": 8.358601172427735e-06,
      "loss": 0.1708,
      "step": 1625
    },
    {
      "epoch": 0.5003032140691328,
      "grad_norm": 9.43509578704834,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.1978,
      "step": 1650
    },
    {
      "epoch": 0.507883565797453,
      "grad_norm": 9.499982833862305,
      "learning_rate": 8.308065494238934e-06,
      "loss": 0.2036,
      "step": 1675
    },
    {
      "epoch": 0.5154639175257731,
      "grad_norm": 7.286120414733887,
      "learning_rate": 8.282797655144533e-06,
      "loss": 0.2003,
      "step": 1700
    },
    {
      "epoch": 0.5230442692540934,
      "grad_norm": 6.725291728973389,
      "learning_rate": 8.257529816050132e-06,
      "loss": 0.2139,
      "step": 1725
    },
    {
      "epoch": 0.5306246209824136,
      "grad_norm": 5.5903401374816895,
      "learning_rate": 8.23226197695573e-06,
      "loss": 0.1869,
      "step": 1750
    },
    {
      "epoch": 0.5382049727107338,
      "grad_norm": 7.579101085662842,
      "learning_rate": 8.20699413786133e-06,
      "loss": 0.2097,
      "step": 1775
    },
    {
      "epoch": 0.5457853244390539,
      "grad_norm": 4.244465351104736,
      "learning_rate": 8.18172629876693e-06,
      "loss": 0.1906,
      "step": 1800
    },
    {
      "epoch": 0.5533656761673742,
      "grad_norm": 5.5115580558776855,
      "learning_rate": 8.15645845967253e-06,
      "loss": 0.197,
      "step": 1825
    },
    {
      "epoch": 0.5609460278956944,
      "grad_norm": 6.544187068939209,
      "learning_rate": 8.131190620578128e-06,
      "loss": 0.2061,
      "step": 1850
    },
    {
      "epoch": 0.5685263796240145,
      "grad_norm": 6.87299108505249,
      "learning_rate": 8.105922781483729e-06,
      "loss": 0.2018,
      "step": 1875
    },
    {
      "epoch": 0.5761067313523347,
      "grad_norm": 11.84753704071045,
      "learning_rate": 8.080654942389327e-06,
      "loss": 0.2018,
      "step": 1900
    },
    {
      "epoch": 0.583687083080655,
      "grad_norm": 7.050841331481934,
      "learning_rate": 8.055387103294928e-06,
      "loss": 0.1903,
      "step": 1925
    },
    {
      "epoch": 0.5912674348089751,
      "grad_norm": 7.1467461585998535,
      "learning_rate": 8.030119264200527e-06,
      "loss": 0.2079,
      "step": 1950
    },
    {
      "epoch": 0.5988477865372953,
      "grad_norm": 7.622501373291016,
      "learning_rate": 8.004851425106125e-06,
      "loss": 0.2047,
      "step": 1975
    },
    {
      "epoch": 0.6064281382656155,
      "grad_norm": 10.488551139831543,
      "learning_rate": 7.979583586011726e-06,
      "loss": 0.1969,
      "step": 2000
    },
    {
      "epoch": 0.6064281382656155,
      "eval_cer": 5.075425758775576,
      "eval_loss": 0.22730673849582672,
      "eval_runtime": 4147.4744,
      "eval_samples_per_second": 2.46,
      "eval_ser": 64.32421836714691,
      "eval_steps_per_second": 0.615,
      "eval_wer": 19.050882103576033,
      "step": 2000
    },
    {
      "epoch": 0.6140084899939358,
      "grad_norm": 6.823671817779541,
      "learning_rate": 7.954315746917324e-06,
      "loss": 0.2002,
      "step": 2025
    },
    {
      "epoch": 0.6215888417222559,
      "grad_norm": 7.486941814422607,
      "learning_rate": 7.929047907822923e-06,
      "loss": 0.2046,
      "step": 2050
    },
    {
      "epoch": 0.6291691934505761,
      "grad_norm": 6.544958114624023,
      "learning_rate": 7.903780068728523e-06,
      "loss": 0.1736,
      "step": 2075
    },
    {
      "epoch": 0.6367495451788963,
      "grad_norm": 7.139219760894775,
      "learning_rate": 7.878512229634122e-06,
      "loss": 0.1844,
      "step": 2100
    },
    {
      "epoch": 0.6443298969072165,
      "grad_norm": 10.455595970153809,
      "learning_rate": 7.85324439053972e-06,
      "loss": 0.1916,
      "step": 2125
    },
    {
      "epoch": 0.6519102486355367,
      "grad_norm": 6.0816216468811035,
      "learning_rate": 7.827976551445321e-06,
      "loss": 0.2102,
      "step": 2150
    },
    {
      "epoch": 0.6594906003638569,
      "grad_norm": 4.258129119873047,
      "learning_rate": 7.80270871235092e-06,
      "loss": 0.1785,
      "step": 2175
    },
    {
      "epoch": 0.667070952092177,
      "grad_norm": 8.886266708374023,
      "learning_rate": 7.77744087325652e-06,
      "loss": 0.1916,
      "step": 2200
    },
    {
      "epoch": 0.6746513038204973,
      "grad_norm": 8.997633934020996,
      "learning_rate": 7.752173034162119e-06,
      "loss": 0.2088,
      "step": 2225
    },
    {
      "epoch": 0.6822316555488175,
      "grad_norm": 9.341837882995605,
      "learning_rate": 7.72690519506772e-06,
      "loss": 0.1996,
      "step": 2250
    },
    {
      "epoch": 0.6898120072771377,
      "grad_norm": 11.538143157958984,
      "learning_rate": 7.701637355973318e-06,
      "loss": 0.1766,
      "step": 2275
    },
    {
      "epoch": 0.6973923590054578,
      "grad_norm": 6.78935432434082,
      "learning_rate": 7.676369516878917e-06,
      "loss": 0.1748,
      "step": 2300
    },
    {
      "epoch": 0.7049727107337781,
      "grad_norm": 9.051713943481445,
      "learning_rate": 7.651101677784516e-06,
      "loss": 0.173,
      "step": 2325
    },
    {
      "epoch": 0.7125530624620983,
      "grad_norm": 8.62745189666748,
      "learning_rate": 7.625833838690115e-06,
      "loss": 0.1936,
      "step": 2350
    },
    {
      "epoch": 0.7201334141904184,
      "grad_norm": 9.529070854187012,
      "learning_rate": 7.600565999595715e-06,
      "loss": 0.2004,
      "step": 2375
    },
    {
      "epoch": 0.7277137659187386,
      "grad_norm": 7.745057106018066,
      "learning_rate": 7.575298160501314e-06,
      "loss": 0.1832,
      "step": 2400
    },
    {
      "epoch": 0.7352941176470589,
      "grad_norm": 8.844827651977539,
      "learning_rate": 7.550030321406914e-06,
      "loss": 0.2056,
      "step": 2425
    },
    {
      "epoch": 0.742874469375379,
      "grad_norm": 13.322829246520996,
      "learning_rate": 7.524762482312513e-06,
      "loss": 0.1825,
      "step": 2450
    },
    {
      "epoch": 0.7504548211036992,
      "grad_norm": 10.450922012329102,
      "learning_rate": 7.499494643218113e-06,
      "loss": 0.1798,
      "step": 2475
    },
    {
      "epoch": 0.7580351728320194,
      "grad_norm": 9.188721656799316,
      "learning_rate": 7.474226804123712e-06,
      "loss": 0.1743,
      "step": 2500
    },
    {
      "epoch": 0.7580351728320194,
      "eval_cer": 4.852319288687943,
      "eval_loss": 0.2188422828912735,
      "eval_runtime": 4181.1454,
      "eval_samples_per_second": 2.44,
      "eval_ser": 63.148093697931984,
      "eval_steps_per_second": 0.61,
      "eval_wer": 18.228592002415766,
      "step": 2500
    },
    {
      "epoch": 0.7656155245603395,
      "grad_norm": 5.665472030639648,
      "learning_rate": 7.448958965029312e-06,
      "loss": 0.1979,
      "step": 2525
    },
    {
      "epoch": 0.7731958762886598,
      "grad_norm": 4.752591133117676,
      "learning_rate": 7.4236911259349106e-06,
      "loss": 0.191,
      "step": 2550
    },
    {
      "epoch": 0.78077622801698,
      "grad_norm": 9.350257873535156,
      "learning_rate": 7.39842328684051e-06,
      "loss": 0.1735,
      "step": 2575
    },
    {
      "epoch": 0.7883565797453002,
      "grad_norm": 7.437028408050537,
      "learning_rate": 7.37315544774611e-06,
      "loss": 0.2365,
      "step": 2600
    },
    {
      "epoch": 0.7959369314736203,
      "grad_norm": 9.3013916015625,
      "learning_rate": 7.347887608651709e-06,
      "loss": 0.1888,
      "step": 2625
    },
    {
      "epoch": 0.8035172832019406,
      "grad_norm": 7.452167987823486,
      "learning_rate": 7.322619769557308e-06,
      "loss": 0.1752,
      "step": 2650
    },
    {
      "epoch": 0.8110976349302608,
      "grad_norm": 6.42844820022583,
      "learning_rate": 7.2973519304629074e-06,
      "loss": 0.1991,
      "step": 2675
    },
    {
      "epoch": 0.8186779866585809,
      "grad_norm": 6.838171482086182,
      "learning_rate": 7.272084091368506e-06,
      "loss": 0.1748,
      "step": 2700
    },
    {
      "epoch": 0.8262583383869011,
      "grad_norm": 7.954586505889893,
      "learning_rate": 7.246816252274106e-06,
      "loss": 0.1905,
      "step": 2725
    },
    {
      "epoch": 0.8338386901152214,
      "grad_norm": 6.925525188446045,
      "learning_rate": 7.221548413179705e-06,
      "loss": 0.1784,
      "step": 2750
    },
    {
      "epoch": 0.8414190418435415,
      "grad_norm": 7.18998384475708,
      "learning_rate": 7.196280574085305e-06,
      "loss": 0.1693,
      "step": 2775
    },
    {
      "epoch": 0.8489993935718617,
      "grad_norm": 8.666403770446777,
      "learning_rate": 7.171012734990904e-06,
      "loss": 0.1684,
      "step": 2800
    },
    {
      "epoch": 0.8565797453001819,
      "grad_norm": 7.043132781982422,
      "learning_rate": 7.145744895896504e-06,
      "loss": 0.1781,
      "step": 2825
    },
    {
      "epoch": 0.8641600970285022,
      "grad_norm": 5.09641695022583,
      "learning_rate": 7.120477056802103e-06,
      "loss": 0.1778,
      "step": 2850
    },
    {
      "epoch": 0.8717404487568223,
      "grad_norm": 6.9336748123168945,
      "learning_rate": 7.095209217707703e-06,
      "loss": 0.1607,
      "step": 2875
    },
    {
      "epoch": 0.8793208004851425,
      "grad_norm": 5.951363563537598,
      "learning_rate": 7.069941378613302e-06,
      "loss": 0.1793,
      "step": 2900
    },
    {
      "epoch": 0.8869011522134627,
      "grad_norm": 5.363964557647705,
      "learning_rate": 7.044673539518901e-06,
      "loss": 0.1742,
      "step": 2925
    },
    {
      "epoch": 0.8944815039417829,
      "grad_norm": 8.336238861083984,
      "learning_rate": 7.0194057004245e-06,
      "loss": 0.2014,
      "step": 2950
    },
    {
      "epoch": 0.9020618556701031,
      "grad_norm": 6.5310587882995605,
      "learning_rate": 6.9941378613300994e-06,
      "loss": 0.2074,
      "step": 2975
    },
    {
      "epoch": 0.9096422073984233,
      "grad_norm": 9.294402122497559,
      "learning_rate": 6.968870022235699e-06,
      "loss": 0.1747,
      "step": 3000
    },
    {
      "epoch": 0.9096422073984233,
      "eval_cer": 4.88665599873825,
      "eval_loss": 0.21674878895282745,
      "eval_runtime": 4171.9101,
      "eval_samples_per_second": 2.446,
      "eval_ser": 62.40321474076252,
      "eval_steps_per_second": 0.611,
      "eval_wer": 18.098512212401715,
      "step": 3000
    },
    {
      "epoch": 0.9172225591267434,
      "grad_norm": 8.7633056640625,
      "learning_rate": 6.943602183141298e-06,
      "loss": 0.1575,
      "step": 3025
    },
    {
      "epoch": 0.9248029108550637,
      "grad_norm": 13.642629623413086,
      "learning_rate": 6.918334344046897e-06,
      "loss": 0.1817,
      "step": 3050
    },
    {
      "epoch": 0.9323832625833839,
      "grad_norm": 6.795938968658447,
      "learning_rate": 6.893066504952497e-06,
      "loss": 0.1887,
      "step": 3075
    },
    {
      "epoch": 0.939963614311704,
      "grad_norm": 7.764118671417236,
      "learning_rate": 6.867798665858096e-06,
      "loss": 0.1611,
      "step": 3100
    },
    {
      "epoch": 0.9475439660400242,
      "grad_norm": 7.532794952392578,
      "learning_rate": 6.842530826763696e-06,
      "loss": 0.1869,
      "step": 3125
    },
    {
      "epoch": 0.9551243177683445,
      "grad_norm": 6.741429328918457,
      "learning_rate": 6.817262987669295e-06,
      "loss": 0.1846,
      "step": 3150
    },
    {
      "epoch": 0.9627046694966647,
      "grad_norm": 9.013187408447266,
      "learning_rate": 6.791995148574895e-06,
      "loss": 0.1649,
      "step": 3175
    },
    {
      "epoch": 0.9702850212249848,
      "grad_norm": 11.595109939575195,
      "learning_rate": 6.7667273094804945e-06,
      "loss": 0.1818,
      "step": 3200
    },
    {
      "epoch": 0.977865372953305,
      "grad_norm": 12.244895935058594,
      "learning_rate": 6.741459470386094e-06,
      "loss": 0.1906,
      "step": 3225
    },
    {
      "epoch": 0.9854457246816253,
      "grad_norm": 6.988622188568115,
      "learning_rate": 6.716191631291692e-06,
      "loss": 0.173,
      "step": 3250
    },
    {
      "epoch": 0.9930260764099454,
      "grad_norm": 4.436202526092529,
      "learning_rate": 6.690923792197291e-06,
      "loss": 0.1838,
      "step": 3275
    },
    {
      "epoch": 1.0006064281382656,
      "grad_norm": 6.243381977081299,
      "learning_rate": 6.665655953102891e-06,
      "loss": 0.1718,
      "step": 3300
    },
    {
      "epoch": 1.0081867798665858,
      "grad_norm": 5.074841022491455,
      "learning_rate": 6.6403881140084905e-06,
      "loss": 0.076,
      "step": 3325
    },
    {
      "epoch": 1.015767131594906,
      "grad_norm": 4.297674655914307,
      "learning_rate": 6.61512027491409e-06,
      "loss": 0.078,
      "step": 3350
    },
    {
      "epoch": 1.0233474833232261,
      "grad_norm": 3.33673095703125,
      "learning_rate": 6.589852435819689e-06,
      "loss": 0.0774,
      "step": 3375
    },
    {
      "epoch": 1.0309278350515463,
      "grad_norm": 7.982945442199707,
      "learning_rate": 6.564584596725288e-06,
      "loss": 0.0856,
      "step": 3400
    },
    {
      "epoch": 1.0385081867798667,
      "grad_norm": 5.4341888427734375,
      "learning_rate": 6.539316757630888e-06,
      "loss": 0.0752,
      "step": 3425
    },
    {
      "epoch": 1.0460885385081868,
      "grad_norm": 3.646911144256592,
      "learning_rate": 6.514048918536487e-06,
      "loss": 0.0657,
      "step": 3450
    },
    {
      "epoch": 1.053668890236507,
      "grad_norm": 2.50636625289917,
      "learning_rate": 6.488781079442087e-06,
      "loss": 0.0678,
      "step": 3475
    },
    {
      "epoch": 1.0612492419648272,
      "grad_norm": 2.662381172180176,
      "learning_rate": 6.4635132403476865e-06,
      "loss": 0.077,
      "step": 3500
    },
    {
      "epoch": 1.0612492419648272,
      "eval_cer": 4.527188431321651,
      "eval_loss": 0.2141607105731964,
      "eval_runtime": 4130.8959,
      "eval_samples_per_second": 2.47,
      "eval_ser": 60.59982358129962,
      "eval_steps_per_second": 0.618,
      "eval_wer": 17.200729375965437,
      "step": 3500
    },
    {
      "epoch": 1.0688295936931473,
      "grad_norm": 2.4962751865386963,
      "learning_rate": 6.438245401253286e-06,
      "loss": 0.0885,
      "step": 3525
    },
    {
      "epoch": 1.0764099454214675,
      "grad_norm": 5.805286884307861,
      "learning_rate": 6.4129775621588856e-06,
      "loss": 0.0644,
      "step": 3550
    },
    {
      "epoch": 1.0839902971497877,
      "grad_norm": 3.763657331466675,
      "learning_rate": 6.387709723064483e-06,
      "loss": 0.0647,
      "step": 3575
    },
    {
      "epoch": 1.0915706488781078,
      "grad_norm": 3.8962442874908447,
      "learning_rate": 6.362441883970083e-06,
      "loss": 0.0759,
      "step": 3600
    },
    {
      "epoch": 1.0991510006064282,
      "grad_norm": 6.139822483062744,
      "learning_rate": 6.3371740448756825e-06,
      "loss": 0.0707,
      "step": 3625
    },
    {
      "epoch": 1.1067313523347484,
      "grad_norm": 4.11650276184082,
      "learning_rate": 6.311906205781282e-06,
      "loss": 0.0699,
      "step": 3650
    },
    {
      "epoch": 1.1143117040630686,
      "grad_norm": 6.533496379852295,
      "learning_rate": 6.286638366686882e-06,
      "loss": 0.069,
      "step": 3675
    },
    {
      "epoch": 1.1218920557913887,
      "grad_norm": 3.1129257678985596,
      "learning_rate": 6.261370527592481e-06,
      "loss": 0.0707,
      "step": 3700
    },
    {
      "epoch": 1.129472407519709,
      "grad_norm": 3.159837245941162,
      "learning_rate": 6.23610268849808e-06,
      "loss": 0.0766,
      "step": 3725
    },
    {
      "epoch": 1.137052759248029,
      "grad_norm": 6.998505592346191,
      "learning_rate": 6.210834849403679e-06,
      "loss": 0.0799,
      "step": 3750
    },
    {
      "epoch": 1.1446331109763492,
      "grad_norm": 5.051515102386475,
      "learning_rate": 6.185567010309279e-06,
      "loss": 0.08,
      "step": 3775
    },
    {
      "epoch": 1.1522134627046694,
      "grad_norm": 4.849501132965088,
      "learning_rate": 6.1602991712148785e-06,
      "loss": 0.0694,
      "step": 3800
    },
    {
      "epoch": 1.1597938144329896,
      "grad_norm": 4.301568984985352,
      "learning_rate": 6.135031332120478e-06,
      "loss": 0.0801,
      "step": 3825
    },
    {
      "epoch": 1.16737416616131,
      "grad_norm": 7.137290954589844,
      "learning_rate": 6.1097634930260776e-06,
      "loss": 0.0707,
      "step": 3850
    },
    {
      "epoch": 1.1749545178896301,
      "grad_norm": 1.7661166191101074,
      "learning_rate": 6.084495653931675e-06,
      "loss": 0.0671,
      "step": 3875
    },
    {
      "epoch": 1.1825348696179503,
      "grad_norm": 3.448512077331543,
      "learning_rate": 6.059227814837275e-06,
      "loss": 0.0689,
      "step": 3900
    },
    {
      "epoch": 1.1901152213462705,
      "grad_norm": 3.7001595497131348,
      "learning_rate": 6.0339599757428745e-06,
      "loss": 0.0764,
      "step": 3925
    },
    {
      "epoch": 1.1976955730745906,
      "grad_norm": 6.902029037475586,
      "learning_rate": 6.008692136648474e-06,
      "loss": 0.0661,
      "step": 3950
    },
    {
      "epoch": 1.2052759248029108,
      "grad_norm": 4.167758464813232,
      "learning_rate": 5.983424297554074e-06,
      "loss": 0.071,
      "step": 3975
    },
    {
      "epoch": 1.212856276531231,
      "grad_norm": 3.972062110900879,
      "learning_rate": 5.958156458459673e-06,
      "loss": 0.0839,
      "step": 4000
    },
    {
      "epoch": 1.212856276531231,
      "eval_cer": 4.462786563667489,
      "eval_loss": 0.21264785528182983,
      "eval_runtime": 4149.8292,
      "eval_samples_per_second": 2.459,
      "eval_ser": 60.8742526707831,
      "eval_steps_per_second": 0.615,
      "eval_wer": 17.160079441586042,
      "step": 4000
    },
    {
      "epoch": 1.2204366282595513,
      "grad_norm": 4.492404937744141,
      "learning_rate": 5.932888619365273e-06,
      "loss": 0.0894,
      "step": 4025
    },
    {
      "epoch": 1.2280169799878715,
      "grad_norm": 3.026043176651001,
      "learning_rate": 5.907620780270872e-06,
      "loss": 0.072,
      "step": 4050
    },
    {
      "epoch": 1.2355973317161917,
      "grad_norm": 2.404448986053467,
      "learning_rate": 5.882352941176471e-06,
      "loss": 0.0769,
      "step": 4075
    },
    {
      "epoch": 1.2431776834445118,
      "grad_norm": 5.609960556030273,
      "learning_rate": 5.8570851020820705e-06,
      "loss": 0.0617,
      "step": 4100
    },
    {
      "epoch": 1.250758035172832,
      "grad_norm": 4.348845481872559,
      "learning_rate": 5.83181726298767e-06,
      "loss": 0.0761,
      "step": 4125
    },
    {
      "epoch": 1.2583383869011522,
      "grad_norm": 6.77551794052124,
      "learning_rate": 5.8065494238932696e-06,
      "loss": 0.0776,
      "step": 4150
    },
    {
      "epoch": 1.2659187386294724,
      "grad_norm": 5.368580341339111,
      "learning_rate": 5.781281584798869e-06,
      "loss": 0.0677,
      "step": 4175
    },
    {
      "epoch": 1.2734990903577925,
      "grad_norm": 6.325013637542725,
      "learning_rate": 5.756013745704467e-06,
      "loss": 0.0858,
      "step": 4200
    },
    {
      "epoch": 1.2810794420861127,
      "grad_norm": 7.039133548736572,
      "learning_rate": 5.7307459066100665e-06,
      "loss": 0.0917,
      "step": 4225
    },
    {
      "epoch": 1.2886597938144329,
      "grad_norm": 6.305510520935059,
      "learning_rate": 5.705478067515666e-06,
      "loss": 0.0815,
      "step": 4250
    },
    {
      "epoch": 1.2962401455427532,
      "grad_norm": 4.5553789138793945,
      "learning_rate": 5.680210228421266e-06,
      "loss": 0.0746,
      "step": 4275
    },
    {
      "epoch": 1.3038204972710734,
      "grad_norm": 14.23348331451416,
      "learning_rate": 5.654942389326865e-06,
      "loss": 0.0635,
      "step": 4300
    },
    {
      "epoch": 1.3114008489993936,
      "grad_norm": 4.656774997711182,
      "learning_rate": 5.629674550232465e-06,
      "loss": 0.0905,
      "step": 4325
    },
    {
      "epoch": 1.3189812007277137,
      "grad_norm": 7.67761754989624,
      "learning_rate": 5.604406711138064e-06,
      "loss": 0.0699,
      "step": 4350
    },
    {
      "epoch": 1.326561552456034,
      "grad_norm": 3.044973850250244,
      "learning_rate": 5.579138872043664e-06,
      "loss": 0.0684,
      "step": 4375
    },
    {
      "epoch": 1.334141904184354,
      "grad_norm": 12.772285461425781,
      "learning_rate": 5.553871032949263e-06,
      "loss": 0.0832,
      "step": 4400
    },
    {
      "epoch": 1.3417222559126745,
      "grad_norm": 4.697045803070068,
      "learning_rate": 5.528603193854862e-06,
      "loss": 0.0841,
      "step": 4425
    },
    {
      "epoch": 1.3493026076409946,
      "grad_norm": 3.967209815979004,
      "learning_rate": 5.5033353547604615e-06,
      "loss": 0.0653,
      "step": 4450
    },
    {
      "epoch": 1.3568829593693148,
      "grad_norm": 2.5490405559539795,
      "learning_rate": 5.478067515666061e-06,
      "loss": 0.0805,
      "step": 4475
    },
    {
      "epoch": 1.364463311097635,
      "grad_norm": 7.244118690490723,
      "learning_rate": 5.45279967657166e-06,
      "loss": 0.0888,
      "step": 4500
    },
    {
      "epoch": 1.364463311097635,
      "eval_cer": 4.486444392601671,
      "eval_loss": 0.2091592252254486,
      "eval_runtime": 4167.1809,
      "eval_samples_per_second": 2.448,
      "eval_ser": 60.394001764187,
      "eval_steps_per_second": 0.612,
      "eval_wer": 17.352876273214015,
      "step": 4500
    },
    {
      "epoch": 1.3720436628259551,
      "grad_norm": 3.822779655456543,
      "learning_rate": 5.427531837477259e-06,
      "loss": 0.0703,
      "step": 4525
    },
    {
      "epoch": 1.3796240145542753,
      "grad_norm": 4.564328193664551,
      "learning_rate": 5.402263998382858e-06,
      "loss": 0.0608,
      "step": 4550
    },
    {
      "epoch": 1.3872043662825955,
      "grad_norm": 1.9041502475738525,
      "learning_rate": 5.3769961592884576e-06,
      "loss": 0.0633,
      "step": 4575
    },
    {
      "epoch": 1.3947847180109156,
      "grad_norm": 3.6892504692077637,
      "learning_rate": 5.351728320194057e-06,
      "loss": 0.0703,
      "step": 4600
    },
    {
      "epoch": 1.4023650697392358,
      "grad_norm": 1.5316110849380493,
      "learning_rate": 5.326460481099657e-06,
      "loss": 0.0699,
      "step": 4625
    },
    {
      "epoch": 1.409945421467556,
      "grad_norm": 4.096865653991699,
      "learning_rate": 5.301192642005256e-06,
      "loss": 0.0713,
      "step": 4650
    },
    {
      "epoch": 1.4175257731958764,
      "grad_norm": 4.801632404327393,
      "learning_rate": 5.275924802910856e-06,
      "loss": 0.0678,
      "step": 4675
    },
    {
      "epoch": 1.4251061249241965,
      "grad_norm": 7.240773677825928,
      "learning_rate": 5.250656963816455e-06,
      "loss": 0.0683,
      "step": 4700
    },
    {
      "epoch": 1.4326864766525167,
      "grad_norm": 5.203670978546143,
      "learning_rate": 5.225389124722055e-06,
      "loss": 0.087,
      "step": 4725
    },
    {
      "epoch": 1.4402668283808369,
      "grad_norm": 5.950301170349121,
      "learning_rate": 5.2001212856276535e-06,
      "loss": 0.0824,
      "step": 4750
    },
    {
      "epoch": 1.447847180109157,
      "grad_norm": 8.965677261352539,
      "learning_rate": 5.174853446533253e-06,
      "loss": 0.0705,
      "step": 4775
    },
    {
      "epoch": 1.4554275318374772,
      "grad_norm": 4.188201427459717,
      "learning_rate": 5.149585607438852e-06,
      "loss": 0.0605,
      "step": 4800
    },
    {
      "epoch": 1.4630078835657976,
      "grad_norm": 5.050042152404785,
      "learning_rate": 5.124317768344451e-06,
      "loss": 0.0743,
      "step": 4825
    },
    {
      "epoch": 1.4705882352941178,
      "grad_norm": 8.579322814941406,
      "learning_rate": 5.099049929250051e-06,
      "loss": 0.0725,
      "step": 4850
    },
    {
      "epoch": 1.478168587022438,
      "grad_norm": 3.81740140914917,
      "learning_rate": 5.07378209015565e-06,
      "loss": 0.0585,
      "step": 4875
    },
    {
      "epoch": 1.485748938750758,
      "grad_norm": 2.082289218902588,
      "learning_rate": 5.048514251061249e-06,
      "loss": 0.0766,
      "step": 4900
    },
    {
      "epoch": 1.4933292904790783,
      "grad_norm": 6.351588249206543,
      "learning_rate": 5.023246411966849e-06,
      "loss": 0.0746,
      "step": 4925
    },
    {
      "epoch": 1.5009096422073984,
      "grad_norm": 3.2933731079101562,
      "learning_rate": 4.997978572872448e-06,
      "loss": 0.0762,
      "step": 4950
    },
    {
      "epoch": 1.5084899939357186,
      "grad_norm": 2.749415874481201,
      "learning_rate": 4.972710733778048e-06,
      "loss": 0.0872,
      "step": 4975
    },
    {
      "epoch": 1.5160703456640388,
      "grad_norm": 3.414822578430176,
      "learning_rate": 4.947442894683647e-06,
      "loss": 0.069,
      "step": 5000
    },
    {
      "epoch": 1.5160703456640388,
      "eval_cer": 4.46672953515652,
      "eval_loss": 0.2117619663476944,
      "eval_runtime": 4150.2171,
      "eval_samples_per_second": 2.458,
      "eval_ser": 60.15877683034402,
      "eval_steps_per_second": 0.615,
      "eval_wer": 17.157756588192935,
      "step": 5000
    },
    {
      "epoch": 1.523650697392359,
      "grad_norm": 3.9905097484588623,
      "learning_rate": 4.922175055589247e-06,
      "loss": 0.0647,
      "step": 5025
    },
    {
      "epoch": 1.531231049120679,
      "grad_norm": 6.00367546081543,
      "learning_rate": 4.8969072164948455e-06,
      "loss": 0.0609,
      "step": 5050
    },
    {
      "epoch": 1.5388114008489993,
      "grad_norm": 3.5311403274536133,
      "learning_rate": 4.871639377400445e-06,
      "loss": 0.0682,
      "step": 5075
    },
    {
      "epoch": 1.5463917525773194,
      "grad_norm": 4.727973461151123,
      "learning_rate": 4.846371538306045e-06,
      "loss": 0.0649,
      "step": 5100
    },
    {
      "epoch": 1.5539721043056398,
      "grad_norm": 5.63254976272583,
      "learning_rate": 4.821103699211644e-06,
      "loss": 0.0774,
      "step": 5125
    },
    {
      "epoch": 1.56155245603396,
      "grad_norm": 2.976087808609009,
      "learning_rate": 4.795835860117244e-06,
      "loss": 0.0711,
      "step": 5150
    },
    {
      "epoch": 1.5691328077622801,
      "grad_norm": 4.30213737487793,
      "learning_rate": 4.770568021022842e-06,
      "loss": 0.0726,
      "step": 5175
    },
    {
      "epoch": 1.5767131594906003,
      "grad_norm": 4.121530055999756,
      "learning_rate": 4.745300181928442e-06,
      "loss": 0.0676,
      "step": 5200
    },
    {
      "epoch": 1.5842935112189207,
      "grad_norm": 3.224336624145508,
      "learning_rate": 4.7200323428340415e-06,
      "loss": 0.0708,
      "step": 5225
    },
    {
      "epoch": 1.5918738629472409,
      "grad_norm": 4.8691935539245605,
      "learning_rate": 4.69476450373964e-06,
      "loss": 0.0829,
      "step": 5250
    },
    {
      "epoch": 1.599454214675561,
      "grad_norm": 5.627283573150635,
      "learning_rate": 4.66949666464524e-06,
      "loss": 0.0676,
      "step": 5275
    },
    {
      "epoch": 1.6070345664038812,
      "grad_norm": 3.1544229984283447,
      "learning_rate": 4.644228825550839e-06,
      "loss": 0.0699,
      "step": 5300
    },
    {
      "epoch": 1.6146149181322014,
      "grad_norm": 3.1078245639801025,
      "learning_rate": 4.618960986456439e-06,
      "loss": 0.0791,
      "step": 5325
    },
    {
      "epoch": 1.6221952698605215,
      "grad_norm": 3.3688204288482666,
      "learning_rate": 4.5936931473620375e-06,
      "loss": 0.0688,
      "step": 5350
    },
    {
      "epoch": 1.6297756215888417,
      "grad_norm": 4.0804524421691895,
      "learning_rate": 4.568425308267637e-06,
      "loss": 0.0778,
      "step": 5375
    },
    {
      "epoch": 1.6373559733171619,
      "grad_norm": 3.7207510471343994,
      "learning_rate": 4.543157469173237e-06,
      "loss": 0.0642,
      "step": 5400
    },
    {
      "epoch": 1.644936325045482,
      "grad_norm": 4.2266316413879395,
      "learning_rate": 4.517889630078836e-06,
      "loss": 0.0736,
      "step": 5425
    },
    {
      "epoch": 1.6525166767738022,
      "grad_norm": 6.174389839172363,
      "learning_rate": 4.492621790984436e-06,
      "loss": 0.0825,
      "step": 5450
    },
    {
      "epoch": 1.6600970285021224,
      "grad_norm": 5.419666290283203,
      "learning_rate": 4.467353951890035e-06,
      "loss": 0.0708,
      "step": 5475
    },
    {
      "epoch": 1.6676773802304425,
      "grad_norm": 3.7497785091400146,
      "learning_rate": 4.442086112795634e-06,
      "loss": 0.0609,
      "step": 5500
    },
    {
      "epoch": 1.6676773802304425,
      "eval_cer": 4.4297641774468595,
      "eval_loss": 0.20770902931690216,
      "eval_runtime": 4161.696,
      "eval_samples_per_second": 2.452,
      "eval_ser": 59.33548956189356,
      "eval_steps_per_second": 0.613,
      "eval_wer": 16.85462422039233,
      "step": 5500
    },
    {
      "epoch": 1.675257731958763,
      "grad_norm": 2.9338574409484863,
      "learning_rate": 4.4168182737012335e-06,
      "loss": 0.0667,
      "step": 5525
    },
    {
      "epoch": 1.682838083687083,
      "grad_norm": 2.5198843479156494,
      "learning_rate": 4.391550434606833e-06,
      "loss": 0.0676,
      "step": 5550
    },
    {
      "epoch": 1.6904184354154033,
      "grad_norm": 5.370730876922607,
      "learning_rate": 4.366282595512433e-06,
      "loss": 0.0786,
      "step": 5575
    },
    {
      "epoch": 1.6979987871437234,
      "grad_norm": 3.7155959606170654,
      "learning_rate": 4.341014756418031e-06,
      "loss": 0.0713,
      "step": 5600
    },
    {
      "epoch": 1.7055791388720438,
      "grad_norm": 5.575277805328369,
      "learning_rate": 4.315746917323631e-06,
      "loss": 0.0708,
      "step": 5625
    },
    {
      "epoch": 1.713159490600364,
      "grad_norm": 2.6846461296081543,
      "learning_rate": 4.2904790782292295e-06,
      "loss": 0.0681,
      "step": 5650
    },
    {
      "epoch": 1.7207398423286842,
      "grad_norm": 6.605522632598877,
      "learning_rate": 4.265211239134829e-06,
      "loss": 0.0604,
      "step": 5675
    },
    {
      "epoch": 1.7283201940570043,
      "grad_norm": 9.453361511230469,
      "learning_rate": 4.239943400040429e-06,
      "loss": 0.0795,
      "step": 5700
    },
    {
      "epoch": 1.7359005457853245,
      "grad_norm": 3.2720046043395996,
      "learning_rate": 4.214675560946028e-06,
      "loss": 0.0717,
      "step": 5725
    },
    {
      "epoch": 1.7434808975136447,
      "grad_norm": 4.286426544189453,
      "learning_rate": 4.189407721851628e-06,
      "loss": 0.0721,
      "step": 5750
    },
    {
      "epoch": 1.7510612492419648,
      "grad_norm": 4.679434776306152,
      "learning_rate": 4.164139882757227e-06,
      "loss": 0.0718,
      "step": 5775
    },
    {
      "epoch": 1.758641600970285,
      "grad_norm": 4.515007972717285,
      "learning_rate": 4.138872043662826e-06,
      "loss": 0.0665,
      "step": 5800
    },
    {
      "epoch": 1.7662219526986052,
      "grad_norm": 4.324512958526611,
      "learning_rate": 4.1136042045684255e-06,
      "loss": 0.0692,
      "step": 5825
    },
    {
      "epoch": 1.7738023044269253,
      "grad_norm": 4.820830821990967,
      "learning_rate": 4.088336365474025e-06,
      "loss": 0.058,
      "step": 5850
    },
    {
      "epoch": 1.7813826561552455,
      "grad_norm": 4.152068614959717,
      "learning_rate": 4.0630685263796246e-06,
      "loss": 0.0596,
      "step": 5875
    },
    {
      "epoch": 1.7889630078835657,
      "grad_norm": 7.0391082763671875,
      "learning_rate": 4.037800687285224e-06,
      "loss": 0.0638,
      "step": 5900
    },
    {
      "epoch": 1.7965433596118858,
      "grad_norm": 4.026524066925049,
      "learning_rate": 4.012532848190823e-06,
      "loss": 0.0704,
      "step": 5925
    },
    {
      "epoch": 1.8041237113402062,
      "grad_norm": 5.237268447875977,
      "learning_rate": 3.987265009096422e-06,
      "loss": 0.0721,
      "step": 5950
    },
    {
      "epoch": 1.8117040630685264,
      "grad_norm": 3.4499592781066895,
      "learning_rate": 3.961997170002022e-06,
      "loss": 0.0705,
      "step": 5975
    },
    {
      "epoch": 1.8192844147968465,
      "grad_norm": 5.843986511230469,
      "learning_rate": 3.936729330907621e-06,
      "loss": 0.0721,
      "step": 6000
    },
    {
      "epoch": 1.8192844147968465,
      "eval_cer": 4.3441688380391605,
      "eval_loss": 0.20601269602775574,
      "eval_runtime": 4156.9236,
      "eval_samples_per_second": 2.454,
      "eval_ser": 58.659217877094974,
      "eval_steps_per_second": 0.614,
      "eval_wer": 16.552653279288275,
      "step": 6000
    },
    {
      "epoch": 1.8268647665251667,
      "grad_norm": 4.0732831954956055,
      "learning_rate": 3.91146149181322e-06,
      "loss": 0.0694,
      "step": 6025
    },
    {
      "epoch": 1.834445118253487,
      "grad_norm": 3.2202863693237305,
      "learning_rate": 3.88619365271882e-06,
      "loss": 0.0724,
      "step": 6050
    },
    {
      "epoch": 1.8420254699818073,
      "grad_norm": 5.942609786987305,
      "learning_rate": 3.860925813624419e-06,
      "loss": 0.0717,
      "step": 6075
    },
    {
      "epoch": 1.8496058217101274,
      "grad_norm": 4.279210090637207,
      "learning_rate": 3.835657974530019e-06,
      "loss": 0.0715,
      "step": 6100
    },
    {
      "epoch": 1.8571861734384476,
      "grad_norm": 3.8644626140594482,
      "learning_rate": 3.8103901354356175e-06,
      "loss": 0.0739,
      "step": 6125
    },
    {
      "epoch": 1.8647665251667678,
      "grad_norm": 7.767407417297363,
      "learning_rate": 3.785122296341217e-06,
      "loss": 0.0689,
      "step": 6150
    },
    {
      "epoch": 1.872346876895088,
      "grad_norm": 6.898746967315674,
      "learning_rate": 3.7598544572468166e-06,
      "loss": 0.0834,
      "step": 6175
    },
    {
      "epoch": 1.879927228623408,
      "grad_norm": 3.7618629932403564,
      "learning_rate": 3.734586618152416e-06,
      "loss": 0.067,
      "step": 6200
    },
    {
      "epoch": 1.8875075803517283,
      "grad_norm": 5.345553398132324,
      "learning_rate": 3.7093187790580152e-06,
      "loss": 0.0807,
      "step": 6225
    },
    {
      "epoch": 1.8950879320800484,
      "grad_norm": 1.9484589099884033,
      "learning_rate": 3.6840509399636148e-06,
      "loss": 0.0577,
      "step": 6250
    },
    {
      "epoch": 1.9026682838083686,
      "grad_norm": 8.23530101776123,
      "learning_rate": 3.658783100869214e-06,
      "loss": 0.0668,
      "step": 6275
    },
    {
      "epoch": 1.9102486355366888,
      "grad_norm": 3.302901029586792,
      "learning_rate": 3.633515261774813e-06,
      "loss": 0.0712,
      "step": 6300
    },
    {
      "epoch": 1.917828987265009,
      "grad_norm": 6.6083855628967285,
      "learning_rate": 3.6082474226804126e-06,
      "loss": 0.0784,
      "step": 6325
    },
    {
      "epoch": 1.9254093389933293,
      "grad_norm": 4.849329948425293,
      "learning_rate": 3.582979583586012e-06,
      "loss": 0.0578,
      "step": 6350
    },
    {
      "epoch": 1.9329896907216495,
      "grad_norm": 3.2557804584503174,
      "learning_rate": 3.5577117444916117e-06,
      "loss": 0.062,
      "step": 6375
    },
    {
      "epoch": 1.9405700424499697,
      "grad_norm": 4.66004753112793,
      "learning_rate": 3.5324439053972108e-06,
      "loss": 0.0652,
      "step": 6400
    },
    {
      "epoch": 1.9481503941782898,
      "grad_norm": 3.7304911613464355,
      "learning_rate": 3.50717606630281e-06,
      "loss": 0.0733,
      "step": 6425
    },
    {
      "epoch": 1.9557307459066102,
      "grad_norm": 6.284624099731445,
      "learning_rate": 3.4819082272084094e-06,
      "loss": 0.0749,
      "step": 6450
    },
    {
      "epoch": 1.9633110976349304,
      "grad_norm": 5.241745948791504,
      "learning_rate": 3.4566403881140086e-06,
      "loss": 0.072,
      "step": 6475
    },
    {
      "epoch": 1.9708914493632506,
      "grad_norm": 4.935563087463379,
      "learning_rate": 3.431372549019608e-06,
      "loss": 0.0681,
      "step": 6500
    },
    {
      "epoch": 1.9708914493632506,
      "eval_cer": 4.328396952083039,
      "eval_loss": 0.20379287004470825,
      "eval_runtime": 4180.665,
      "eval_samples_per_second": 2.441,
      "eval_ser": 58.16916593158875,
      "eval_steps_per_second": 0.61,
      "eval_wer": 16.3575335942672,
      "step": 6500
    },
    {
      "epoch": 1.9784718010915707,
      "grad_norm": 1.918434739112854,
      "learning_rate": 3.4061047099252077e-06,
      "loss": 0.0314,
      "step": 6525
    },
    {
      "epoch": 1.986052152819891,
      "grad_norm": 4.31190299987793,
      "learning_rate": 3.380836870830807e-06,
      "loss": 0.0266,
      "step": 6550
    },
    {
      "epoch": 1.993632504548211,
      "grad_norm": 3.206461191177368,
      "learning_rate": 3.355569031736406e-06,
      "loss": 0.0286,
      "step": 6575
    },
    {
      "epoch": 2.0012128562765312,
      "grad_norm": 1.4688981771469116,
      "learning_rate": 3.3303011926420054e-06,
      "loss": 0.0254,
      "step": 6600
    },
    {
      "epoch": 2.0087932080048514,
      "grad_norm": 2.9358630180358887,
      "learning_rate": 3.305033353547605e-06,
      "loss": 0.0298,
      "step": 6625
    },
    {
      "epoch": 2.0163735597331716,
      "grad_norm": 2.1530213356018066,
      "learning_rate": 3.279765514453204e-06,
      "loss": 0.0311,
      "step": 6650
    },
    {
      "epoch": 2.0239539114614917,
      "grad_norm": 18.083703994750977,
      "learning_rate": 3.2544976753588036e-06,
      "loss": 0.0328,
      "step": 6675
    },
    {
      "epoch": 2.031534263189812,
      "grad_norm": 3.114184856414795,
      "learning_rate": 3.229229836264403e-06,
      "loss": 0.0278,
      "step": 6700
    },
    {
      "epoch": 2.039114614918132,
      "grad_norm": 6.7866106033325195,
      "learning_rate": 3.2039619971700027e-06,
      "loss": 0.0259,
      "step": 6725
    },
    {
      "epoch": 2.0466949666464522,
      "grad_norm": 2.4553844928741455,
      "learning_rate": 3.1786941580756014e-06,
      "loss": 0.0273,
      "step": 6750
    },
    {
      "epoch": 2.0542753183747724,
      "grad_norm": 3.798393726348877,
      "learning_rate": 3.153426318981201e-06,
      "loss": 0.0318,
      "step": 6775
    },
    {
      "epoch": 2.0618556701030926,
      "grad_norm": 6.048929691314697,
      "learning_rate": 3.1281584798868005e-06,
      "loss": 0.034,
      "step": 6800
    },
    {
      "epoch": 2.069436021831413,
      "grad_norm": 0.9682230949401855,
      "learning_rate": 3.1028906407923996e-06,
      "loss": 0.0355,
      "step": 6825
    },
    {
      "epoch": 2.0770163735597333,
      "grad_norm": 0.7081900835037231,
      "learning_rate": 3.077622801697999e-06,
      "loss": 0.0257,
      "step": 6850
    },
    {
      "epoch": 2.0845967252880535,
      "grad_norm": 3.3265202045440674,
      "learning_rate": 3.0523549626035987e-06,
      "loss": 0.0257,
      "step": 6875
    },
    {
      "epoch": 2.0921770770163737,
      "grad_norm": 1.3119407892227173,
      "learning_rate": 3.0270871235091974e-06,
      "loss": 0.0263,
      "step": 6900
    },
    {
      "epoch": 2.099757428744694,
      "grad_norm": 3.3000783920288086,
      "learning_rate": 3.001819284414797e-06,
      "loss": 0.0273,
      "step": 6925
    },
    {
      "epoch": 2.107337780473014,
      "grad_norm": 1.735640048980713,
      "learning_rate": 2.9765514453203965e-06,
      "loss": 0.0314,
      "step": 6950
    },
    {
      "epoch": 2.114918132201334,
      "grad_norm": 4.68803596496582,
      "learning_rate": 2.951283606225996e-06,
      "loss": 0.0338,
      "step": 6975
    },
    {
      "epoch": 2.1224984839296543,
      "grad_norm": 3.5388286113739014,
      "learning_rate": 2.926015767131595e-06,
      "loss": 0.0322,
      "step": 7000
    },
    {
      "epoch": 2.1224984839296543,
      "eval_cer": 4.27089528453468,
      "eval_loss": 0.21301127970218658,
      "eval_runtime": 3826.4362,
      "eval_samples_per_second": 2.666,
      "eval_ser": 57.777124375183774,
      "eval_steps_per_second": 0.667,
      "eval_wer": 16.280879432294633,
      "step": 7000
    },
    {
      "epoch": 2.1300788356579745,
      "grad_norm": 5.576068878173828,
      "learning_rate": 2.9007479280371947e-06,
      "loss": 0.0284,
      "step": 7025
    },
    {
      "epoch": 2.1376591873862947,
      "grad_norm": 1.70852792263031,
      "learning_rate": 2.8754800889427934e-06,
      "loss": 0.0247,
      "step": 7050
    },
    {
      "epoch": 2.145239539114615,
      "grad_norm": 3.2939512729644775,
      "learning_rate": 2.850212249848393e-06,
      "loss": 0.0252,
      "step": 7075
    },
    {
      "epoch": 2.152819890842935,
      "grad_norm": 0.9231658577919006,
      "learning_rate": 2.8249444107539925e-06,
      "loss": 0.0288,
      "step": 7100
    },
    {
      "epoch": 2.160400242571255,
      "grad_norm": 5.767349720001221,
      "learning_rate": 2.799676571659592e-06,
      "loss": 0.0294,
      "step": 7125
    },
    {
      "epoch": 2.1679805942995753,
      "grad_norm": 3.638855457305908,
      "learning_rate": 2.774408732565191e-06,
      "loss": 0.0269,
      "step": 7150
    },
    {
      "epoch": 2.1755609460278955,
      "grad_norm": 4.53891658782959,
      "learning_rate": 2.7491408934707907e-06,
      "loss": 0.0289,
      "step": 7175
    },
    {
      "epoch": 2.1831412977562157,
      "grad_norm": 0.5863853693008423,
      "learning_rate": 2.72387305437639e-06,
      "loss": 0.0247,
      "step": 7200
    },
    {
      "epoch": 2.1907216494845363,
      "grad_norm": 3.777498483657837,
      "learning_rate": 2.698605215281989e-06,
      "loss": 0.0234,
      "step": 7225
    },
    {
      "epoch": 2.1983020012128565,
      "grad_norm": 2.23175048828125,
      "learning_rate": 2.6733373761875885e-06,
      "loss": 0.0314,
      "step": 7250
    },
    {
      "epoch": 2.2058823529411766,
      "grad_norm": 3.1541337966918945,
      "learning_rate": 2.648069537093188e-06,
      "loss": 0.0311,
      "step": 7275
    },
    {
      "epoch": 2.213462704669497,
      "grad_norm": 2.6496334075927734,
      "learning_rate": 2.6228016979987876e-06,
      "loss": 0.0351,
      "step": 7300
    },
    {
      "epoch": 2.221043056397817,
      "grad_norm": 1.7922499179840088,
      "learning_rate": 2.5975338589043867e-06,
      "loss": 0.0211,
      "step": 7325
    },
    {
      "epoch": 2.228623408126137,
      "grad_norm": 5.5097222328186035,
      "learning_rate": 2.572266019809986e-06,
      "loss": 0.0302,
      "step": 7350
    },
    {
      "epoch": 2.2362037598544573,
      "grad_norm": 2.997144937515259,
      "learning_rate": 2.5469981807155854e-06,
      "loss": 0.0296,
      "step": 7375
    },
    {
      "epoch": 2.2437841115827775,
      "grad_norm": 1.941422462463379,
      "learning_rate": 2.5217303416211845e-06,
      "loss": 0.0244,
      "step": 7400
    },
    {
      "epoch": 2.2513644633110976,
      "grad_norm": 1.389489769935608,
      "learning_rate": 2.496462502526784e-06,
      "loss": 0.0308,
      "step": 7425
    },
    {
      "epoch": 2.258944815039418,
      "grad_norm": 1.393212914466858,
      "learning_rate": 2.4711946634323836e-06,
      "loss": 0.0262,
      "step": 7450
    },
    {
      "epoch": 2.266525166767738,
      "grad_norm": 2.6350386142730713,
      "learning_rate": 2.4459268243379827e-06,
      "loss": 0.0263,
      "step": 7475
    },
    {
      "epoch": 2.274105518496058,
      "grad_norm": 3.369213819503784,
      "learning_rate": 2.4206589852435823e-06,
      "loss": 0.0277,
      "step": 7500
    },
    {
      "epoch": 2.274105518496058,
      "eval_cer": 4.254301946185011,
      "eval_loss": 0.2150956392288208,
      "eval_runtime": 3817.2467,
      "eval_samples_per_second": 2.673,
      "eval_ser": 57.47329216896991,
      "eval_steps_per_second": 0.668,
      "eval_wer": 16.106665427811524,
      "step": 7500
    },
| { | |
| "epoch": 2.2816858702243783, | |
| "grad_norm": 2.3932759761810303, | |
| "learning_rate": 2.395391146149182e-06, | |
| "loss": 0.0321, | |
| "step": 7525 | |
| }, | |
| { | |
| "epoch": 2.2892662219526985, | |
| "grad_norm": 2.8787713050842285, | |
| "learning_rate": 2.370123307054781e-06, | |
| "loss": 0.0278, | |
| "step": 7550 | |
| }, | |
| { | |
| "epoch": 2.2968465736810186, | |
| "grad_norm": 2.570692539215088, | |
| "learning_rate": 2.34485546796038e-06, | |
| "loss": 0.0211, | |
| "step": 7575 | |
| }, | |
| { | |
| "epoch": 2.304426925409339, | |
| "grad_norm": 2.9834413528442383, | |
| "learning_rate": 2.3195876288659796e-06, | |
| "loss": 0.0308, | |
| "step": 7600 | |
| }, | |
| { | |
| "epoch": 2.312007277137659, | |
| "grad_norm": 3.4857709407806396, | |
| "learning_rate": 2.2943197897715787e-06, | |
| "loss": 0.0272, | |
| "step": 7625 | |
| }, | |
| { | |
| "epoch": 2.319587628865979, | |
| "grad_norm": 4.783783435821533, | |
| "learning_rate": 2.2690519506771783e-06, | |
| "loss": 0.0248, | |
| "step": 7650 | |
| }, | |
| { | |
| "epoch": 2.3271679805942997, | |
| "grad_norm": 3.023923873901367, | |
| "learning_rate": 2.243784111582778e-06, | |
| "loss": 0.0297, | |
| "step": 7675 | |
| }, | |
| { | |
| "epoch": 2.33474833232262, | |
| "grad_norm": 1.8888705968856812, | |
| "learning_rate": 2.218516272488377e-06, | |
| "loss": 0.0291, | |
| "step": 7700 | |
| }, | |
| { | |
| "epoch": 2.34232868405094, | |
| "grad_norm": 2.11702561378479, | |
| "learning_rate": 2.1932484333939765e-06, | |
| "loss": 0.0271, | |
| "step": 7725 | |
| }, | |
| { | |
| "epoch": 2.3499090357792602, | |
| "grad_norm": 2.8734652996063232, | |
| "learning_rate": 2.1679805942995756e-06, | |
| "loss": 0.0309, | |
| "step": 7750 | |
| }, | |
| { | |
| "epoch": 2.3574893875075804, | |
| "grad_norm": 1.2483388185501099, | |
| "learning_rate": 2.1427127552051747e-06, | |
| "loss": 0.0273, | |
| "step": 7775 | |
| }, | |
| { | |
| "epoch": 2.3650697392359006, | |
| "grad_norm": 1.2977802753448486, | |
| "learning_rate": 2.1174449161107743e-06, | |
| "loss": 0.0302, | |
| "step": 7800 | |
| }, | |
| { | |
| "epoch": 2.3726500909642207, | |
| "grad_norm": 3.370899200439453, | |
| "learning_rate": 2.092177077016374e-06, | |
| "loss": 0.0268, | |
| "step": 7825 | |
| }, | |
| { | |
| "epoch": 2.380230442692541, | |
| "grad_norm": 2.3007266521453857, | |
| "learning_rate": 2.066909237921973e-06, | |
| "loss": 0.0265, | |
| "step": 7850 | |
| }, | |
| { | |
| "epoch": 2.387810794420861, | |
| "grad_norm": 3.0940144062042236, | |
| "learning_rate": 2.0416413988275725e-06, | |
| "loss": 0.0221, | |
| "step": 7875 | |
| }, | |
| { | |
| "epoch": 2.3953911461491812, | |
| "grad_norm": 2.137110948562622, | |
| "learning_rate": 2.016373559733172e-06, | |
| "loss": 0.0273, | |
| "step": 7900 | |
| }, | |
| { | |
| "epoch": 2.4029714978775014, | |
| "grad_norm": 3.142803430557251, | |
| "learning_rate": 1.991105720638771e-06, | |
| "loss": 0.0309, | |
| "step": 7925 | |
| }, | |
| { | |
| "epoch": 2.4105518496058216, | |
| "grad_norm": 3.708592176437378, | |
| "learning_rate": 1.9658378815443703e-06, | |
| "loss": 0.0269, | |
| "step": 7950 | |
| }, | |
| { | |
| "epoch": 2.4181322013341418, | |
| "grad_norm": 2.705143451690674, | |
| "learning_rate": 1.94057004244997e-06, | |
| "loss": 0.0284, | |
| "step": 7975 | |
| }, | |
| { | |
| "epoch": 2.425712553062462, | |
| "grad_norm": 2.2267937660217285, | |
| "learning_rate": 1.9153022033555693e-06, | |
| "loss": 0.0249, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 2.425712553062462, | |
| "eval_cer": 4.251344717568238, | |
| "eval_loss": 0.21296031773090363, | |
| "eval_runtime": 3956.9891, | |
| "eval_samples_per_second": 2.578, | |
| "eval_ser": 57.463491130059786, | |
| "eval_steps_per_second": 0.645, | |
| "eval_wer": 16.07414548030801, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 2.4332929047907825, | |
| "grad_norm": 2.44812273979187, | |
| "learning_rate": 1.8900343642611685e-06, | |
| "loss": 0.0301, | |
| "step": 8025 | |
| }, | |
| { | |
| "epoch": 2.4408732565191027, | |
| "grad_norm": 3.240962028503418, | |
| "learning_rate": 1.864766525166768e-06, | |
| "loss": 0.0315, | |
| "step": 8050 | |
| }, | |
| { | |
| "epoch": 2.448453608247423, | |
| "grad_norm": 3.5091423988342285, | |
| "learning_rate": 1.8394986860723673e-06, | |
| "loss": 0.0218, | |
| "step": 8075 | |
| }, | |
| { | |
| "epoch": 2.456033959975743, | |
| "grad_norm": 2.2608911991119385, | |
| "learning_rate": 1.8142308469779665e-06, | |
| "loss": 0.026, | |
| "step": 8100 | |
| }, | |
| { | |
| "epoch": 2.463614311704063, | |
| "grad_norm": 2.7215895652770996, | |
| "learning_rate": 1.788963007883566e-06, | |
| "loss": 0.0323, | |
| "step": 8125 | |
| }, | |
| { | |
| "epoch": 2.4711946634323834, | |
| "grad_norm": 3.6915700435638428, | |
| "learning_rate": 1.7636951687891653e-06, | |
| "loss": 0.0397, | |
| "step": 8150 | |
| }, | |
| { | |
| "epoch": 2.4787750151607035, | |
| "grad_norm": 3.11942720413208, | |
| "learning_rate": 1.7384273296947647e-06, | |
| "loss": 0.032, | |
| "step": 8175 | |
| }, | |
| { | |
| "epoch": 2.4863553668890237, | |
| "grad_norm": 0.9445828795433044, | |
| "learning_rate": 1.713159490600364e-06, | |
| "loss": 0.0244, | |
| "step": 8200 | |
| }, | |
| { | |
| "epoch": 2.493935718617344, | |
| "grad_norm": 2.8565821647644043, | |
| "learning_rate": 1.6878916515059636e-06, | |
| "loss": 0.0283, | |
| "step": 8225 | |
| }, | |
| { | |
| "epoch": 2.501516070345664, | |
| "grad_norm": 0.4366835057735443, | |
| "learning_rate": 1.6626238124115627e-06, | |
| "loss": 0.0236, | |
| "step": 8250 | |
| }, | |
| { | |
| "epoch": 2.509096422073984, | |
| "grad_norm": 2.801251173019409, | |
| "learning_rate": 1.637355973317162e-06, | |
| "loss": 0.0278, | |
| "step": 8275 | |
| }, | |
| { | |
| "epoch": 2.5166767738023044, | |
| "grad_norm": 2.6153385639190674, | |
| "learning_rate": 1.6120881342227616e-06, | |
| "loss": 0.0274, | |
| "step": 8300 | |
| }, | |
| { | |
| "epoch": 2.5242571255306245, | |
| "grad_norm": 4.796694755554199, | |
| "learning_rate": 1.5868202951283607e-06, | |
| "loss": 0.023, | |
| "step": 8325 | |
| }, | |
| { | |
| "epoch": 2.5318374772589447, | |
| "grad_norm": 1.340336799621582, | |
| "learning_rate": 1.56155245603396e-06, | |
| "loss": 0.0266, | |
| "step": 8350 | |
| }, | |
| { | |
| "epoch": 2.539417828987265, | |
| "grad_norm": 2.282335042953491, | |
| "learning_rate": 1.5362846169395595e-06, | |
| "loss": 0.0221, | |
| "step": 8375 | |
| }, | |
| { | |
| "epoch": 2.546998180715585, | |
| "grad_norm": 0.9249318242073059, | |
| "learning_rate": 1.5110167778451587e-06, | |
| "loss": 0.0222, | |
| "step": 8400 | |
| }, | |
| { | |
| "epoch": 2.554578532443905, | |
| "grad_norm": 0.8499120473861694, | |
| "learning_rate": 1.4857489387507582e-06, | |
| "loss": 0.0219, | |
| "step": 8425 | |
| }, | |
| { | |
| "epoch": 2.5621588841722254, | |
| "grad_norm": 2.0028629302978516, | |
| "learning_rate": 1.4604810996563575e-06, | |
| "loss": 0.0352, | |
| "step": 8450 | |
| }, | |
| { | |
| "epoch": 2.5697392359005455, | |
| "grad_norm": 3.0861124992370605, | |
| "learning_rate": 1.4352132605619567e-06, | |
| "loss": 0.0255, | |
| "step": 8475 | |
| }, | |
| { | |
| "epoch": 2.5773195876288657, | |
| "grad_norm": 3.6241636276245117, | |
| "learning_rate": 1.4099454214675562e-06, | |
| "loss": 0.0234, | |
| "step": 8500 | |
| }, | |
| { | |
| "epoch": 2.5773195876288657, | |
| "eval_cer": 4.2832170704379, | |
| "eval_loss": 0.21502597630023956, | |
| "eval_runtime": 4007.4384, | |
| "eval_samples_per_second": 2.546, | |
| "eval_ser": 57.6693129471724, | |
| "eval_steps_per_second": 0.637, | |
| "eval_wer": 16.259973751756657, | |
| "step": 8500 | |
| }, | |
| { | |
| "epoch": 2.5848999393571863, | |
| "grad_norm": 3.110015869140625, | |
| "learning_rate": 1.3846775823731555e-06, | |
| "loss": 0.0286, | |
| "step": 8525 | |
| }, | |
| { | |
| "epoch": 2.5924802910855065, | |
| "grad_norm": 2.1836204528808594, | |
| "learning_rate": 1.3594097432787549e-06, | |
| "loss": 0.0283, | |
| "step": 8550 | |
| }, | |
| { | |
| "epoch": 2.6000606428138267, | |
| "grad_norm": 2.07279634475708, | |
| "learning_rate": 1.3341419041843542e-06, | |
| "loss": 0.0334, | |
| "step": 8575 | |
| }, | |
| { | |
| "epoch": 2.607640994542147, | |
| "grad_norm": 3.523423910140991, | |
| "learning_rate": 1.3088740650899538e-06, | |
| "loss": 0.0281, | |
| "step": 8600 | |
| }, | |
| { | |
| "epoch": 2.615221346270467, | |
| "grad_norm": 3.105787992477417, | |
| "learning_rate": 1.2836062259955529e-06, | |
| "loss": 0.0242, | |
| "step": 8625 | |
| }, | |
| { | |
| "epoch": 2.622801697998787, | |
| "grad_norm": 0.8383470177650452, | |
| "learning_rate": 1.2583383869011522e-06, | |
| "loss": 0.0319, | |
| "step": 8650 | |
| }, | |
| { | |
| "epoch": 2.6303820497271073, | |
| "grad_norm": 0.714475691318512, | |
| "learning_rate": 1.2330705478067518e-06, | |
| "loss": 0.0265, | |
| "step": 8675 | |
| }, | |
| { | |
| "epoch": 2.6379624014554275, | |
| "grad_norm": 1.9016785621643066, | |
| "learning_rate": 1.207802708712351e-06, | |
| "loss": 0.0293, | |
| "step": 8700 | |
| }, | |
| { | |
| "epoch": 2.6455427531837477, | |
| "grad_norm": 4.238561153411865, | |
| "learning_rate": 1.1825348696179504e-06, | |
| "loss": 0.0342, | |
| "step": 8725 | |
| }, | |
| { | |
| "epoch": 2.653123104912068, | |
| "grad_norm": 2.495922803878784, | |
| "learning_rate": 1.1572670305235498e-06, | |
| "loss": 0.0223, | |
| "step": 8750 | |
| }, | |
| { | |
| "epoch": 2.660703456640388, | |
| "grad_norm": 2.157742738723755, | |
| "learning_rate": 1.131999191429149e-06, | |
| "loss": 0.0265, | |
| "step": 8775 | |
| }, | |
| { | |
| "epoch": 2.668283808368708, | |
| "grad_norm": 1.7393815517425537, | |
| "learning_rate": 1.1067313523347484e-06, | |
| "loss": 0.0238, | |
| "step": 8800 | |
| }, | |
| { | |
| "epoch": 2.6758641600970288, | |
| "grad_norm": 5.519290924072266, | |
| "learning_rate": 1.0814635132403478e-06, | |
| "loss": 0.0217, | |
| "step": 8825 | |
| }, | |
| { | |
| "epoch": 2.683444511825349, | |
| "grad_norm": 1.7298911809921265, | |
| "learning_rate": 1.056195674145947e-06, | |
| "loss": 0.0247, | |
| "step": 8850 | |
| }, | |
| { | |
| "epoch": 2.691024863553669, | |
| "grad_norm": 1.5079689025878906, | |
| "learning_rate": 1.0309278350515464e-06, | |
| "loss": 0.0258, | |
| "step": 8875 | |
| }, | |
| { | |
| "epoch": 2.6986052152819893, | |
| "grad_norm": 2.1406021118164062, | |
| "learning_rate": 1.005659995957146e-06, | |
| "loss": 0.0231, | |
| "step": 8900 | |
| }, | |
| { | |
| "epoch": 2.7061855670103094, | |
| "grad_norm": 2.1180737018585205, | |
| "learning_rate": 9.80392156862745e-07, | |
| "loss": 0.0245, | |
| "step": 8925 | |
| }, | |
| { | |
| "epoch": 2.7137659187386296, | |
| "grad_norm": 2.1627533435821533, | |
| "learning_rate": 9.551243177683444e-07, | |
| "loss": 0.0264, | |
| "step": 8950 | |
| }, | |
| { | |
| "epoch": 2.7213462704669498, | |
| "grad_norm": 0.6703803539276123, | |
| "learning_rate": 9.29856478673944e-07, | |
| "loss": 0.0206, | |
| "step": 8975 | |
| }, | |
| { | |
| "epoch": 2.72892662219527, | |
| "grad_norm": 3.99841046333313, | |
| "learning_rate": 9.045886395795432e-07, | |
| "loss": 0.0264, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 2.72892662219527, | |
| "eval_cer": 4.264487955865006, | |
| "eval_loss": 0.21449251472949982, | |
| "eval_runtime": 3978.4945, | |
| "eval_samples_per_second": 2.565, | |
| "eval_ser": 57.630108791531896, | |
| "eval_steps_per_second": 0.641, | |
| "eval_wer": 16.115956841383955, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 2.73650697392359, | |
| "grad_norm": 4.727873802185059, | |
| "learning_rate": 8.793208004851426e-07, | |
| "loss": 0.0215, | |
| "step": 9025 | |
| }, | |
| { | |
| "epoch": 2.7440873256519103, | |
| "grad_norm": 4.042278289794922, | |
| "learning_rate": 8.54052961390742e-07, | |
| "loss": 0.0167, | |
| "step": 9050 | |
| }, | |
| { | |
| "epoch": 2.7516676773802304, | |
| "grad_norm": 3.561555862426758, | |
| "learning_rate": 8.287851222963413e-07, | |
| "loss": 0.0318, | |
| "step": 9075 | |
| }, | |
| { | |
| "epoch": 2.7592480291085506, | |
| "grad_norm": 2.2994942665100098, | |
| "learning_rate": 8.035172832019407e-07, | |
| "loss": 0.0263, | |
| "step": 9100 | |
| }, | |
| { | |
| "epoch": 2.7668283808368708, | |
| "grad_norm": 1.5986050367355347, | |
| "learning_rate": 7.7824944410754e-07, | |
| "loss": 0.0232, | |
| "step": 9125 | |
| }, | |
| { | |
| "epoch": 2.774408732565191, | |
| "grad_norm": 1.9084292650222778, | |
| "learning_rate": 7.529816050131393e-07, | |
| "loss": 0.0315, | |
| "step": 9150 | |
| }, | |
| { | |
| "epoch": 2.781989084293511, | |
| "grad_norm": 2.2224199771881104, | |
| "learning_rate": 7.277137659187387e-07, | |
| "loss": 0.0223, | |
| "step": 9175 | |
| }, | |
| { | |
| "epoch": 2.7895694360218313, | |
| "grad_norm": 4.254256248474121, | |
| "learning_rate": 7.024459268243381e-07, | |
| "loss": 0.0214, | |
| "step": 9200 | |
| }, | |
| { | |
| "epoch": 2.7971497877501514, | |
| "grad_norm": 0.8206575512886047, | |
| "learning_rate": 6.771780877299374e-07, | |
| "loss": 0.023, | |
| "step": 9225 | |
| }, | |
| { | |
| "epoch": 2.8047301394784716, | |
| "grad_norm": 1.8297322988510132, | |
| "learning_rate": 6.519102486355368e-07, | |
| "loss": 0.0246, | |
| "step": 9250 | |
| }, | |
| { | |
| "epoch": 2.8123104912067918, | |
| "grad_norm": 2.382608413696289, | |
| "learning_rate": 6.266424095411361e-07, | |
| "loss": 0.0317, | |
| "step": 9275 | |
| }, | |
| { | |
| "epoch": 2.819890842935112, | |
| "grad_norm": 2.400407075881958, | |
| "learning_rate": 6.013745704467355e-07, | |
| "loss": 0.0233, | |
| "step": 9300 | |
| }, | |
| { | |
| "epoch": 2.827471194663432, | |
| "grad_norm": 1.5915170907974243, | |
| "learning_rate": 5.761067313523347e-07, | |
| "loss": 0.0185, | |
| "step": 9325 | |
| }, | |
| { | |
| "epoch": 2.8350515463917527, | |
| "grad_norm": 0.9242422580718994, | |
| "learning_rate": 5.508388922579342e-07, | |
| "loss": 0.0157, | |
| "step": 9350 | |
| }, | |
| { | |
| "epoch": 2.842631898120073, | |
| "grad_norm": 2.4773428440093994, | |
| "learning_rate": 5.255710531635335e-07, | |
| "loss": 0.022, | |
| "step": 9375 | |
| }, | |
| { | |
| "epoch": 2.850212249848393, | |
| "grad_norm": 1.8242741823196411, | |
| "learning_rate": 5.003032140691328e-07, | |
| "loss": 0.0228, | |
| "step": 9400 | |
| }, | |
| { | |
| "epoch": 2.857792601576713, | |
| "grad_norm": 1.73279869556427, | |
| "learning_rate": 4.7503537497473216e-07, | |
| "loss": 0.0261, | |
| "step": 9425 | |
| }, | |
| { | |
| "epoch": 2.8653729533050334, | |
| "grad_norm": 5.991119384765625, | |
| "learning_rate": 4.4976753588033155e-07, | |
| "loss": 0.0288, | |
| "step": 9450 | |
| }, | |
| { | |
| "epoch": 2.8729533050333536, | |
| "grad_norm": 6.558225154876709, | |
| "learning_rate": 4.2449969678593093e-07, | |
| "loss": 0.031, | |
| "step": 9475 | |
| }, | |
| { | |
| "epoch": 2.8805336567616737, | |
| "grad_norm": 1.9604460000991821, | |
| "learning_rate": 3.992318576915302e-07, | |
| "loss": 0.0268, | |
| "step": 9500 | |
| }, | |
| { | |
| "epoch": 2.8805336567616737, | |
| "eval_cer": 4.232122731559215, | |
| "eval_loss": 0.21253998577594757, | |
| "eval_runtime": 3873.6751, | |
| "eval_samples_per_second": 2.634, | |
| "eval_ser": 57.522297363520536, | |
| "eval_steps_per_second": 0.659, | |
| "eval_wer": 16.040464106107944, | |
| "step": 9500 | |
| }, | |
| { | |
| "epoch": 2.888114008489994, | |
| "grad_norm": 0.7039766907691956, | |
| "learning_rate": 3.739640185971296e-07, | |
| "loss": 0.0336, | |
| "step": 9525 | |
| }, | |
| { | |
| "epoch": 2.895694360218314, | |
| "grad_norm": 3.8160667419433594, | |
| "learning_rate": 3.48696179502729e-07, | |
| "loss": 0.0295, | |
| "step": 9550 | |
| }, | |
| { | |
| "epoch": 2.9032747119466342, | |
| "grad_norm": 2.7596523761749268, | |
| "learning_rate": 3.234283404083283e-07, | |
| "loss": 0.0239, | |
| "step": 9575 | |
| }, | |
| { | |
| "epoch": 2.9108550636749544, | |
| "grad_norm": 1.7568339109420776, | |
| "learning_rate": 2.9816050131392765e-07, | |
| "loss": 0.0344, | |
| "step": 9600 | |
| }, | |
| { | |
| "epoch": 2.9184354154032746, | |
| "grad_norm": 4.06329870223999, | |
| "learning_rate": 2.7289266221952704e-07, | |
| "loss": 0.0246, | |
| "step": 9625 | |
| }, | |
| { | |
| "epoch": 2.926015767131595, | |
| "grad_norm": 5.080215930938721, | |
| "learning_rate": 2.4762482312512637e-07, | |
| "loss": 0.0279, | |
| "step": 9650 | |
| }, | |
| { | |
| "epoch": 2.9335961188599153, | |
| "grad_norm": 0.7092571258544922, | |
| "learning_rate": 2.2235698403072573e-07, | |
| "loss": 0.0287, | |
| "step": 9675 | |
| }, | |
| { | |
| "epoch": 2.9411764705882355, | |
| "grad_norm": 4.8213911056518555, | |
| "learning_rate": 1.9708914493632506e-07, | |
| "loss": 0.0219, | |
| "step": 9700 | |
| }, | |
| { | |
| "epoch": 2.9487568223165557, | |
| "grad_norm": 1.9876303672790527, | |
| "learning_rate": 1.7182130584192442e-07, | |
| "loss": 0.0215, | |
| "step": 9725 | |
| }, | |
| { | |
| "epoch": 2.956337174044876, | |
| "grad_norm": 4.70910120010376, | |
| "learning_rate": 1.4655346674752375e-07, | |
| "loss": 0.0308, | |
| "step": 9750 | |
| }, | |
| { | |
| "epoch": 2.963917525773196, | |
| "grad_norm": 3.7995145320892334, | |
| "learning_rate": 1.212856276531231e-07, | |
| "loss": 0.0231, | |
| "step": 9775 | |
| }, | |
| { | |
| "epoch": 2.971497877501516, | |
| "grad_norm": 1.9700251817703247, | |
| "learning_rate": 9.601778855872247e-08, | |
| "loss": 0.0249, | |
| "step": 9800 | |
| }, | |
| { | |
| "epoch": 2.9790782292298363, | |
| "grad_norm": 2.985954761505127, | |
| "learning_rate": 7.074994946432182e-08, | |
| "loss": 0.0245, | |
| "step": 9825 | |
| }, | |
| { | |
| "epoch": 2.9866585809581565, | |
| "grad_norm": 5.655767917633057, | |
| "learning_rate": 4.548211036992117e-08, | |
| "loss": 0.0287, | |
| "step": 9850 | |
| }, | |
| { | |
| "epoch": 2.9942389326864767, | |
| "grad_norm": 2.333650588989258, | |
| "learning_rate": 2.021427127552052e-08, | |
| "loss": 0.0215, | |
| "step": 9875 | |
| } | |
| ], | |
| "logging_steps": 25, | |
| "max_steps": 9894, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 3, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.283605131272192e+19, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
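
The JSON object ends above. As a minimal, hedged sketch (not part of the original training run), the snippet below shows one way to load a `trainer_state.json` like this with only the Python standard library, list the per-checkpoint evaluation metrics (`eval_wer`, `eval_cer`, `eval_loss`) from `log_history`, and cross-check the `best_metric` / `best_model_checkpoint` header fields against the logged history. The file path is an assumption; point it at wherever this JSON is stored.

```python
# Sketch only: summarize a Hugging Face Trainer state file of the shape shown above.
# Assumes the JSON is saved locally as "trainer_state.json" (hypothetical path).
import json

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only the evaluation entries; plain training-loss entries lack "eval_wer".
evals = [e for e in state["log_history"] if "eval_wer" in e]

for e in evals:
    print(f'step {e["step"]:>5}: WER {e["eval_wer"]:.2f}  '
          f'CER {e["eval_cer"]:.2f}  loss {e["eval_loss"]:.4f}')

# Cross-check the recorded best checkpoint against the logged history.
best = min(evals, key=lambda e: e["eval_wer"])
print("lowest logged WER:", round(best["eval_wer"], 4), "at step", best["step"])
print("recorded best_metric:", state["best_metric"],
      "at", state["best_model_checkpoint"])
```

Run against this file, the lowest logged WER (16.0405 at step 9500) should agree with the recorded `best_metric` and `best_model_checkpoint`.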