{
  "best_metric": 5.288159547738693,
  "best_model_checkpoint": "/kaggle/working/checkpoint-4000",
  "epoch": 2.873563218390805,
  "eval_steps": 1000,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017959770114942528,
      "grad_norm": 24.085046768188477,
      "learning_rate": 4.4e-07,
      "loss": 1.4603,
      "step": 25
    },
    {
      "epoch": 0.035919540229885055,
      "grad_norm": 13.078679084777832,
      "learning_rate": 9.400000000000001e-07,
      "loss": 1.3305,
      "step": 50
    },
    {
      "epoch": 0.05387931034482758,
      "grad_norm": 17.343385696411133,
      "learning_rate": 1.44e-06,
      "loss": 1.0971,
      "step": 75
    },
    {
      "epoch": 0.07183908045977011,
      "grad_norm": 10.30687427520752,
      "learning_rate": 1.94e-06,
      "loss": 0.6573,
      "step": 100
    },
    {
      "epoch": 0.08979885057471264,
      "grad_norm": 7.07254695892334,
      "learning_rate": 2.4400000000000004e-06,
      "loss": 0.5107,
      "step": 125
    },
    {
      "epoch": 0.10775862068965517,
      "grad_norm": 7.000039100646973,
      "learning_rate": 2.9400000000000002e-06,
      "loss": 0.4643,
      "step": 150
    },
    {
      "epoch": 0.1257183908045977,
      "grad_norm": 7.2714972496032715,
      "learning_rate": 3.44e-06,
      "loss": 0.4358,
      "step": 175
    },
    {
      "epoch": 0.14367816091954022,
      "grad_norm": 6.5415730476379395,
      "learning_rate": 3.94e-06,
      "loss": 0.4219,
      "step": 200
    },
    {
      "epoch": 0.16163793103448276,
      "grad_norm": 7.611192226409912,
      "learning_rate": 4.440000000000001e-06,
      "loss": 0.4036,
      "step": 225
    },
    {
      "epoch": 0.17959770114942528,
      "grad_norm": 6.730944633483887,
      "learning_rate": 4.94e-06,
      "loss": 0.4024,
      "step": 250
    },
    {
      "epoch": 0.19755747126436782,
      "grad_norm": 6.479249000549316,
      "learning_rate": 5.4400000000000004e-06,
      "loss": 0.3915,
      "step": 275
    },
    {
      "epoch": 0.21551724137931033,
      "grad_norm": 6.75312614440918,
      "learning_rate": 5.94e-06,
      "loss": 0.3638,
      "step": 300
    },
    {
      "epoch": 0.23347701149425287,
      "grad_norm": 5.702291488647461,
      "learning_rate": 6.440000000000001e-06,
      "loss": 0.3708,
      "step": 325
    },
    {
      "epoch": 0.2514367816091954,
      "grad_norm": 5.4078688621521,
      "learning_rate": 6.9400000000000005e-06,
      "loss": 0.3556,
      "step": 350
    },
    {
      "epoch": 0.26939655172413796,
      "grad_norm": 5.735648155212402,
      "learning_rate": 7.440000000000001e-06,
      "loss": 0.3444,
      "step": 375
    },
    {
      "epoch": 0.28735632183908044,
      "grad_norm": 6.246584892272949,
      "learning_rate": 7.94e-06,
      "loss": 0.3443,
      "step": 400
    },
    {
      "epoch": 0.305316091954023,
      "grad_norm": 6.741141319274902,
      "learning_rate": 8.44e-06,
      "loss": 0.3192,
      "step": 425
    },
    {
      "epoch": 0.3232758620689655,
      "grad_norm": 6.674511432647705,
      "learning_rate": 8.94e-06,
      "loss": 0.3249,
      "step": 450
    },
    {
      "epoch": 0.34123563218390807,
      "grad_norm": 6.338778018951416,
      "learning_rate": 9.440000000000001e-06,
      "loss": 0.3331,
      "step": 475
    },
    {
      "epoch": 0.35919540229885055,
      "grad_norm": 6.05772590637207,
      "learning_rate": 9.940000000000001e-06,
      "loss": 0.3067,
      "step": 500
    },
    {
      "epoch": 0.3771551724137931,
      "grad_norm": 6.442326545715332,
      "learning_rate": 9.937142857142858e-06,
      "loss": 0.3275,
      "step": 525
    },
    {
      "epoch": 0.39511494252873564,
      "grad_norm": 5.767732620239258,
      "learning_rate": 9.865714285714285e-06,
      "loss": 0.3218,
      "step": 550
    },
    {
      "epoch": 0.4130747126436782,
      "grad_norm": 5.053823471069336,
      "learning_rate": 9.794285714285714e-06,
      "loss": 0.3078,
      "step": 575
    },
    {
      "epoch": 0.43103448275862066,
      "grad_norm": 5.843025207519531,
      "learning_rate": 9.722857142857143e-06,
      "loss": 0.2923,
      "step": 600
    },
    {
      "epoch": 0.4489942528735632,
      "grad_norm": 6.414527893066406,
      "learning_rate": 9.651428571428572e-06,
      "loss": 0.2823,
      "step": 625
    },
    {
      "epoch": 0.46695402298850575,
      "grad_norm": 5.519237041473389,
      "learning_rate": 9.58e-06,
      "loss": 0.2789,
      "step": 650
    },
    {
      "epoch": 0.4849137931034483,
      "grad_norm": 4.679568290710449,
      "learning_rate": 9.508571428571429e-06,
      "loss": 0.2856,
      "step": 675
    },
    {
      "epoch": 0.5028735632183908,
      "grad_norm": 5.241317272186279,
      "learning_rate": 9.437142857142858e-06,
      "loss": 0.2765,
      "step": 700
    },
    {
      "epoch": 0.5208333333333334,
      "grad_norm": 5.156564235687256,
      "learning_rate": 9.365714285714287e-06,
      "loss": 0.2715,
      "step": 725
    },
    {
      "epoch": 0.5387931034482759,
      "grad_norm": 5.449781894683838,
      "learning_rate": 9.294285714285714e-06,
      "loss": 0.2713,
      "step": 750
    },
    {
      "epoch": 0.5567528735632183,
      "grad_norm": 5.37013053894043,
      "learning_rate": 9.222857142857143e-06,
      "loss": 0.2662,
      "step": 775
    },
    {
      "epoch": 0.5747126436781609,
      "grad_norm": 5.62649393081665,
      "learning_rate": 9.151428571428572e-06,
      "loss": 0.2814,
      "step": 800
    },
    {
      "epoch": 0.5926724137931034,
      "grad_norm": 5.46066427230835,
      "learning_rate": 9.080000000000001e-06,
      "loss": 0.2606,
      "step": 825
    },
    {
      "epoch": 0.610632183908046,
      "grad_norm": 5.137524604797363,
      "learning_rate": 9.00857142857143e-06,
      "loss": 0.2329,
      "step": 850
    },
    {
      "epoch": 0.6285919540229885,
      "grad_norm": 5.287104606628418,
      "learning_rate": 8.937142857142857e-06,
      "loss": 0.2595,
      "step": 875
    },
    {
      "epoch": 0.646551724137931,
      "grad_norm": 6.085468292236328,
      "learning_rate": 8.865714285714287e-06,
      "loss": 0.2558,
      "step": 900
    },
    {
      "epoch": 0.6645114942528736,
      "grad_norm": 6.9088826179504395,
      "learning_rate": 8.794285714285716e-06,
      "loss": 0.2408,
      "step": 925
    },
    {
      "epoch": 0.6824712643678161,
      "grad_norm": 5.21582555770874,
      "learning_rate": 8.722857142857145e-06,
      "loss": 0.2255,
      "step": 950
    },
    {
      "epoch": 0.7004310344827587,
      "grad_norm": 4.872750759124756,
      "learning_rate": 8.651428571428572e-06,
      "loss": 0.2292,
      "step": 975
    },
    {
      "epoch": 0.7183908045977011,
      "grad_norm": 5.2055583000183105,
      "learning_rate": 8.580000000000001e-06,
      "loss": 0.2542,
      "step": 1000
    },
    {
      "epoch": 0.7183908045977011,
      "eval_cer": 9.284704773869347,
      "eval_loss": 0.27573877573013306,
      "eval_runtime": 208.7308,
      "eval_samples_per_second": 2.189,
      "eval_steps_per_second": 0.278,
      "step": 1000
    },
    {
      "epoch": 0.7363505747126436,
      "grad_norm": 5.838357925415039,
      "learning_rate": 8.50857142857143e-06,
      "loss": 0.2147,
      "step": 1025
    },
    {
      "epoch": 0.7543103448275862,
      "grad_norm": 5.988395690917969,
      "learning_rate": 8.437142857142859e-06,
      "loss": 0.2399,
      "step": 1050
    },
    {
      "epoch": 0.7722701149425287,
      "grad_norm": 4.545289993286133,
      "learning_rate": 8.365714285714286e-06,
      "loss": 0.2243,
      "step": 1075
    },
    {
      "epoch": 0.7902298850574713,
      "grad_norm": 4.793825626373291,
      "learning_rate": 8.294285714285715e-06,
      "loss": 0.2275,
      "step": 1100
    },
    {
      "epoch": 0.8081896551724138,
      "grad_norm": 5.085103988647461,
      "learning_rate": 8.222857142857144e-06,
      "loss": 0.2228,
      "step": 1125
    },
    {
      "epoch": 0.8261494252873564,
      "grad_norm": 4.345165729522705,
      "learning_rate": 8.151428571428572e-06,
      "loss": 0.2246,
      "step": 1150
    },
    {
      "epoch": 0.8441091954022989,
      "grad_norm": 5.478984355926514,
      "learning_rate": 8.08e-06,
      "loss": 0.2041,
      "step": 1175
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 5.148238182067871,
      "learning_rate": 8.00857142857143e-06,
      "loss": 0.2123,
      "step": 1200
    },
    {
      "epoch": 0.8800287356321839,
      "grad_norm": 5.2301106452941895,
      "learning_rate": 7.937142857142857e-06,
      "loss": 0.2135,
      "step": 1225
    },
    {
      "epoch": 0.8979885057471264,
      "grad_norm": 5.3838019371032715,
      "learning_rate": 7.865714285714286e-06,
      "loss": 0.1978,
      "step": 1250
    },
    {
      "epoch": 0.915948275862069,
      "grad_norm": 5.178905487060547,
      "learning_rate": 7.794285714285715e-06,
      "loss": 0.2127,
      "step": 1275
    },
    {
      "epoch": 0.9339080459770115,
      "grad_norm": 5.5203351974487305,
      "learning_rate": 7.722857142857142e-06,
      "loss": 0.2128,
      "step": 1300
    },
    {
      "epoch": 0.951867816091954,
      "grad_norm": 5.354878902435303,
      "learning_rate": 7.651428571428571e-06,
      "loss": 0.1924,
      "step": 1325
    },
    {
      "epoch": 0.9698275862068966,
      "grad_norm": 5.155806064605713,
      "learning_rate": 7.58e-06,
      "loss": 0.209,
      "step": 1350
    },
    {
      "epoch": 0.9877873563218391,
      "grad_norm": 4.325410842895508,
      "learning_rate": 7.508571428571429e-06,
      "loss": 0.2019,
      "step": 1375
    },
    {
      "epoch": 1.0057471264367817,
      "grad_norm": 3.931236743927002,
      "learning_rate": 7.4371428571428575e-06,
      "loss": 0.2028,
      "step": 1400
    },
    {
      "epoch": 1.0237068965517242,
      "grad_norm": 4.375692844390869,
      "learning_rate": 7.365714285714286e-06,
      "loss": 0.1572,
      "step": 1425
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 3.4934616088867188,
      "learning_rate": 7.294285714285715e-06,
      "loss": 0.1423,
      "step": 1450
    },
    {
      "epoch": 1.0596264367816093,
      "grad_norm": 4.1985249519348145,
      "learning_rate": 7.222857142857144e-06,
      "loss": 0.1477,
      "step": 1475
    },
    {
      "epoch": 1.0775862068965518,
      "grad_norm": 4.258643627166748,
      "learning_rate": 7.151428571428573e-06,
      "loss": 0.145,
      "step": 1500
    },
    {
      "epoch": 1.0955459770114944,
      "grad_norm": 3.0669021606445312,
      "learning_rate": 7.08e-06,
      "loss": 0.1404,
      "step": 1525
    },
    {
      "epoch": 1.1135057471264367,
      "grad_norm": 4.19215726852417,
      "learning_rate": 7.008571428571429e-06,
      "loss": 0.1427,
      "step": 1550
    },
    {
      "epoch": 1.1314655172413792,
      "grad_norm": 4.806066989898682,
      "learning_rate": 6.937142857142858e-06,
      "loss": 0.1339,
      "step": 1575
    },
    {
      "epoch": 1.1494252873563218,
      "grad_norm": 4.574463367462158,
      "learning_rate": 6.865714285714287e-06,
      "loss": 0.1427,
      "step": 1600
    },
    {
      "epoch": 1.1673850574712643,
      "grad_norm": 4.810680866241455,
      "learning_rate": 6.794285714285714e-06,
      "loss": 0.1313,
      "step": 1625
    },
    {
      "epoch": 1.1853448275862069,
      "grad_norm": 4.358596324920654,
      "learning_rate": 6.722857142857143e-06,
      "loss": 0.1487,
      "step": 1650
    },
    {
      "epoch": 1.2033045977011494,
      "grad_norm": 3.6320159435272217,
      "learning_rate": 6.651428571428572e-06,
      "loss": 0.1373,
      "step": 1675
    },
    {
      "epoch": 1.221264367816092,
      "grad_norm": 4.551269054412842,
      "learning_rate": 6.5800000000000005e-06,
      "loss": 0.1322,
      "step": 1700
    },
    {
      "epoch": 1.2392241379310345,
      "grad_norm": 3.798658847808838,
      "learning_rate": 6.5085714285714295e-06,
      "loss": 0.1324,
      "step": 1725
    },
    {
      "epoch": 1.257183908045977,
      "grad_norm": 3.2752606868743896,
      "learning_rate": 6.437142857142858e-06,
      "loss": 0.1409,
      "step": 1750
    },
    {
      "epoch": 1.2751436781609196,
      "grad_norm": 3.7986721992492676,
      "learning_rate": 6.365714285714286e-06,
      "loss": 0.1316,
      "step": 1775
    },
    {
      "epoch": 1.293103448275862,
      "grad_norm": 3.678849458694458,
      "learning_rate": 6.294285714285715e-06,
      "loss": 0.131,
      "step": 1800
    },
    {
      "epoch": 1.3110632183908046,
      "grad_norm": 4.378350257873535,
      "learning_rate": 6.222857142857144e-06,
      "loss": 0.1213,
      "step": 1825
    },
    {
      "epoch": 1.3290229885057472,
      "grad_norm": 4.888228893280029,
      "learning_rate": 6.151428571428571e-06,
      "loss": 0.1499,
      "step": 1850
    },
    {
      "epoch": 1.3469827586206897,
      "grad_norm": 3.7335093021392822,
      "learning_rate": 6.08e-06,
      "loss": 0.1261,
      "step": 1875
    },
    {
      "epoch": 1.3649425287356323,
      "grad_norm": 4.47926664352417,
      "learning_rate": 6.008571428571429e-06,
      "loss": 0.1361,
      "step": 1900
    },
    {
      "epoch": 1.3829022988505746,
      "grad_norm": 3.6603622436523438,
      "learning_rate": 5.937142857142858e-06,
      "loss": 0.135,
      "step": 1925
    },
    {
      "epoch": 1.4008620689655173,
      "grad_norm": 3.8697991371154785,
      "learning_rate": 5.865714285714286e-06,
      "loss": 0.1157,
      "step": 1950
    },
    {
      "epoch": 1.4188218390804597,
      "grad_norm": 3.898298740386963,
      "learning_rate": 5.794285714285715e-06,
      "loss": 0.1262,
      "step": 1975
    },
    {
      "epoch": 1.4367816091954024,
      "grad_norm": 6.249912261962891,
      "learning_rate": 5.722857142857144e-06,
      "loss": 0.128,
      "step": 2000
    },
    {
      "epoch": 1.4367816091954024,
      "eval_cer": 7.455244974874371,
      "eval_loss": 0.20655690133571625,
      "eval_runtime": 206.1569,
      "eval_samples_per_second": 2.217,
      "eval_steps_per_second": 0.281,
      "step": 2000
    },
    {
      "epoch": 1.4547413793103448,
      "grad_norm": 3.680793285369873,
      "learning_rate": 5.651428571428572e-06,
      "loss": 0.1249,
      "step": 2025
    },
    {
      "epoch": 1.4727011494252873,
      "grad_norm": 4.394272804260254,
      "learning_rate": 5.580000000000001e-06,
      "loss": 0.1327,
      "step": 2050
    },
    {
      "epoch": 1.4906609195402298,
      "grad_norm": 3.971735954284668,
      "learning_rate": 5.508571428571429e-06,
      "loss": 0.1256,
      "step": 2075
    },
    {
      "epoch": 1.5086206896551724,
      "grad_norm": 3.7754571437835693,
      "learning_rate": 5.437142857142857e-06,
      "loss": 0.12,
      "step": 2100
    },
    {
      "epoch": 1.526580459770115,
      "grad_norm": 5.009246826171875,
      "learning_rate": 5.365714285714286e-06,
      "loss": 0.1258,
      "step": 2125
    },
    {
      "epoch": 1.5445402298850575,
      "grad_norm": 3.481464147567749,
      "learning_rate": 5.294285714285715e-06,
      "loss": 0.1174,
      "step": 2150
    },
    {
      "epoch": 1.5625,
      "grad_norm": 3.6977555751800537,
      "learning_rate": 5.2228571428571425e-06,
      "loss": 0.1248,
      "step": 2175
    },
    {
      "epoch": 1.5804597701149425,
      "grad_norm": 3.6558139324188232,
      "learning_rate": 5.1514285714285715e-06,
      "loss": 0.1145,
      "step": 2200
    },
    {
      "epoch": 1.598419540229885,
      "grad_norm": 3.2170028686523438,
      "learning_rate": 5.0800000000000005e-06,
      "loss": 0.1181,
      "step": 2225
    },
    {
      "epoch": 1.6163793103448276,
      "grad_norm": 3.2904043197631836,
      "learning_rate": 5.0085714285714295e-06,
      "loss": 0.1261,
      "step": 2250
    },
    {
      "epoch": 1.6343390804597702,
      "grad_norm": 6.5361223220825195,
      "learning_rate": 4.937142857142858e-06,
      "loss": 0.1257,
      "step": 2275
    },
    {
      "epoch": 1.6522988505747125,
      "grad_norm": 3.8789713382720947,
      "learning_rate": 4.865714285714287e-06,
      "loss": 0.1166,
      "step": 2300
    },
    {
      "epoch": 1.6702586206896552,
      "grad_norm": 3.6503453254699707,
      "learning_rate": 4.794285714285715e-06,
      "loss": 0.1166,
      "step": 2325
    },
    {
      "epoch": 1.6882183908045976,
      "grad_norm": 2.955310344696045,
      "learning_rate": 4.722857142857144e-06,
      "loss": 0.1148,
      "step": 2350
    },
    {
      "epoch": 1.7061781609195403,
      "grad_norm": 3.2080459594726562,
      "learning_rate": 4.651428571428572e-06,
      "loss": 0.1153,
      "step": 2375
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 2.9274492263793945,
      "learning_rate": 4.58e-06,
      "loss": 0.1263,
      "step": 2400
    },
    {
      "epoch": 1.7420977011494254,
      "grad_norm": 3.9885170459747314,
      "learning_rate": 4.508571428571429e-06,
      "loss": 0.1041,
      "step": 2425
    },
    {
      "epoch": 1.7600574712643677,
      "grad_norm": 5.113827228546143,
      "learning_rate": 4.437142857142857e-06,
      "loss": 0.1179,
      "step": 2450
    },
    {
      "epoch": 1.7780172413793105,
      "grad_norm": 5.254714012145996,
      "learning_rate": 4.3657142857142855e-06,
      "loss": 0.1183,
      "step": 2475
    },
    {
      "epoch": 1.7959770114942528,
      "grad_norm": 5.9119696617126465,
      "learning_rate": 4.2942857142857146e-06,
      "loss": 0.1183,
      "step": 2500
    },
    {
      "epoch": 1.8139367816091954,
      "grad_norm": 3.5279624462127686,
      "learning_rate": 4.222857142857143e-06,
      "loss": 0.1174,
      "step": 2525
    },
    {
      "epoch": 1.831896551724138,
      "grad_norm": 3.6319098472595215,
      "learning_rate": 4.151428571428572e-06,
      "loss": 0.1084,
      "step": 2550
    },
    {
      "epoch": 1.8498563218390804,
      "grad_norm": 4.165521621704102,
      "learning_rate": 4.08e-06,
      "loss": 0.1086,
      "step": 2575
    },
    {
      "epoch": 1.867816091954023,
      "grad_norm": 3.5935449600219727,
      "learning_rate": 4.008571428571429e-06,
      "loss": 0.1013,
      "step": 2600
    },
    {
      "epoch": 1.8857758620689655,
      "grad_norm": 4.520440101623535,
      "learning_rate": 3.937142857142858e-06,
      "loss": 0.1085,
      "step": 2625
    },
    {
      "epoch": 1.903735632183908,
      "grad_norm": 4.8552327156066895,
      "learning_rate": 3.865714285714286e-06,
      "loss": 0.1075,
      "step": 2650
    },
    {
      "epoch": 1.9216954022988506,
      "grad_norm": 5.094956874847412,
      "learning_rate": 3.7942857142857147e-06,
      "loss": 0.1153,
      "step": 2675
    },
    {
      "epoch": 1.9396551724137931,
      "grad_norm": 4.34927225112915,
      "learning_rate": 3.722857142857143e-06,
      "loss": 0.1092,
      "step": 2700
    },
    {
      "epoch": 1.9576149425287355,
      "grad_norm": 2.9122841358184814,
      "learning_rate": 3.651428571428572e-06,
      "loss": 0.1094,
      "step": 2725
    },
    {
      "epoch": 1.9755747126436782,
      "grad_norm": 4.0797529220581055,
      "learning_rate": 3.58e-06,
      "loss": 0.1031,
      "step": 2750
    },
    {
      "epoch": 1.9935344827586206,
      "grad_norm": 3.478084087371826,
      "learning_rate": 3.508571428571429e-06,
      "loss": 0.1035,
      "step": 2775
    },
    {
      "epoch": 2.0114942528735633,
      "grad_norm": 2.5905420780181885,
      "learning_rate": 3.437142857142857e-06,
      "loss": 0.086,
      "step": 2800
    },
    {
      "epoch": 2.0294540229885056,
      "grad_norm": 2.674304246902466,
      "learning_rate": 3.3657142857142862e-06,
      "loss": 0.0741,
      "step": 2825
    },
    {
      "epoch": 2.0474137931034484,
      "grad_norm": 3.0569241046905518,
      "learning_rate": 3.2942857142857144e-06,
      "loss": 0.0722,
      "step": 2850
    },
    {
      "epoch": 2.0653735632183907,
      "grad_norm": 2.8325390815734863,
      "learning_rate": 3.222857142857143e-06,
      "loss": 0.0759,
      "step": 2875
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 3.281291961669922,
      "learning_rate": 3.151428571428572e-06,
      "loss": 0.0803,
      "step": 2900
    },
    {
      "epoch": 2.101293103448276,
      "grad_norm": 2.3541665077209473,
      "learning_rate": 3.08e-06,
      "loss": 0.0672,
      "step": 2925
    },
    {
      "epoch": 2.1192528735632186,
      "grad_norm": 3.7438085079193115,
      "learning_rate": 3.008571428571429e-06,
      "loss": 0.0737,
      "step": 2950
    },
    {
      "epoch": 2.137212643678161,
      "grad_norm": 2.571523427963257,
      "learning_rate": 2.9371428571428573e-06,
      "loss": 0.065,
      "step": 2975
    },
    {
      "epoch": 2.1551724137931036,
      "grad_norm": 2.559622287750244,
      "learning_rate": 2.865714285714286e-06,
      "loss": 0.0725,
      "step": 3000
    },
    {
      "epoch": 2.1551724137931036,
      "eval_cer": 5.755339195979899,
      "eval_loss": 0.17403484880924225,
      "eval_runtime": 203.7679,
      "eval_samples_per_second": 2.243,
      "eval_steps_per_second": 0.285,
      "step": 3000
    },
    {
      "epoch": 2.173132183908046,
      "grad_norm": 2.8300039768218994,
      "learning_rate": 2.7942857142857145e-06,
      "loss": 0.0691,
      "step": 3025
    },
    {
      "epoch": 2.1910919540229887,
      "grad_norm": 2.468639850616455,
      "learning_rate": 2.722857142857143e-06,
      "loss": 0.0834,
      "step": 3050
    },
    {
      "epoch": 2.209051724137931,
      "grad_norm": 3.254262685775757,
      "learning_rate": 2.6514285714285713e-06,
      "loss": 0.0727,
      "step": 3075
    },
    {
      "epoch": 2.2270114942528734,
      "grad_norm": 3.118223190307617,
      "learning_rate": 2.5800000000000003e-06,
      "loss": 0.0783,
      "step": 3100
    },
    {
      "epoch": 2.244971264367816,
      "grad_norm": 2.0613930225372314,
      "learning_rate": 2.5085714285714285e-06,
      "loss": 0.0677,
      "step": 3125
    },
    {
      "epoch": 2.2629310344827585,
      "grad_norm": 4.234766483306885,
      "learning_rate": 2.4371428571428575e-06,
      "loss": 0.0695,
      "step": 3150
    },
    {
      "epoch": 2.280890804597701,
      "grad_norm": 3.091860294342041,
      "learning_rate": 2.365714285714286e-06,
      "loss": 0.0674,
      "step": 3175
    },
    {
      "epoch": 2.2988505747126435,
      "grad_norm": 2.968728542327881,
      "learning_rate": 2.2942857142857146e-06,
      "loss": 0.0793,
      "step": 3200
    },
    {
      "epoch": 2.3168103448275863,
      "grad_norm": 2.635185956954956,
      "learning_rate": 2.222857142857143e-06,
      "loss": 0.0669,
      "step": 3225
    },
    {
      "epoch": 2.3347701149425286,
      "grad_norm": 4.698062896728516,
      "learning_rate": 2.1514285714285714e-06,
      "loss": 0.0792,
      "step": 3250
    },
    {
      "epoch": 2.3527298850574714,
      "grad_norm": 2.380126953125,
      "learning_rate": 2.08e-06,
      "loss": 0.0686,
      "step": 3275
    },
    {
      "epoch": 2.3706896551724137,
      "grad_norm": 2.4431111812591553,
      "learning_rate": 2.0085714285714286e-06,
      "loss": 0.0683,
      "step": 3300
    },
    {
      "epoch": 2.3886494252873565,
      "grad_norm": 2.8130743503570557,
      "learning_rate": 1.9371428571428576e-06,
      "loss": 0.0726,
      "step": 3325
    },
    {
      "epoch": 2.406609195402299,
      "grad_norm": 2.115403890609741,
      "learning_rate": 1.865714285714286e-06,
      "loss": 0.0715,
      "step": 3350
    },
    {
      "epoch": 2.4245689655172415,
      "grad_norm": 3.2712090015411377,
      "learning_rate": 1.7942857142857146e-06,
      "loss": 0.0677,
      "step": 3375
    },
    {
      "epoch": 2.442528735632184,
      "grad_norm": 2.903533697128296,
      "learning_rate": 1.7228571428571432e-06,
      "loss": 0.0722,
      "step": 3400
    },
    {
      "epoch": 2.4604885057471266,
      "grad_norm": 2.4043657779693604,
      "learning_rate": 1.6514285714285715e-06,
      "loss": 0.0723,
      "step": 3425
    },
    {
      "epoch": 2.478448275862069,
      "grad_norm": 2.5334341526031494,
      "learning_rate": 1.5800000000000001e-06,
      "loss": 0.0729,
      "step": 3450
    },
    {
      "epoch": 2.4964080459770113,
      "grad_norm": 2.8699309825897217,
      "learning_rate": 1.5085714285714287e-06,
      "loss": 0.0733,
      "step": 3475
    },
    {
      "epoch": 2.514367816091954,
      "grad_norm": 3.6407063007354736,
      "learning_rate": 1.4371428571428573e-06,
      "loss": 0.0912,
      "step": 3500
    },
    {
      "epoch": 2.532327586206897,
      "grad_norm": 2.9028170108795166,
      "learning_rate": 1.3657142857142857e-06,
      "loss": 0.0704,
      "step": 3525
    },
    {
      "epoch": 2.550287356321839,
      "grad_norm": 2.9066832065582275,
      "learning_rate": 1.2942857142857143e-06,
      "loss": 0.0666,
      "step": 3550
    },
    {
      "epoch": 2.5682471264367814,
      "grad_norm": 3.7357940673828125,
      "learning_rate": 1.222857142857143e-06,
      "loss": 0.075,
      "step": 3575
    },
    {
      "epoch": 2.586206896551724,
      "grad_norm": 2.8801915645599365,
      "learning_rate": 1.1514285714285714e-06,
      "loss": 0.0792,
      "step": 3600
    },
    {
      "epoch": 2.6041666666666665,
      "grad_norm": 3.622131824493408,
      "learning_rate": 1.08e-06,
      "loss": 0.07,
      "step": 3625
    },
    {
      "epoch": 2.6221264367816093,
      "grad_norm": 3.064757823944092,
      "learning_rate": 1.0085714285714286e-06,
      "loss": 0.066,
      "step": 3650
    },
    {
      "epoch": 2.6400862068965516,
      "grad_norm": 2.172606945037842,
      "learning_rate": 9.371428571428571e-07,
      "loss": 0.0579,
      "step": 3675
    },
    {
      "epoch": 2.6580459770114944,
      "grad_norm": 4.104800701141357,
      "learning_rate": 8.657142857142858e-07,
      "loss": 0.078,
      "step": 3700
    },
    {
      "epoch": 2.6760057471264367,
      "grad_norm": 3.803704261779785,
      "learning_rate": 7.942857142857144e-07,
      "loss": 0.0768,
      "step": 3725
    },
    {
      "epoch": 2.6939655172413794,
      "grad_norm": 1.876494288444519,
      "learning_rate": 7.228571428571429e-07,
      "loss": 0.066,
      "step": 3750
    },
    {
      "epoch": 2.7119252873563218,
      "grad_norm": 4.310028076171875,
      "learning_rate": 6.514285714285715e-07,
      "loss": 0.0665,
      "step": 3775
    },
    {
      "epoch": 2.7298850574712645,
      "grad_norm": 3.0695154666900635,
      "learning_rate": 5.800000000000001e-07,
      "loss": 0.0695,
      "step": 3800
    },
    {
      "epoch": 2.747844827586207,
      "grad_norm": 3.2505669593811035,
      "learning_rate": 5.085714285714286e-07,
      "loss": 0.065,
      "step": 3825
    },
    {
      "epoch": 2.765804597701149,
      "grad_norm": 2.551088809967041,
      "learning_rate": 4.371428571428572e-07,
      "loss": 0.0649,
      "step": 3850
    },
    {
      "epoch": 2.783764367816092,
      "grad_norm": 3.110604763031006,
      "learning_rate": 3.657142857142858e-07,
      "loss": 0.0704,
      "step": 3875
    },
    {
      "epoch": 2.8017241379310347,
      "grad_norm": 2.836848020553589,
      "learning_rate": 2.942857142857143e-07,
      "loss": 0.0698,
      "step": 3900
    },
    {
      "epoch": 2.819683908045977,
      "grad_norm": 2.818726062774658,
      "learning_rate": 2.228571428571429e-07,
      "loss": 0.0722,
      "step": 3925
    },
    {
      "epoch": 2.8376436781609193,
      "grad_norm": 2.192012071609497,
      "learning_rate": 1.5142857142857144e-07,
      "loss": 0.0657,
      "step": 3950
    },
    {
      "epoch": 2.855603448275862,
      "grad_norm": 3.1882786750793457,
      "learning_rate": 8e-08,
      "loss": 0.0751,
      "step": 3975
    },
    {
      "epoch": 2.873563218390805,
      "grad_norm": 3.386892557144165,
      "learning_rate": 8.571428571428572e-09,
      "loss": 0.0673,
      "step": 4000
    },
    {
      "epoch": 2.873563218390805,
      "eval_cer": 5.288159547738693,
      "eval_loss": 0.16544735431671143,
      "eval_runtime": 204.9568,
      "eval_samples_per_second": 2.23,
      "eval_steps_per_second": 0.283,
      "step": 4000
    }
  ],
  "logging_steps": 25,
  "max_steps": 4000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.14987227430912e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}