{ "best_metric": 3.299715518951416, "best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M_low_2000_634/checkpoint-90000", "epoch": 10.0, "eval_steps": 1000, "global_step": 92750, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.005390835579514825, "grad_norm": 1.3841798305511475, "learning_rate": 0.0003, "loss": 8.6258, "step": 50 }, { "epoch": 0.01078167115902965, "grad_norm": 2.7142674922943115, "learning_rate": 0.0006, "loss": 6.9518, "step": 100 }, { "epoch": 0.016172506738544475, "grad_norm": 1.7501500844955444, "learning_rate": 0.0005996762007555315, "loss": 6.4766, "step": 150 }, { "epoch": 0.0215633423180593, "grad_norm": 2.089492082595825, "learning_rate": 0.000599352401511063, "loss": 6.2242, "step": 200 }, { "epoch": 0.026954177897574125, "grad_norm": 1.0795507431030273, "learning_rate": 0.0005990286022665946, "loss": 6.0814, "step": 250 }, { "epoch": 0.03234501347708895, "grad_norm": 2.468144655227661, "learning_rate": 0.0005987048030221263, "loss": 5.9861, "step": 300 }, { "epoch": 0.03773584905660377, "grad_norm": 1.4130438566207886, "learning_rate": 0.0005983810037776578, "loss": 5.8758, "step": 350 }, { "epoch": 0.0431266846361186, "grad_norm": 1.5332931280136108, "learning_rate": 0.0005980572045331894, "loss": 5.7848, "step": 400 }, { "epoch": 0.04851752021563342, "grad_norm": 1.451593279838562, "learning_rate": 0.0005977334052887209, "loss": 5.713, "step": 450 }, { "epoch": 0.05390835579514825, "grad_norm": 1.3739091157913208, "learning_rate": 0.0005974096060442526, "loss": 5.6508, "step": 500 }, { "epoch": 0.05929919137466307, "grad_norm": 1.2970751523971558, "learning_rate": 0.0005970858067997841, "loss": 5.5723, "step": 550 }, { "epoch": 0.0646900269541779, "grad_norm": 1.2038410902023315, "learning_rate": 0.0005967620075553157, "loss": 5.5093, "step": 600 }, { "epoch": 0.07008086253369272, "grad_norm": 1.2364015579223633, "learning_rate": 
0.0005964382083108472, "loss": 5.4167, "step": 650 }, { "epoch": 0.07547169811320754, "grad_norm": 1.1776434183120728, "learning_rate": 0.0005961144090663788, "loss": 5.3687, "step": 700 }, { "epoch": 0.08086253369272237, "grad_norm": 1.2404123544692993, "learning_rate": 0.0005957906098219104, "loss": 5.2965, "step": 750 }, { "epoch": 0.0862533692722372, "grad_norm": 0.8565037250518799, "learning_rate": 0.0005954668105774419, "loss": 5.2536, "step": 800 }, { "epoch": 0.09164420485175202, "grad_norm": 0.8625679612159729, "learning_rate": 0.0005951430113329735, "loss": 5.1917, "step": 850 }, { "epoch": 0.09703504043126684, "grad_norm": 1.4676177501678467, "learning_rate": 0.0005948192120885051, "loss": 5.1705, "step": 900 }, { "epoch": 0.10242587601078167, "grad_norm": 1.3620243072509766, "learning_rate": 0.0005944954128440366, "loss": 5.129, "step": 950 }, { "epoch": 0.1078167115902965, "grad_norm": 1.802571177482605, "learning_rate": 0.0005941716135995682, "loss": 5.0781, "step": 1000 }, { "epoch": 0.1078167115902965, "eval_accuracy": 0.22607515020446844, "eval_loss": 5.030185699462891, "eval_runtime": 184.9577, "eval_samples_per_second": 97.379, "eval_steps_per_second": 6.088, "step": 1000 }, { "epoch": 0.11320754716981132, "grad_norm": 1.0640990734100342, "learning_rate": 0.0005938478143550997, "loss": 5.0384, "step": 1050 }, { "epoch": 0.11859838274932614, "grad_norm": 0.9512579441070557, "learning_rate": 0.0005935240151106314, "loss": 5.01, "step": 1100 }, { "epoch": 0.12398921832884097, "grad_norm": 1.4336494207382202, "learning_rate": 0.0005932002158661629, "loss": 4.986, "step": 1150 }, { "epoch": 0.1293800539083558, "grad_norm": 0.986595094203949, "learning_rate": 0.0005928764166216945, "loss": 4.9237, "step": 1200 }, { "epoch": 0.1347708894878706, "grad_norm": 0.9924365878105164, "learning_rate": 0.000592552617377226, "loss": 4.8993, "step": 1250 }, { "epoch": 0.14016172506738545, "grad_norm": 1.405576229095459, "learning_rate": 0.0005922288181327577, 
"loss": 4.8778, "step": 1300 }, { "epoch": 0.14555256064690028, "grad_norm": 0.7532362341880798, "learning_rate": 0.0005919050188882893, "loss": 4.8412, "step": 1350 }, { "epoch": 0.1509433962264151, "grad_norm": 0.9290658235549927, "learning_rate": 0.0005915812196438207, "loss": 4.8361, "step": 1400 }, { "epoch": 0.15633423180592992, "grad_norm": 1.0212785005569458, "learning_rate": 0.0005912574203993524, "loss": 4.8431, "step": 1450 }, { "epoch": 0.16172506738544473, "grad_norm": 1.3435090780258179, "learning_rate": 0.0005909336211548839, "loss": 4.801, "step": 1500 }, { "epoch": 0.16711590296495957, "grad_norm": 1.0386897325515747, "learning_rate": 0.0005906098219104155, "loss": 4.7558, "step": 1550 }, { "epoch": 0.1725067385444744, "grad_norm": 0.8441452383995056, "learning_rate": 0.000590286022665947, "loss": 4.749, "step": 1600 }, { "epoch": 0.1778975741239892, "grad_norm": 0.9853036999702454, "learning_rate": 0.0005899622234214787, "loss": 4.7201, "step": 1650 }, { "epoch": 0.18328840970350405, "grad_norm": 0.9956970810890198, "learning_rate": 0.0005896384241770102, "loss": 4.6933, "step": 1700 }, { "epoch": 0.18867924528301888, "grad_norm": 0.8019234538078308, "learning_rate": 0.0005893146249325418, "loss": 4.7021, "step": 1750 }, { "epoch": 0.1940700808625337, "grad_norm": 0.8837598562240601, "learning_rate": 0.0005889908256880733, "loss": 4.6704, "step": 1800 }, { "epoch": 0.19946091644204852, "grad_norm": 1.046995997428894, "learning_rate": 0.0005886670264436049, "loss": 4.6374, "step": 1850 }, { "epoch": 0.20485175202156333, "grad_norm": 1.0166348218917847, "learning_rate": 0.0005883432271991365, "loss": 4.6279, "step": 1900 }, { "epoch": 0.21024258760107817, "grad_norm": 0.8063094615936279, "learning_rate": 0.0005880194279546681, "loss": 4.6256, "step": 1950 }, { "epoch": 0.215633423180593, "grad_norm": 0.9398548603057861, "learning_rate": 0.0005876956287101996, "loss": 4.5763, "step": 2000 }, { "epoch": 0.215633423180593, "eval_accuracy": 
0.2714252357849155, "eval_loss": 4.502427577972412, "eval_runtime": 183.5351, "eval_samples_per_second": 98.134, "eval_steps_per_second": 6.135, "step": 2000 }, { "epoch": 0.2210242587601078, "grad_norm": 0.980785608291626, "learning_rate": 0.0005873718294657312, "loss": 4.5575, "step": 2050 }, { "epoch": 0.22641509433962265, "grad_norm": 0.8288772106170654, "learning_rate": 0.0005870480302212628, "loss": 4.5462, "step": 2100 }, { "epoch": 0.23180592991913745, "grad_norm": 0.9006871581077576, "learning_rate": 0.0005867242309767943, "loss": 4.5289, "step": 2150 }, { "epoch": 0.2371967654986523, "grad_norm": 0.7568821310997009, "learning_rate": 0.0005864004317323259, "loss": 4.4877, "step": 2200 }, { "epoch": 0.24258760107816713, "grad_norm": 0.8699207305908203, "learning_rate": 0.0005860766324878575, "loss": 4.5022, "step": 2250 }, { "epoch": 0.24797843665768193, "grad_norm": 0.9681375026702881, "learning_rate": 0.000585752833243389, "loss": 4.4781, "step": 2300 }, { "epoch": 0.25336927223719674, "grad_norm": 1.0356428623199463, "learning_rate": 0.0005854290339989206, "loss": 4.4547, "step": 2350 }, { "epoch": 0.2587601078167116, "grad_norm": 1.0217968225479126, "learning_rate": 0.0005851052347544521, "loss": 4.4496, "step": 2400 }, { "epoch": 0.2641509433962264, "grad_norm": 0.9390143752098083, "learning_rate": 0.0005847814355099838, "loss": 4.4324, "step": 2450 }, { "epoch": 0.2695417789757412, "grad_norm": 0.8502791523933411, "learning_rate": 0.0005844576362655154, "loss": 4.4238, "step": 2500 }, { "epoch": 0.2749326145552561, "grad_norm": 0.9871562719345093, "learning_rate": 0.0005841338370210469, "loss": 4.3975, "step": 2550 }, { "epoch": 0.2803234501347709, "grad_norm": 0.9953110814094543, "learning_rate": 0.0005838100377765785, "loss": 4.3725, "step": 2600 }, { "epoch": 0.2857142857142857, "grad_norm": 0.8370280861854553, "learning_rate": 0.0005834862385321101, "loss": 4.3714, "step": 2650 }, { "epoch": 0.29110512129380056, "grad_norm": 0.8906695246696472, 
"learning_rate": 0.0005831624392876417, "loss": 4.3636, "step": 2700 }, { "epoch": 0.29649595687331537, "grad_norm": 0.8926903009414673, "learning_rate": 0.0005828386400431731, "loss": 4.3712, "step": 2750 }, { "epoch": 0.3018867924528302, "grad_norm": 0.6380210518836975, "learning_rate": 0.0005825148407987048, "loss": 4.3652, "step": 2800 }, { "epoch": 0.30727762803234504, "grad_norm": 0.9012649655342102, "learning_rate": 0.0005821910415542363, "loss": 4.3264, "step": 2850 }, { "epoch": 0.31266846361185985, "grad_norm": 0.8436964750289917, "learning_rate": 0.0005818672423097679, "loss": 4.3127, "step": 2900 }, { "epoch": 0.31805929919137466, "grad_norm": 0.8248408436775208, "learning_rate": 0.0005815434430652994, "loss": 4.3424, "step": 2950 }, { "epoch": 0.32345013477088946, "grad_norm": 0.7463982105255127, "learning_rate": 0.0005812196438208311, "loss": 4.3107, "step": 3000 }, { "epoch": 0.32345013477088946, "eval_accuracy": 0.2991547565898248, "eval_loss": 4.231179237365723, "eval_runtime": 183.5046, "eval_samples_per_second": 98.15, "eval_steps_per_second": 6.136, "step": 3000 }, { "epoch": 0.3288409703504043, "grad_norm": 0.7356308102607727, "learning_rate": 0.0005808958445763626, "loss": 4.3028, "step": 3050 }, { "epoch": 0.33423180592991913, "grad_norm": 0.7332613468170166, "learning_rate": 0.0005805720453318942, "loss": 4.281, "step": 3100 }, { "epoch": 0.33962264150943394, "grad_norm": 0.9393066763877869, "learning_rate": 0.0005802482460874257, "loss": 4.2707, "step": 3150 }, { "epoch": 0.3450134770889488, "grad_norm": 0.7228361964225769, "learning_rate": 0.0005799244468429573, "loss": 4.2769, "step": 3200 }, { "epoch": 0.3504043126684636, "grad_norm": 0.7841880917549133, "learning_rate": 0.0005796006475984889, "loss": 4.2499, "step": 3250 }, { "epoch": 0.3557951482479784, "grad_norm": 0.7926186919212341, "learning_rate": 0.0005792768483540205, "loss": 4.2548, "step": 3300 }, { "epoch": 0.3611859838274933, "grad_norm": 0.8690990805625916, "learning_rate": 
0.000578953049109552, "loss": 4.254, "step": 3350 }, { "epoch": 0.3665768194070081, "grad_norm": 0.7845708727836609, "learning_rate": 0.0005786292498650836, "loss": 4.2325, "step": 3400 }, { "epoch": 0.3719676549865229, "grad_norm": 0.8615543246269226, "learning_rate": 0.0005783054506206152, "loss": 4.2247, "step": 3450 }, { "epoch": 0.37735849056603776, "grad_norm": 0.7429792284965515, "learning_rate": 0.0005779816513761467, "loss": 4.216, "step": 3500 }, { "epoch": 0.38274932614555257, "grad_norm": 0.7726993560791016, "learning_rate": 0.0005776578521316782, "loss": 4.1962, "step": 3550 }, { "epoch": 0.3881401617250674, "grad_norm": 0.7264295220375061, "learning_rate": 0.0005773340528872099, "loss": 4.2044, "step": 3600 }, { "epoch": 0.3935309973045822, "grad_norm": 0.7918074131011963, "learning_rate": 0.0005770102536427414, "loss": 4.219, "step": 3650 }, { "epoch": 0.39892183288409705, "grad_norm": 0.7423751950263977, "learning_rate": 0.000576686454398273, "loss": 4.2045, "step": 3700 }, { "epoch": 0.40431266846361186, "grad_norm": 0.6395682096481323, "learning_rate": 0.0005763626551538045, "loss": 4.1897, "step": 3750 }, { "epoch": 0.40970350404312667, "grad_norm": 0.6632921695709229, "learning_rate": 0.0005760388559093362, "loss": 4.182, "step": 3800 }, { "epoch": 0.41509433962264153, "grad_norm": 0.6251309514045715, "learning_rate": 0.0005757150566648678, "loss": 4.1772, "step": 3850 }, { "epoch": 0.42048517520215634, "grad_norm": 0.8533205986022949, "learning_rate": 0.0005753912574203993, "loss": 4.1699, "step": 3900 }, { "epoch": 0.42587601078167114, "grad_norm": 0.707554817199707, "learning_rate": 0.0005750674581759309, "loss": 4.1668, "step": 3950 }, { "epoch": 0.431266846361186, "grad_norm": 0.7947620153427124, "learning_rate": 0.0005747436589314624, "loss": 4.1524, "step": 4000 }, { "epoch": 0.431266846361186, "eval_accuracy": 0.3127696153503061, "eval_loss": 4.084148406982422, "eval_runtime": 183.63, "eval_samples_per_second": 98.083, 
"eval_steps_per_second": 6.132, "step": 4000 }, { "epoch": 0.4366576819407008, "grad_norm": 0.5997743606567383, "learning_rate": 0.0005744198596869941, "loss": 4.1636, "step": 4050 }, { "epoch": 0.4420485175202156, "grad_norm": 0.7036544680595398, "learning_rate": 0.0005740960604425255, "loss": 4.156, "step": 4100 }, { "epoch": 0.4474393530997305, "grad_norm": 0.7143358588218689, "learning_rate": 0.0005737722611980572, "loss": 4.1384, "step": 4150 }, { "epoch": 0.4528301886792453, "grad_norm": 0.6587009429931641, "learning_rate": 0.0005734484619535887, "loss": 4.113, "step": 4200 }, { "epoch": 0.4582210242587601, "grad_norm": 0.704142153263092, "learning_rate": 0.0005731246627091203, "loss": 4.1487, "step": 4250 }, { "epoch": 0.4636118598382749, "grad_norm": 0.5820618867874146, "learning_rate": 0.0005728008634646518, "loss": 4.1219, "step": 4300 }, { "epoch": 0.46900269541778977, "grad_norm": 0.6929563879966736, "learning_rate": 0.0005724770642201835, "loss": 4.1263, "step": 4350 }, { "epoch": 0.4743935309973046, "grad_norm": 0.7341722846031189, "learning_rate": 0.000572153264975715, "loss": 4.1214, "step": 4400 }, { "epoch": 0.4797843665768194, "grad_norm": 0.5978211164474487, "learning_rate": 0.0005718294657312466, "loss": 4.0946, "step": 4450 }, { "epoch": 0.48517520215633425, "grad_norm": 0.6326689124107361, "learning_rate": 0.0005715056664867781, "loss": 4.0929, "step": 4500 }, { "epoch": 0.49056603773584906, "grad_norm": 0.6336254477500916, "learning_rate": 0.0005711818672423097, "loss": 4.0867, "step": 4550 }, { "epoch": 0.49595687331536387, "grad_norm": 0.6374770998954773, "learning_rate": 0.0005708580679978413, "loss": 4.0936, "step": 4600 }, { "epoch": 0.5013477088948787, "grad_norm": 0.7158097624778748, "learning_rate": 0.0005705342687533729, "loss": 4.0868, "step": 4650 }, { "epoch": 0.5067385444743935, "grad_norm": 0.6559752225875854, "learning_rate": 0.0005702104695089044, "loss": 4.0694, "step": 4700 }, { "epoch": 0.5121293800539084, "grad_norm": 
0.6569370627403259, "learning_rate": 0.000569886670264436, "loss": 4.0812, "step": 4750 }, { "epoch": 0.5175202156334232, "grad_norm": 0.6718046069145203, "learning_rate": 0.0005695628710199675, "loss": 4.0682, "step": 4800 }, { "epoch": 0.522911051212938, "grad_norm": 0.6529590487480164, "learning_rate": 0.0005692390717754991, "loss": 4.0635, "step": 4850 }, { "epoch": 0.5283018867924528, "grad_norm": 0.6065393686294556, "learning_rate": 0.0005689152725310306, "loss": 4.0566, "step": 4900 }, { "epoch": 0.5336927223719676, "grad_norm": 0.711345911026001, "learning_rate": 0.0005685914732865623, "loss": 4.0593, "step": 4950 }, { "epoch": 0.5390835579514824, "grad_norm": 0.6914499998092651, "learning_rate": 0.0005682676740420939, "loss": 4.0724, "step": 5000 }, { "epoch": 0.5390835579514824, "eval_accuracy": 0.32181616344262765, "eval_loss": 3.98882794380188, "eval_runtime": 183.5537, "eval_samples_per_second": 98.124, "eval_steps_per_second": 6.134, "step": 5000 }, { "epoch": 0.5444743935309974, "grad_norm": 0.6853660345077515, "learning_rate": 0.0005679438747976254, "loss": 4.0452, "step": 5050 }, { "epoch": 0.5498652291105122, "grad_norm": 0.6142929792404175, "learning_rate": 0.000567620075553157, "loss": 4.0442, "step": 5100 }, { "epoch": 0.555256064690027, "grad_norm": 0.6437095999717712, "learning_rate": 0.0005672962763086886, "loss": 4.0475, "step": 5150 }, { "epoch": 0.5606469002695418, "grad_norm": 0.7355647683143616, "learning_rate": 0.0005669724770642202, "loss": 4.0336, "step": 5200 }, { "epoch": 0.5660377358490566, "grad_norm": 0.6523656249046326, "learning_rate": 0.0005666486778197517, "loss": 4.0297, "step": 5250 }, { "epoch": 0.5714285714285714, "grad_norm": 0.6674039959907532, "learning_rate": 0.0005663248785752833, "loss": 4.0411, "step": 5300 }, { "epoch": 0.5768194070080862, "grad_norm": 0.599259078502655, "learning_rate": 0.0005660010793308148, "loss": 4.0249, "step": 5350 }, { "epoch": 0.5822102425876011, "grad_norm": 0.6059934496879578, 
"learning_rate": 0.0005656772800863465, "loss": 4.0416, "step": 5400 }, { "epoch": 0.5876010781671159, "grad_norm": 0.5647556185722351, "learning_rate": 0.0005653534808418779, "loss": 4.0184, "step": 5450 }, { "epoch": 0.5929919137466307, "grad_norm": 0.6619639992713928, "learning_rate": 0.0005650296815974096, "loss": 4.0231, "step": 5500 }, { "epoch": 0.5983827493261455, "grad_norm": 0.6255214810371399, "learning_rate": 0.0005647058823529411, "loss": 4.0157, "step": 5550 }, { "epoch": 0.6037735849056604, "grad_norm": 0.5285488963127136, "learning_rate": 0.0005643820831084727, "loss": 4.005, "step": 5600 }, { "epoch": 0.6091644204851752, "grad_norm": 0.8316670656204224, "learning_rate": 0.0005640582838640042, "loss": 4.0181, "step": 5650 }, { "epoch": 0.6145552560646901, "grad_norm": 0.7264182567596436, "learning_rate": 0.0005637344846195358, "loss": 4.0123, "step": 5700 }, { "epoch": 0.6199460916442049, "grad_norm": 0.6089359521865845, "learning_rate": 0.0005634106853750674, "loss": 4.0165, "step": 5750 }, { "epoch": 0.6253369272237197, "grad_norm": 0.769210696220398, "learning_rate": 0.000563086886130599, "loss": 3.9894, "step": 5800 }, { "epoch": 0.6307277628032345, "grad_norm": 0.7284867763519287, "learning_rate": 0.0005627630868861305, "loss": 3.9899, "step": 5850 }, { "epoch": 0.6361185983827493, "grad_norm": 0.604616105556488, "learning_rate": 0.0005624392876416621, "loss": 3.9758, "step": 5900 }, { "epoch": 0.6415094339622641, "grad_norm": 0.685613751411438, "learning_rate": 0.0005621154883971937, "loss": 3.9787, "step": 5950 }, { "epoch": 0.6469002695417789, "grad_norm": 0.685235321521759, "learning_rate": 0.0005617916891527253, "loss": 3.9609, "step": 6000 }, { "epoch": 0.6469002695417789, "eval_accuracy": 0.32830632639045004, "eval_loss": 3.917071580886841, "eval_runtime": 183.57, "eval_samples_per_second": 98.115, "eval_steps_per_second": 6.134, "step": 6000 }, { "epoch": 0.6522911051212938, "grad_norm": 0.6348211169242859, "learning_rate": 
0.0005614678899082568, "loss": 3.9973, "step": 6050 }, { "epoch": 0.6576819407008087, "grad_norm": 0.5842711925506592, "learning_rate": 0.0005611440906637884, "loss": 3.9818, "step": 6100 }, { "epoch": 0.6630727762803235, "grad_norm": 0.6440193057060242, "learning_rate": 0.00056082029141932, "loss": 3.9752, "step": 6150 }, { "epoch": 0.6684636118598383, "grad_norm": 0.6132529377937317, "learning_rate": 0.0005604964921748515, "loss": 3.9854, "step": 6200 }, { "epoch": 0.6738544474393531, "grad_norm": 0.6459117531776428, "learning_rate": 0.000560172692930383, "loss": 3.9462, "step": 6250 }, { "epoch": 0.6792452830188679, "grad_norm": 0.6339119672775269, "learning_rate": 0.0005598488936859147, "loss": 3.9565, "step": 6300 }, { "epoch": 0.6846361185983828, "grad_norm": 0.6577672958374023, "learning_rate": 0.0005595250944414463, "loss": 3.9497, "step": 6350 }, { "epoch": 0.6900269541778976, "grad_norm": 0.6435871720314026, "learning_rate": 0.0005592012951969778, "loss": 3.9424, "step": 6400 }, { "epoch": 0.6954177897574124, "grad_norm": 0.7063788771629333, "learning_rate": 0.0005588774959525094, "loss": 3.9694, "step": 6450 }, { "epoch": 0.7008086253369272, "grad_norm": 0.5613532662391663, "learning_rate": 0.000558553696708041, "loss": 3.9543, "step": 6500 }, { "epoch": 0.706199460916442, "grad_norm": 0.559969425201416, "learning_rate": 0.0005582298974635726, "loss": 3.9512, "step": 6550 }, { "epoch": 0.7115902964959568, "grad_norm": 0.58429354429245, "learning_rate": 0.0005579060982191041, "loss": 3.9474, "step": 6600 }, { "epoch": 0.7169811320754716, "grad_norm": "Infinity", "learning_rate": 0.0005575887749595251, "loss": 3.9414, "step": 6650 }, { "epoch": 0.7223719676549866, "grad_norm": 0.589440107345581, "learning_rate": 0.0005572649757150566, "loss": 3.9341, "step": 6700 }, { "epoch": 0.7277628032345014, "grad_norm": 0.7343171834945679, "learning_rate": 0.0005569411764705882, "loss": 3.9383, "step": 6750 }, { "epoch": 0.7331536388140162, "grad_norm": 
0.6029019355773926, "learning_rate": 0.0005566173772261198, "loss": 3.9469, "step": 6800 }, { "epoch": 0.738544474393531, "grad_norm": 0.5020064115524292, "learning_rate": 0.0005562935779816513, "loss": 3.931, "step": 6850 }, { "epoch": 0.7439353099730458, "grad_norm": 0.6130984425544739, "learning_rate": 0.0005559697787371828, "loss": 3.9264, "step": 6900 }, { "epoch": 0.7493261455525606, "grad_norm": 0.6600217819213867, "learning_rate": 0.0005556459794927145, "loss": 3.9137, "step": 6950 }, { "epoch": 0.7547169811320755, "grad_norm": 0.5343778133392334, "learning_rate": 0.000555322180248246, "loss": 3.9274, "step": 7000 }, { "epoch": 0.7547169811320755, "eval_accuracy": 0.33360293736563035, "eval_loss": 3.858804941177368, "eval_runtime": 183.535, "eval_samples_per_second": 98.134, "eval_steps_per_second": 6.135, "step": 7000 }, { "epoch": 0.7601078167115903, "grad_norm": 0.6115443110466003, "learning_rate": 0.0005549983810037776, "loss": 3.931, "step": 7050 }, { "epoch": 0.7654986522911051, "grad_norm": 0.5609748959541321, "learning_rate": 0.0005546745817593091, "loss": 3.9311, "step": 7100 }, { "epoch": 0.77088948787062, "grad_norm": 0.6355463266372681, "learning_rate": 0.0005543507825148408, "loss": 3.9063, "step": 7150 }, { "epoch": 0.7762803234501348, "grad_norm": 0.6557473540306091, "learning_rate": 0.0005540269832703723, "loss": 3.9159, "step": 7200 }, { "epoch": 0.7816711590296496, "grad_norm": 0.6337311863899231, "learning_rate": 0.0005537031840259039, "loss": 3.9184, "step": 7250 }, { "epoch": 0.7870619946091644, "grad_norm": 0.5891764760017395, "learning_rate": 0.0005533793847814354, "loss": 3.9032, "step": 7300 }, { "epoch": 0.7924528301886793, "grad_norm": 0.6272363066673279, "learning_rate": 0.000553055585536967, "loss": 3.9148, "step": 7350 }, { "epoch": 0.7978436657681941, "grad_norm": 0.6207209229469299, "learning_rate": 0.0005527317862924987, "loss": 3.8796, "step": 7400 }, { "epoch": 0.8032345013477089, "grad_norm": 0.5580083131790161, 
"learning_rate": 0.0005524079870480301, "loss": 3.8865, "step": 7450 }, { "epoch": 0.8086253369272237, "grad_norm": 0.7376517057418823, "learning_rate": 0.0005520841878035618, "loss": 3.9018, "step": 7500 }, { "epoch": 0.8140161725067385, "grad_norm": 0.6539864540100098, "learning_rate": 0.0005517603885590933, "loss": 3.8971, "step": 7550 }, { "epoch": 0.8194070080862533, "grad_norm": 0.640784502029419, "learning_rate": 0.0005514365893146249, "loss": 3.9161, "step": 7600 }, { "epoch": 0.8247978436657682, "grad_norm": 0.5456549525260925, "learning_rate": 0.0005511127900701564, "loss": 3.8959, "step": 7650 }, { "epoch": 0.8301886792452831, "grad_norm": 0.5484740734100342, "learning_rate": 0.000550788990825688, "loss": 3.871, "step": 7700 }, { "epoch": 0.8355795148247979, "grad_norm": 0.605842649936676, "learning_rate": 0.0005504651915812196, "loss": 3.8834, "step": 7750 }, { "epoch": 0.8409703504043127, "grad_norm": 0.5898163914680481, "learning_rate": 0.0005501413923367512, "loss": 3.8977, "step": 7800 }, { "epoch": 0.8463611859838275, "grad_norm": 0.7656612992286682, "learning_rate": 0.0005498175930922827, "loss": 3.8864, "step": 7850 }, { "epoch": 0.8517520215633423, "grad_norm": 0.8152493834495544, "learning_rate": 0.0005494937938478143, "loss": 3.8847, "step": 7900 }, { "epoch": 0.8571428571428571, "grad_norm": 0.5602722764015198, "learning_rate": 0.0005491699946033459, "loss": 3.8864, "step": 7950 }, { "epoch": 0.862533692722372, "grad_norm": 0.5723937153816223, "learning_rate": 0.0005488461953588775, "loss": 3.8618, "step": 8000 }, { "epoch": 0.862533692722372, "eval_accuracy": 0.33786419497282644, "eval_loss": 3.8131983280181885, "eval_runtime": 183.2819, "eval_samples_per_second": 98.269, "eval_steps_per_second": 6.144, "step": 8000 }, { "epoch": 0.8679245283018868, "grad_norm": 0.5539727210998535, "learning_rate": 0.000548522396114409, "loss": 3.879, "step": 8050 }, { "epoch": 0.8733153638814016, "grad_norm": 0.7839546203613281, "learning_rate": 
0.0005481985968699406, "loss": 3.8589, "step": 8100 }, { "epoch": 0.8787061994609164, "grad_norm": 0.5483460426330566, "learning_rate": 0.0005478747976254721, "loss": 3.8715, "step": 8150 }, { "epoch": 0.8840970350404312, "grad_norm": 0.5753620266914368, "learning_rate": 0.0005475509983810037, "loss": 3.8617, "step": 8200 }, { "epoch": 0.889487870619946, "grad_norm": 0.5939356088638306, "learning_rate": 0.0005472271991365352, "loss": 3.8748, "step": 8250 }, { "epoch": 0.894878706199461, "grad_norm": 0.5494759678840637, "learning_rate": 0.0005469033998920669, "loss": 3.8485, "step": 8300 }, { "epoch": 0.9002695417789758, "grad_norm": 0.5566021800041199, "learning_rate": 0.0005465796006475984, "loss": 3.8652, "step": 8350 }, { "epoch": 0.9056603773584906, "grad_norm": 0.5274083018302917, "learning_rate": 0.00054625580140313, "loss": 3.866, "step": 8400 }, { "epoch": 0.9110512129380054, "grad_norm": 0.5682411193847656, "learning_rate": 0.0005459320021586615, "loss": 3.8363, "step": 8450 }, { "epoch": 0.9164420485175202, "grad_norm": 0.5468075275421143, "learning_rate": 0.0005456082029141932, "loss": 3.8486, "step": 8500 }, { "epoch": 0.921832884097035, "grad_norm": 0.5886979699134827, "learning_rate": 0.0005452844036697248, "loss": 3.8496, "step": 8550 }, { "epoch": 0.9272237196765498, "grad_norm": 0.566277027130127, "learning_rate": 0.0005449606044252563, "loss": 3.8782, "step": 8600 }, { "epoch": 0.9326145552560647, "grad_norm": 0.6201555728912354, "learning_rate": 0.0005446368051807879, "loss": 3.8507, "step": 8650 }, { "epoch": 0.9380053908355795, "grad_norm": 0.5942217111587524, "learning_rate": 0.0005443130059363194, "loss": 3.8613, "step": 8700 }, { "epoch": 0.9433962264150944, "grad_norm": 0.5227022767066956, "learning_rate": 0.0005439892066918511, "loss": 3.8428, "step": 8750 }, { "epoch": 0.9487870619946092, "grad_norm": 0.6055588126182556, "learning_rate": 0.0005436654074473825, "loss": 3.8415, "step": 8800 }, { "epoch": 0.954177897574124, "grad_norm": 
0.5056185722351074, "learning_rate": 0.0005433416082029142, "loss": 3.8431, "step": 8850 }, { "epoch": 0.9595687331536388, "grad_norm": 0.6033360958099365, "learning_rate": 0.0005430178089584457, "loss": 3.8452, "step": 8900 }, { "epoch": 0.9649595687331537, "grad_norm": 0.6905035972595215, "learning_rate": 0.0005426940097139773, "loss": 3.8455, "step": 8950 }, { "epoch": 0.9703504043126685, "grad_norm": 0.6233043670654297, "learning_rate": 0.0005423766864543982, "loss": 3.8625, "step": 9000 }, { "epoch": 0.9703504043126685, "eval_accuracy": 0.34123297775951444, "eval_loss": 3.775101900100708, "eval_runtime": 183.702, "eval_samples_per_second": 98.045, "eval_steps_per_second": 6.129, "step": 9000 }, { "epoch": 0.9757412398921833, "grad_norm": 0.6275179982185364, "learning_rate": 0.0005420528872099298, "loss": 3.839, "step": 9050 }, { "epoch": 0.9811320754716981, "grad_norm": 0.5922442674636841, "learning_rate": 0.0005417290879654613, "loss": 3.8288, "step": 9100 }, { "epoch": 0.9865229110512129, "grad_norm": 0.5687377452850342, "learning_rate": 0.000541405288720993, "loss": 3.8416, "step": 9150 }, { "epoch": 0.9919137466307277, "grad_norm": 0.5579700469970703, "learning_rate": 0.0005410814894765245, "loss": 3.8436, "step": 9200 }, { "epoch": 0.9973045822102425, "grad_norm": 0.5312648415565491, "learning_rate": 0.0005407576902320561, "loss": 3.8355, "step": 9250 }, { "epoch": 1.0026954177897573, "grad_norm": 0.5382205247879028, "learning_rate": 0.0005404338909875876, "loss": 3.8133, "step": 9300 }, { "epoch": 1.0080862533692723, "grad_norm": 0.6140266060829163, "learning_rate": 0.0005401100917431192, "loss": 3.7783, "step": 9350 }, { "epoch": 1.013477088948787, "grad_norm": 0.5851580500602722, "learning_rate": 0.0005397862924986508, "loss": 3.7885, "step": 9400 }, { "epoch": 1.0188679245283019, "grad_norm": 0.5980308651924133, "learning_rate": 0.0005394624932541824, "loss": 3.7783, "step": 9450 }, { "epoch": 1.0242587601078168, "grad_norm": 0.5626499652862549, 
"learning_rate": 0.0005391386940097139, "loss": 3.7758, "step": 9500 }, { "epoch": 1.0296495956873315, "grad_norm": 0.5909863114356995, "learning_rate": 0.0005388148947652455, "loss": 3.7869, "step": 9550 }, { "epoch": 1.0350404312668464, "grad_norm": 0.5526650547981262, "learning_rate": 0.000538491095520777, "loss": 3.7655, "step": 9600 }, { "epoch": 1.0404312668463611, "grad_norm": 0.5704436898231506, "learning_rate": 0.0005381672962763086, "loss": 3.7576, "step": 9650 }, { "epoch": 1.045822102425876, "grad_norm": 0.5485429167747498, "learning_rate": 0.0005378434970318403, "loss": 3.7695, "step": 9700 }, { "epoch": 1.0512129380053907, "grad_norm": 0.5683411955833435, "learning_rate": 0.0005375196977873718, "loss": 3.7622, "step": 9750 }, { "epoch": 1.0566037735849056, "grad_norm": 0.5670942068099976, "learning_rate": 0.0005371958985429034, "loss": 3.7672, "step": 9800 }, { "epoch": 1.0619946091644206, "grad_norm": 0.6690695881843567, "learning_rate": 0.0005368720992984349, "loss": 3.7521, "step": 9850 }, { "epoch": 1.0673854447439353, "grad_norm": 0.6253776550292969, "learning_rate": 0.0005365483000539665, "loss": 3.762, "step": 9900 }, { "epoch": 1.0727762803234502, "grad_norm": 0.5629079937934875, "learning_rate": 0.0005362245008094981, "loss": 3.7594, "step": 9950 }, { "epoch": 1.0781671159029649, "grad_norm": 0.5760037302970886, "learning_rate": 0.0005359007015650297, "loss": 3.7656, "step": 10000 }, { "epoch": 1.0781671159029649, "eval_accuracy": 0.34546098758303934, "eval_loss": 3.7440903186798096, "eval_runtime": 183.3277, "eval_samples_per_second": 98.245, "eval_steps_per_second": 6.142, "step": 10000 }, { "epoch": 1.0835579514824798, "grad_norm": 0.5451662540435791, "learning_rate": 0.0005355769023205612, "loss": 3.755, "step": 10050 }, { "epoch": 1.0889487870619945, "grad_norm": 0.5988211035728455, "learning_rate": 0.0005352531030760928, "loss": 3.7454, "step": 10100 }, { "epoch": 1.0943396226415094, "grad_norm": 0.6496660709381104, "learning_rate": 
0.0005349293038316244, "loss": 3.777, "step": 10150 }, { "epoch": 1.0997304582210243, "grad_norm": 0.5746974945068359, "learning_rate": 0.0005346055045871559, "loss": 3.7792, "step": 10200 }, { "epoch": 1.105121293800539, "grad_norm": 0.5537252426147461, "learning_rate": 0.0005342817053426874, "loss": 3.7429, "step": 10250 }, { "epoch": 1.110512129380054, "grad_norm": 0.6647258400917053, "learning_rate": 0.0005339579060982191, "loss": 3.7704, "step": 10300 }, { "epoch": 1.1159029649595686, "grad_norm": 0.5465642809867859, "learning_rate": 0.0005336341068537506, "loss": 3.7703, "step": 10350 }, { "epoch": 1.1212938005390836, "grad_norm": 0.5680330991744995, "learning_rate": 0.0005333103076092822, "loss": 3.7573, "step": 10400 }, { "epoch": 1.1266846361185983, "grad_norm": 0.5591064095497131, "learning_rate": 0.0005329865083648137, "loss": 3.7225, "step": 10450 }, { "epoch": 1.1320754716981132, "grad_norm": 0.5781257152557373, "learning_rate": 0.0005326627091203454, "loss": 3.7439, "step": 10500 }, { "epoch": 1.137466307277628, "grad_norm": 0.5286954045295715, "learning_rate": 0.0005323389098758769, "loss": 3.7463, "step": 10550 }, { "epoch": 1.1428571428571428, "grad_norm": 0.5636879205703735, "learning_rate": 0.0005320151106314085, "loss": 3.7536, "step": 10600 }, { "epoch": 1.1482479784366577, "grad_norm": 0.5693058371543884, "learning_rate": 0.00053169131138694, "loss": 3.7615, "step": 10650 }, { "epoch": 1.1536388140161726, "grad_norm": 0.62501060962677, "learning_rate": 0.0005313675121424716, "loss": 3.7439, "step": 10700 }, { "epoch": 1.1590296495956873, "grad_norm": 0.5845157504081726, "learning_rate": 0.0005310437128980032, "loss": 3.7449, "step": 10750 }, { "epoch": 1.1644204851752022, "grad_norm": 0.5758997201919556, "learning_rate": 0.0005307199136535348, "loss": 3.7219, "step": 10800 }, { "epoch": 1.169811320754717, "grad_norm": 0.6027551889419556, "learning_rate": 0.0005303961144090663, "loss": 3.7569, "step": 10850 }, { "epoch": 1.1752021563342319, 
"grad_norm": 0.5498311519622803, "learning_rate": 0.0005300723151645979, "loss": 3.7575, "step": 10900 }, { "epoch": 1.1805929919137466, "grad_norm": 0.5848629474639893, "learning_rate": 0.0005297485159201295, "loss": 3.7171, "step": 10950 }, { "epoch": 1.1859838274932615, "grad_norm": 0.5120974779129028, "learning_rate": 0.000529424716675661, "loss": 3.7343, "step": 11000 }, { "epoch": 1.1859838274932615, "eval_accuracy": 0.3476425202645785, "eval_loss": 3.7190380096435547, "eval_runtime": 183.3061, "eval_samples_per_second": 98.256, "eval_steps_per_second": 6.143, "step": 11000 }, { "epoch": 1.1913746630727764, "grad_norm": 0.5583203434944153, "learning_rate": 0.000529107393416082, "loss": 3.7451, "step": 11050 }, { "epoch": 1.196765498652291, "grad_norm": 0.5904495716094971, "learning_rate": 0.0005287835941716135, "loss": 3.74, "step": 11100 }, { "epoch": 1.202156334231806, "grad_norm": 0.5562983751296997, "learning_rate": 0.0005284597949271452, "loss": 3.7345, "step": 11150 }, { "epoch": 1.2075471698113207, "grad_norm": 0.676979660987854, "learning_rate": 0.0005281359956826767, "loss": 3.7461, "step": 11200 }, { "epoch": 1.2129380053908356, "grad_norm": 0.507462739944458, "learning_rate": 0.0005278121964382083, "loss": 3.756, "step": 11250 }, { "epoch": 1.2183288409703503, "grad_norm": 0.57256019115448, "learning_rate": 0.0005274883971937398, "loss": 3.7256, "step": 11300 }, { "epoch": 1.2237196765498652, "grad_norm": 0.5681268572807312, "learning_rate": 0.0005271645979492714, "loss": 3.747, "step": 11350 }, { "epoch": 1.2291105121293802, "grad_norm": 0.5520728230476379, "learning_rate": 0.000526840798704803, "loss": 3.7469, "step": 11400 }, { "epoch": 1.2345013477088949, "grad_norm": 0.5298144817352295, "learning_rate": 0.0005265169994603346, "loss": 3.7509, "step": 11450 }, { "epoch": 1.2398921832884098, "grad_norm": 0.5538373589515686, "learning_rate": 0.0005261932002158661, "loss": 3.7152, "step": 11500 }, { "epoch": 1.2452830188679245, "grad_norm": 
0.601010262966156, "learning_rate": 0.0005258694009713977, "loss": 3.7366, "step": 11550 }, { "epoch": 1.2506738544474394, "grad_norm": 0.5493463277816772, "learning_rate": 0.0005255456017269292, "loss": 3.7331, "step": 11600 }, { "epoch": 1.256064690026954, "grad_norm": 0.5921666622161865, "learning_rate": 0.0005252218024824608, "loss": 3.732, "step": 11650 }, { "epoch": 1.261455525606469, "grad_norm": 0.5424702763557434, "learning_rate": 0.0005248980032379924, "loss": 3.7297, "step": 11700 }, { "epoch": 1.266846361185984, "grad_norm": 0.6088834404945374, "learning_rate": 0.000524574203993524, "loss": 3.7384, "step": 11750 }, { "epoch": 1.2722371967654986, "grad_norm": 0.5311514139175415, "learning_rate": 0.0005242504047490555, "loss": 3.7268, "step": 11800 }, { "epoch": 1.2776280323450135, "grad_norm": 0.5391415953636169, "learning_rate": 0.0005239266055045871, "loss": 3.7088, "step": 11850 }, { "epoch": 1.2830188679245282, "grad_norm": 0.5253583788871765, "learning_rate": 0.0005236028062601186, "loss": 3.7158, "step": 11900 }, { "epoch": 1.2884097035040432, "grad_norm": 0.5387663841247559, "learning_rate": 0.0005232790070156503, "loss": 3.7243, "step": 11950 }, { "epoch": 1.2938005390835579, "grad_norm": 0.5523077845573425, "learning_rate": 0.0005229552077711818, "loss": 3.7195, "step": 12000 }, { "epoch": 1.2938005390835579, "eval_accuracy": 0.34966281206060096, "eval_loss": 3.695082664489746, "eval_runtime": 183.4346, "eval_samples_per_second": 98.188, "eval_steps_per_second": 6.138, "step": 12000 }, { "epoch": 1.2991913746630728, "grad_norm": 0.5675397515296936, "learning_rate": 0.0005226314085267134, "loss": 3.701, "step": 12050 }, { "epoch": 1.3045822102425877, "grad_norm": 0.6067949533462524, "learning_rate": 0.000522307609282245, "loss": 3.7387, "step": 12100 }, { "epoch": 1.3099730458221024, "grad_norm": 0.5414419174194336, "learning_rate": 0.0005219838100377766, "loss": 3.7205, "step": 12150 }, { "epoch": 1.3153638814016173, "grad_norm": 
0.6006377339363098, "learning_rate": 0.000521660010793308, "loss": 3.7045, "step": 12200 }, { "epoch": 1.320754716981132, "grad_norm": 0.6085687875747681, "learning_rate": 0.0005213362115488396, "loss": 3.7319, "step": 12250 }, { "epoch": 1.326145552560647, "grad_norm": 0.5864773392677307, "learning_rate": 0.0005210124123043713, "loss": 3.6989, "step": 12300 }, { "epoch": 1.3315363881401616, "grad_norm": 0.5509727001190186, "learning_rate": 0.0005206886130599028, "loss": 3.725, "step": 12350 }, { "epoch": 1.3369272237196765, "grad_norm": 0.5826424360275269, "learning_rate": 0.0005203648138154344, "loss": 3.6955, "step": 12400 }, { "epoch": 1.3423180592991915, "grad_norm": 0.5667881369590759, "learning_rate": 0.0005200410145709659, "loss": 3.7183, "step": 12450 }, { "epoch": 1.3477088948787062, "grad_norm": 0.6350487470626831, "learning_rate": 0.0005197172153264976, "loss": 3.7223, "step": 12500 }, { "epoch": 1.353099730458221, "grad_norm": 0.565371572971344, "learning_rate": 0.0005193934160820291, "loss": 3.7318, "step": 12550 }, { "epoch": 1.3584905660377358, "grad_norm": 0.5364895462989807, "learning_rate": 0.0005190696168375607, "loss": 3.7087, "step": 12600 }, { "epoch": 1.3638814016172507, "grad_norm": 0.6211722493171692, "learning_rate": 0.0005187458175930922, "loss": 3.7054, "step": 12650 }, { "epoch": 1.3692722371967654, "grad_norm": 0.5237928628921509, "learning_rate": 0.0005184220183486238, "loss": 3.7025, "step": 12700 }, { "epoch": 1.3746630727762803, "grad_norm": 0.5414772033691406, "learning_rate": 0.0005180982191041554, "loss": 3.7171, "step": 12750 }, { "epoch": 1.3800539083557952, "grad_norm": 0.5802706480026245, "learning_rate": 0.000517774419859687, "loss": 3.7097, "step": 12800 }, { "epoch": 1.38544474393531, "grad_norm": 0.5995144248008728, "learning_rate": 0.0005174506206152185, "loss": 3.7307, "step": 12850 }, { "epoch": 1.3908355795148248, "grad_norm": 0.5822244882583618, "learning_rate": 0.0005171268213707501, "loss": 3.7303, "step": 12900 
}, { "epoch": 1.3962264150943398, "grad_norm": 0.5466347336769104, "learning_rate": 0.0005168030221262816, "loss": 3.7282, "step": 12950 }, { "epoch": 1.4016172506738545, "grad_norm": 0.5338788032531738, "learning_rate": 0.0005164792228818132, "loss": 3.7112, "step": 13000 }, { "epoch": 1.4016172506738545, "eval_accuracy": 0.35203926802287927, "eval_loss": 3.6746132373809814, "eval_runtime": 183.6347, "eval_samples_per_second": 98.081, "eval_steps_per_second": 6.132, "step": 13000 }, { "epoch": 1.4070080862533692, "grad_norm": 0.6075963973999023, "learning_rate": 0.0005161554236373448, "loss": 3.692, "step": 13050 }, { "epoch": 1.412398921832884, "grad_norm": 0.5783334374427795, "learning_rate": 0.0005158381003777657, "loss": 3.7088, "step": 13100 }, { "epoch": 1.417789757412399, "grad_norm": 0.4899292588233948, "learning_rate": 0.0005155143011332973, "loss": 3.7093, "step": 13150 }, { "epoch": 1.4231805929919137, "grad_norm": 0.5416874885559082, "learning_rate": 0.0005151905018888289, "loss": 3.7333, "step": 13200 }, { "epoch": 1.4285714285714286, "grad_norm": 0.5604920983314514, "learning_rate": 0.0005148667026443604, "loss": 3.6912, "step": 13250 }, { "epoch": 1.4339622641509435, "grad_norm": 0.5507873892784119, "learning_rate": 0.000514542903399892, "loss": 3.698, "step": 13300 }, { "epoch": 1.4393530997304582, "grad_norm": 0.5818184614181519, "learning_rate": 0.0005142191041554237, "loss": 3.6776, "step": 13350 }, { "epoch": 1.444743935309973, "grad_norm": 0.6013184785842896, "learning_rate": 0.0005138953049109552, "loss": 3.7165, "step": 13400 }, { "epoch": 1.4501347708894878, "grad_norm": 0.6403388381004333, "learning_rate": 0.0005135715056664868, "loss": 3.7128, "step": 13450 }, { "epoch": 1.4555256064690028, "grad_norm": 0.595710813999176, "learning_rate": 0.0005132477064220183, "loss": 3.6982, "step": 13500 }, { "epoch": 1.4609164420485174, "grad_norm": 0.6105093955993652, "learning_rate": 0.0005129239071775499, "loss": 3.7149, "step": 13550 }, { "epoch": 
1.4663072776280324, "grad_norm": 0.566348135471344, "learning_rate": 0.0005126001079330814, "loss": 3.6997, "step": 13600 }, { "epoch": 1.4716981132075473, "grad_norm": 0.5651235580444336, "learning_rate": 0.000512276308688613, "loss": 3.7034, "step": 13650 }, { "epoch": 1.477088948787062, "grad_norm": 0.5697197318077087, "learning_rate": 0.0005119525094441446, "loss": 3.7153, "step": 13700 }, { "epoch": 1.482479784366577, "grad_norm": 0.6158642172813416, "learning_rate": 0.0005116287101996762, "loss": 3.6978, "step": 13750 }, { "epoch": 1.4878706199460916, "grad_norm": 0.5030224323272705, "learning_rate": 0.0005113049109552077, "loss": 3.6963, "step": 13800 }, { "epoch": 1.4932614555256065, "grad_norm": 0.5537750720977783, "learning_rate": 0.0005109811117107393, "loss": 3.6907, "step": 13850 }, { "epoch": 1.4986522911051212, "grad_norm": 0.6205897927284241, "learning_rate": 0.0005106573124662708, "loss": 3.6805, "step": 13900 }, { "epoch": 1.5040431266846361, "grad_norm": 0.5059426426887512, "learning_rate": 0.0005103335132218025, "loss": 3.7003, "step": 13950 }, { "epoch": 1.509433962264151, "grad_norm": 0.5683820843696594, "learning_rate": 0.000510009713977334, "loss": 3.6911, "step": 14000 }, { "epoch": 1.509433962264151, "eval_accuracy": 0.35447570037923115, "eval_loss": 3.6494083404541016, "eval_runtime": 183.437, "eval_samples_per_second": 98.186, "eval_steps_per_second": 6.138, "step": 14000 }, { "epoch": 1.5148247978436657, "grad_norm": 0.5536700487136841, "learning_rate": 0.0005096859147328656, "loss": 3.6698, "step": 14050 }, { "epoch": 1.5202156334231804, "grad_norm": 0.5463404655456543, "learning_rate": 0.0005093621154883971, "loss": 3.7006, "step": 14100 }, { "epoch": 1.5256064690026954, "grad_norm": 0.5179774761199951, "learning_rate": 0.0005090383162439288, "loss": 3.6828, "step": 14150 }, { "epoch": 1.5309973045822103, "grad_norm": 0.5397458076477051, "learning_rate": 0.0005087145169994602, "loss": 3.6819, "step": 14200 }, { "epoch": 
1.536388140161725, "grad_norm": 0.5047475695610046, "learning_rate": 0.0005083907177549918, "loss": 3.6879, "step": 14250 }, { "epoch": 1.54177897574124, "grad_norm": 0.5295481085777283, "learning_rate": 0.0005080669185105234, "loss": 3.6971, "step": 14300 }, { "epoch": 1.5471698113207548, "grad_norm": 0.49893248081207275, "learning_rate": 0.000507743119266055, "loss": 3.7041, "step": 14350 }, { "epoch": 1.5525606469002695, "grad_norm": 0.6531094312667847, "learning_rate": 0.0005074193200215865, "loss": 3.6874, "step": 14400 }, { "epoch": 1.5579514824797842, "grad_norm": 0.5354477763175964, "learning_rate": 0.0005070955207771181, "loss": 3.7074, "step": 14450 }, { "epoch": 1.5633423180592994, "grad_norm": 0.7448453903198242, "learning_rate": 0.0005067717215326498, "loss": 3.6686, "step": 14500 }, { "epoch": 1.568733153638814, "grad_norm": 0.5273525714874268, "learning_rate": 0.0005064479222881813, "loss": 3.6823, "step": 14550 }, { "epoch": 1.5741239892183287, "grad_norm": 0.5093368887901306, "learning_rate": 0.0005061241230437129, "loss": 3.6727, "step": 14600 }, { "epoch": 1.5795148247978437, "grad_norm": 0.5493596196174622, "learning_rate": 0.0005058003237992444, "loss": 3.6886, "step": 14650 }, { "epoch": 1.5849056603773586, "grad_norm": 0.5913286209106445, "learning_rate": 0.000505476524554776, "loss": 3.6968, "step": 14700 }, { "epoch": 1.5902964959568733, "grad_norm": 0.5922813415527344, "learning_rate": 0.0005051527253103076, "loss": 3.6575, "step": 14750 }, { "epoch": 1.595687331536388, "grad_norm": 0.562714695930481, "learning_rate": 0.0005048289260658392, "loss": 3.6826, "step": 14800 }, { "epoch": 1.6010781671159031, "grad_norm": 0.5367226600646973, "learning_rate": 0.0005045051268213707, "loss": 3.6719, "step": 14850 }, { "epoch": 1.6064690026954178, "grad_norm": 0.5268148183822632, "learning_rate": 0.0005041813275769023, "loss": 3.6715, "step": 14900 }, { "epoch": 1.6118598382749325, "grad_norm": 0.5168265104293823, "learning_rate": 
0.0005038575283324338, "loss": 3.6692, "step": 14950 }, { "epoch": 1.6172506738544474, "grad_norm": 0.5893277525901794, "learning_rate": 0.0005035337290879654, "loss": 3.6767, "step": 15000 }, { "epoch": 1.6172506738544474, "eval_accuracy": 0.356096910118311, "eval_loss": 3.634113311767578, "eval_runtime": 183.2068, "eval_samples_per_second": 98.31, "eval_steps_per_second": 6.146, "step": 15000 }, { "epoch": 1.6226415094339623, "grad_norm": 0.5801236033439636, "learning_rate": 0.000503209929843497, "loss": 3.6864, "step": 15050 }, { "epoch": 1.628032345013477, "grad_norm": 0.5008922815322876, "learning_rate": 0.0005028926065839179, "loss": 3.6614, "step": 15100 }, { "epoch": 1.633423180592992, "grad_norm": 0.5347914099693298, "learning_rate": 0.0005025688073394495, "loss": 3.6811, "step": 15150 }, { "epoch": 1.6388140161725069, "grad_norm": 0.6196523904800415, "learning_rate": 0.0005022450080949811, "loss": 3.6564, "step": 15200 }, { "epoch": 1.6442048517520216, "grad_norm": 0.5722506046295166, "learning_rate": 0.0005019212088505126, "loss": 3.6983, "step": 15250 }, { "epoch": 1.6495956873315363, "grad_norm": 0.5670409202575684, "learning_rate": 0.0005015974096060442, "loss": 3.6822, "step": 15300 }, { "epoch": 1.6549865229110512, "grad_norm": 0.5775005221366882, "learning_rate": 0.0005012736103615758, "loss": 3.676, "step": 15350 }, { "epoch": 1.6603773584905661, "grad_norm": 0.5482152700424194, "learning_rate": 0.0005009498111171074, "loss": 3.6664, "step": 15400 }, { "epoch": 1.6657681940700808, "grad_norm": 0.5493103861808777, "learning_rate": 0.0005006260118726389, "loss": 3.6771, "step": 15450 }, { "epoch": 1.6711590296495957, "grad_norm": 0.5443813800811768, "learning_rate": 0.0005003022126281705, "loss": 3.6884, "step": 15500 }, { "epoch": 1.6765498652291106, "grad_norm": 0.5360202193260193, "learning_rate": 0.000499978413383702, "loss": 3.6633, "step": 15550 }, { "epoch": 1.6819407008086253, "grad_norm": 0.5757080316543579, "learning_rate": 
0.0004996546141392336, "loss": 3.6626, "step": 15600 }, { "epoch": 1.68733153638814, "grad_norm": 0.5827114582061768, "learning_rate": 0.0004993308148947651, "loss": 3.657, "step": 15650 }, { "epoch": 1.692722371967655, "grad_norm": 0.5391814708709717, "learning_rate": 0.0004990070156502968, "loss": 3.673, "step": 15700 }, { "epoch": 1.6981132075471699, "grad_norm": 0.6123207211494446, "learning_rate": 0.0004986832164058284, "loss": 3.6493, "step": 15750 }, { "epoch": 1.7035040431266846, "grad_norm": 0.5068091154098511, "learning_rate": 0.0004983594171613599, "loss": 3.6679, "step": 15800 }, { "epoch": 1.7088948787061995, "grad_norm": 0.5883404016494751, "learning_rate": 0.0004980356179168915, "loss": 3.6597, "step": 15850 }, { "epoch": 1.7142857142857144, "grad_norm": 0.5995203256607056, "learning_rate": 0.000497711818672423, "loss": 3.6626, "step": 15900 }, { "epoch": 1.719676549865229, "grad_norm": 0.5240509510040283, "learning_rate": 0.0004973880194279547, "loss": 3.6495, "step": 15950 }, { "epoch": 1.7250673854447438, "grad_norm": 0.5879561305046082, "learning_rate": 0.0004970642201834862, "loss": 3.6497, "step": 16000 }, { "epoch": 1.7250673854447438, "eval_accuracy": 0.3578040642916522, "eval_loss": 3.616135597229004, "eval_runtime": 183.5062, "eval_samples_per_second": 98.149, "eval_steps_per_second": 6.136, "step": 16000 }, { "epoch": 1.7304582210242587, "grad_norm": 0.5466377139091492, "learning_rate": 0.0004967404209390178, "loss": 3.6711, "step": 16050 }, { "epoch": 1.7358490566037736, "grad_norm": 0.5741949081420898, "learning_rate": 0.0004964166216945493, "loss": 3.6722, "step": 16100 }, { "epoch": 1.7412398921832883, "grad_norm": 0.6283635497093201, "learning_rate": 0.000496092822450081, "loss": 3.6545, "step": 16150 }, { "epoch": 1.7466307277628033, "grad_norm": 0.5305262207984924, "learning_rate": 0.0004957690232056125, "loss": 3.6599, "step": 16200 }, { "epoch": 1.7520215633423182, "grad_norm": 0.5391592979431152, "learning_rate": 
0.0004954452239611441, "loss": 3.6603, "step": 16250 }, { "epoch": 1.7574123989218329, "grad_norm": 0.5375102162361145, "learning_rate": 0.0004951214247166756, "loss": 3.6588, "step": 16300 }, { "epoch": 1.7628032345013476, "grad_norm": 0.5385131239891052, "learning_rate": 0.0004947976254722072, "loss": 3.6598, "step": 16350 }, { "epoch": 1.7681940700808625, "grad_norm": 0.581318199634552, "learning_rate": 0.0004944738262277387, "loss": 3.664, "step": 16400 }, { "epoch": 1.7735849056603774, "grad_norm": 0.5098201632499695, "learning_rate": 0.0004941500269832703, "loss": 3.666, "step": 16450 }, { "epoch": 1.778975741239892, "grad_norm": 0.5255228877067566, "learning_rate": 0.0004938262277388019, "loss": 3.6614, "step": 16500 }, { "epoch": 1.784366576819407, "grad_norm": 0.5504864454269409, "learning_rate": 0.0004935024284943335, "loss": 3.641, "step": 16550 }, { "epoch": 1.789757412398922, "grad_norm": 0.510482132434845, "learning_rate": 0.000493178629249865, "loss": 3.6548, "step": 16600 }, { "epoch": 1.7951482479784366, "grad_norm": 0.5756171941757202, "learning_rate": 0.0004928548300053966, "loss": 3.6613, "step": 16650 }, { "epoch": 1.8005390835579513, "grad_norm": 0.5339798927307129, "learning_rate": 0.0004925310307609282, "loss": 3.653, "step": 16700 }, { "epoch": 1.8059299191374663, "grad_norm": 0.588362991809845, "learning_rate": 0.0004922072315164598, "loss": 3.6417, "step": 16750 }, { "epoch": 1.8113207547169812, "grad_norm": 0.5696936249732971, "learning_rate": 0.0004918834322719913, "loss": 3.6488, "step": 16800 }, { "epoch": 1.8167115902964959, "grad_norm": 0.5738952159881592, "learning_rate": 0.0004915596330275229, "loss": 3.6448, "step": 16850 }, { "epoch": 1.8221024258760108, "grad_norm": 0.5368397235870361, "learning_rate": 0.0004912358337830544, "loss": 3.6527, "step": 16900 }, { "epoch": 1.8274932614555257, "grad_norm": 0.5771840214729309, "learning_rate": 0.000490912034538586, "loss": 3.6372, "step": 16950 }, { "epoch": 1.8328840970350404, 
"grad_norm": 0.5940222144126892, "learning_rate": 0.0004905882352941175, "loss": 3.6291, "step": 17000 }, { "epoch": 1.8328840970350404, "eval_accuracy": 0.359461020830823, "eval_loss": 3.600602626800537, "eval_runtime": 183.2755, "eval_samples_per_second": 98.273, "eval_steps_per_second": 6.144, "step": 17000 }, { "epoch": 1.838274932614555, "grad_norm": 0.49475905299186707, "learning_rate": 0.0004902644360496492, "loss": 3.6373, "step": 17050 }, { "epoch": 1.8436657681940702, "grad_norm": 0.5278207063674927, "learning_rate": 0.0004899406368051808, "loss": 3.6397, "step": 17100 }, { "epoch": 1.849056603773585, "grad_norm": 0.5330104827880859, "learning_rate": 0.0004896233135456017, "loss": 3.6291, "step": 17150 }, { "epoch": 1.8544474393530996, "grad_norm": 0.5108185410499573, "learning_rate": 0.0004892995143011333, "loss": 3.6578, "step": 17200 }, { "epoch": 1.8598382749326146, "grad_norm": 0.5817229747772217, "learning_rate": 0.0004889757150566648, "loss": 3.6642, "step": 17250 }, { "epoch": 1.8652291105121295, "grad_norm": 0.5322571992874146, "learning_rate": 0.0004886519158121964, "loss": 3.648, "step": 17300 }, { "epoch": 1.8706199460916442, "grad_norm": 0.5755774974822998, "learning_rate": 0.000488328116567728, "loss": 3.636, "step": 17350 }, { "epoch": 1.8760107816711589, "grad_norm": 0.5633023977279663, "learning_rate": 0.0004880043173232595, "loss": 3.6531, "step": 17400 }, { "epoch": 1.881401617250674, "grad_norm": 0.5242737531661987, "learning_rate": 0.0004876805180787911, "loss": 3.6244, "step": 17450 }, { "epoch": 1.8867924528301887, "grad_norm": 0.5220695734024048, "learning_rate": 0.0004873567188343227, "loss": 3.6473, "step": 17500 }, { "epoch": 1.8921832884097034, "grad_norm": 0.4793733060359955, "learning_rate": 0.0004870329195898542, "loss": 3.6514, "step": 17550 }, { "epoch": 1.8975741239892183, "grad_norm": 0.6032518148422241, "learning_rate": 0.00048670912034538583, "loss": 3.6398, "step": 17600 }, { "epoch": 1.9029649595687332, "grad_norm": 
0.562566876411438, "learning_rate": 0.0004863853211009174, "loss": 3.6368, "step": 17650 }, { "epoch": 1.908355795148248, "grad_norm": 0.5620681643486023, "learning_rate": 0.000486061521856449, "loss": 3.6535, "step": 17700 }, { "epoch": 1.9137466307277629, "grad_norm": 0.548251748085022, "learning_rate": 0.00048573772261198054, "loss": 3.6267, "step": 17750 }, { "epoch": 1.9191374663072778, "grad_norm": 0.5563006401062012, "learning_rate": 0.00048541392336751214, "loss": 3.632, "step": 17800 }, { "epoch": 1.9245283018867925, "grad_norm": 0.5012133121490479, "learning_rate": 0.0004850901241230437, "loss": 3.6225, "step": 17850 }, { "epoch": 1.9299191374663072, "grad_norm": 0.5342919826507568, "learning_rate": 0.0004847663248785753, "loss": 3.6266, "step": 17900 }, { "epoch": 1.935309973045822, "grad_norm": 0.5178313851356506, "learning_rate": 0.0004844425256341068, "loss": 3.6355, "step": 17950 }, { "epoch": 1.940700808625337, "grad_norm": 0.5467429161071777, "learning_rate": 0.00048411872638963834, "loss": 3.643, "step": 18000 }, { "epoch": 1.940700808625337, "eval_accuracy": 0.36094337217927597, "eval_loss": 3.584843873977661, "eval_runtime": 183.5851, "eval_samples_per_second": 98.107, "eval_steps_per_second": 6.133, "step": 18000 }, { "epoch": 1.9460916442048517, "grad_norm": 0.5631021857261658, "learning_rate": 0.00048379492714516995, "loss": 3.6179, "step": 18050 }, { "epoch": 1.9514824797843666, "grad_norm": 0.541903018951416, "learning_rate": 0.0004834711279007015, "loss": 3.6427, "step": 18100 }, { "epoch": 1.9568733153638815, "grad_norm": 0.5587621927261353, "learning_rate": 0.0004831473286562331, "loss": 3.6403, "step": 18150 }, { "epoch": 1.9622641509433962, "grad_norm": 1.227616548538208, "learning_rate": 0.00048282352941176465, "loss": 3.6295, "step": 18200 }, { "epoch": 1.967654986522911, "grad_norm": 0.5264652371406555, "learning_rate": 0.00048249973016729626, "loss": 3.645, "step": 18250 }, { "epoch": 1.9730458221024259, "grad_norm": 
0.5398224592208862, "learning_rate": 0.0004821759309228278, "loss": 3.6412, "step": 18300 }, { "epoch": 1.9784366576819408, "grad_norm": 0.5698915123939514, "learning_rate": 0.00048185213167835936, "loss": 3.6253, "step": 18350 }, { "epoch": 1.9838274932614555, "grad_norm": 0.5805723071098328, "learning_rate": 0.00048152833243389096, "loss": 3.6555, "step": 18400 }, { "epoch": 1.9892183288409704, "grad_norm": 0.5575518012046814, "learning_rate": 0.0004812045331894225, "loss": 3.611, "step": 18450 }, { "epoch": 1.9946091644204853, "grad_norm": 0.5759623050689697, "learning_rate": 0.0004808807339449541, "loss": 3.617, "step": 18500 }, { "epoch": 2.0, "grad_norm": 1.1932631731033325, "learning_rate": 0.0004805569347004856, "loss": 3.6358, "step": 18550 }, { "epoch": 2.0053908355795147, "grad_norm": 0.5486853122711182, "learning_rate": 0.0004802331354560173, "loss": 3.5442, "step": 18600 }, { "epoch": 2.01078167115903, "grad_norm": 0.5466881394386292, "learning_rate": 0.00047990933621154877, "loss": 3.5451, "step": 18650 }, { "epoch": 2.0161725067385445, "grad_norm": 0.5424332618713379, "learning_rate": 0.0004795855369670804, "loss": 3.5382, "step": 18700 }, { "epoch": 2.0215633423180592, "grad_norm": 0.5714851021766663, "learning_rate": 0.0004792617377226119, "loss": 3.5577, "step": 18750 }, { "epoch": 2.026954177897574, "grad_norm": 0.5695775151252747, "learning_rate": 0.0004789379384781435, "loss": 3.5428, "step": 18800 }, { "epoch": 2.032345013477089, "grad_norm": 0.530637264251709, "learning_rate": 0.0004786141392336751, "loss": 3.5231, "step": 18850 }, { "epoch": 2.0377358490566038, "grad_norm": 0.5261335968971252, "learning_rate": 0.00047829033998920663, "loss": 3.5522, "step": 18900 }, { "epoch": 2.0431266846361185, "grad_norm": 0.5697975158691406, "learning_rate": 0.00047796654074473824, "loss": 3.5444, "step": 18950 }, { "epoch": 2.0485175202156336, "grad_norm": 0.6406385898590088, "learning_rate": 0.0004776427415002698, "loss": 3.5593, "step": 19000 }, { 
"epoch": 2.0485175202156336, "eval_accuracy": 0.36186920343634316, "eval_loss": 3.57981014251709, "eval_runtime": 183.1277, "eval_samples_per_second": 98.352, "eval_steps_per_second": 6.149, "step": 19000 }, { "epoch": 2.0539083557951483, "grad_norm": 0.5842219591140747, "learning_rate": 0.0004773189422558014, "loss": 3.5202, "step": 19050 }, { "epoch": 2.059299191374663, "grad_norm": 0.5753844380378723, "learning_rate": 0.00047699514301133294, "loss": 3.5584, "step": 19100 }, { "epoch": 2.0646900269541777, "grad_norm": 0.5149918794631958, "learning_rate": 0.00047667134376686455, "loss": 3.5434, "step": 19150 }, { "epoch": 2.070080862533693, "grad_norm": 0.5646060705184937, "learning_rate": 0.0004763540205072854, "loss": 3.5652, "step": 19200 }, { "epoch": 2.0754716981132075, "grad_norm": 0.6100060343742371, "learning_rate": 0.00047603022126281705, "loss": 3.563, "step": 19250 }, { "epoch": 2.0808625336927222, "grad_norm": 0.5924269556999207, "learning_rate": 0.00047570642201834855, "loss": 3.5591, "step": 19300 }, { "epoch": 2.0862533692722374, "grad_norm": 0.5271193385124207, "learning_rate": 0.00047538262277388015, "loss": 3.5502, "step": 19350 }, { "epoch": 2.091644204851752, "grad_norm": 0.559655487537384, "learning_rate": 0.0004750588235294117, "loss": 3.5551, "step": 19400 }, { "epoch": 2.0970350404312668, "grad_norm": 0.5192553997039795, "learning_rate": 0.0004747350242849433, "loss": 3.5419, "step": 19450 }, { "epoch": 2.1024258760107815, "grad_norm": 0.5999012589454651, "learning_rate": 0.00047441122504047486, "loss": 3.5376, "step": 19500 }, { "epoch": 2.1078167115902966, "grad_norm": 0.601438581943512, "learning_rate": 0.0004740874257960064, "loss": 3.5554, "step": 19550 }, { "epoch": 2.1132075471698113, "grad_norm": 0.5252161622047424, "learning_rate": 0.000473763626551538, "loss": 3.5518, "step": 19600 }, { "epoch": 2.118598382749326, "grad_norm": 0.568138062953949, "learning_rate": 0.00047343982730706956, "loss": 3.5517, "step": 19650 }, { "epoch": 
2.123989218328841, "grad_norm": 0.5670929551124573, "learning_rate": 0.00047311602806260117, "loss": 3.5404, "step": 19700 }, { "epoch": 2.129380053908356, "grad_norm": 0.6419331431388855, "learning_rate": 0.0004727922288181327, "loss": 3.5383, "step": 19750 }, { "epoch": 2.1347708894878705, "grad_norm": 0.5455891489982605, "learning_rate": 0.0004724684295736643, "loss": 3.5673, "step": 19800 }, { "epoch": 2.1401617250673857, "grad_norm": 0.5633212327957153, "learning_rate": 0.0004721446303291959, "loss": 3.5679, "step": 19850 }, { "epoch": 2.1455525606469004, "grad_norm": null, "learning_rate": 0.0004718273070696168, "loss": 3.5567, "step": 19900 }, { "epoch": 2.150943396226415, "grad_norm": 0.5624653697013855, "learning_rate": 0.0004715035078251483, "loss": 3.5539, "step": 19950 }, { "epoch": 2.1563342318059298, "grad_norm": 0.5584042072296143, "learning_rate": 0.0004711797085806799, "loss": 3.5552, "step": 20000 }, { "epoch": 2.1563342318059298, "eval_accuracy": 0.3635111658769956, "eval_loss": 3.5678915977478027, "eval_runtime": 183.3667, "eval_samples_per_second": 98.224, "eval_steps_per_second": 6.141, "step": 20000 }, { "epoch": 2.161725067385445, "grad_norm": 0.5540956258773804, "learning_rate": 0.0004708559093362115, "loss": 3.5543, "step": 20050 }, { "epoch": 2.1671159029649596, "grad_norm": 0.5896965265274048, "learning_rate": 0.0004705321100917431, "loss": 3.5654, "step": 20100 }, { "epoch": 2.1725067385444743, "grad_norm": 0.5863565802574158, "learning_rate": 0.00047020831084727463, "loss": 3.5427, "step": 20150 }, { "epoch": 2.177897574123989, "grad_norm": 0.6331449747085571, "learning_rate": 0.00046988451160280624, "loss": 3.5658, "step": 20200 }, { "epoch": 2.183288409703504, "grad_norm": 0.5432201027870178, "learning_rate": 0.0004695607123583378, "loss": 3.5648, "step": 20250 }, { "epoch": 2.188679245283019, "grad_norm": 0.5611268877983093, "learning_rate": 0.00046923691311386934, "loss": 3.5401, "step": 20300 }, { "epoch": 2.1940700808625335, 
"grad_norm": 0.5305395722389221, "learning_rate": 0.00046891311386940094, "loss": 3.5583, "step": 20350 }, { "epoch": 2.1994609164420487, "grad_norm": 0.5022453665733337, "learning_rate": 0.0004685893146249325, "loss": 3.5571, "step": 20400 }, { "epoch": 2.2048517520215634, "grad_norm": 0.5850080847740173, "learning_rate": 0.0004682655153804641, "loss": 3.5559, "step": 20450 }, { "epoch": 2.210242587601078, "grad_norm": 0.5080869793891907, "learning_rate": 0.00046794171613599565, "loss": 3.5601, "step": 20500 }, { "epoch": 2.215633423180593, "grad_norm": 0.5989983081817627, "learning_rate": 0.00046761791689152725, "loss": 3.563, "step": 20550 }, { "epoch": 2.221024258760108, "grad_norm": 0.6006690859794617, "learning_rate": 0.00046729411764705875, "loss": 3.5674, "step": 20600 }, { "epoch": 2.2264150943396226, "grad_norm": 0.5296274423599243, "learning_rate": 0.0004669703184025904, "loss": 3.5525, "step": 20650 }, { "epoch": 2.2318059299191373, "grad_norm": 0.5273208618164062, "learning_rate": 0.0004666465191581219, "loss": 3.5551, "step": 20700 }, { "epoch": 2.2371967654986524, "grad_norm": 0.6000422239303589, "learning_rate": 0.00046632271991365346, "loss": 3.556, "step": 20750 }, { "epoch": 2.242587601078167, "grad_norm": 0.542732298374176, "learning_rate": 0.00046599892066918506, "loss": 3.5593, "step": 20800 }, { "epoch": 2.247978436657682, "grad_norm": 0.5846306085586548, "learning_rate": 0.0004656751214247166, "loss": 3.5424, "step": 20850 }, { "epoch": 2.2533692722371965, "grad_norm": 0.6560302972793579, "learning_rate": 0.0004653513221802482, "loss": 3.554, "step": 20900 }, { "epoch": 2.2587601078167117, "grad_norm": 0.5398501753807068, "learning_rate": 0.00046502752293577977, "loss": 3.5597, "step": 20950 }, { "epoch": 2.2641509433962264, "grad_norm": 0.5567417144775391, "learning_rate": 0.00046470372369131137, "loss": 3.5423, "step": 21000 }, { "epoch": 2.2641509433962264, "eval_accuracy": 0.3644758948679004, "eval_loss": 3.5552797317504883, 
"eval_runtime": 183.5672, "eval_samples_per_second": 98.117, "eval_steps_per_second": 6.134, "step": 21000 }, { "epoch": 2.269541778975741, "grad_norm": 0.5651289224624634, "learning_rate": 0.0004643799244468429, "loss": 3.5383, "step": 21050 }, { "epoch": 2.274932614555256, "grad_norm": 0.5148941278457642, "learning_rate": 0.0004640561252023745, "loss": 3.5681, "step": 21100 }, { "epoch": 2.280323450134771, "grad_norm": 0.5875728726387024, "learning_rate": 0.0004637323259579061, "loss": 3.5605, "step": 21150 }, { "epoch": 2.2857142857142856, "grad_norm": 0.5935449004173279, "learning_rate": 0.0004634085267134376, "loss": 3.5523, "step": 21200 }, { "epoch": 2.2911051212938007, "grad_norm": 0.5729395747184753, "learning_rate": 0.00046308472746896923, "loss": 3.5244, "step": 21250 }, { "epoch": 2.2964959568733154, "grad_norm": 0.6214632391929626, "learning_rate": 0.00046276092822450073, "loss": 3.5476, "step": 21300 }, { "epoch": 2.30188679245283, "grad_norm": 0.5737814903259277, "learning_rate": 0.00046243712898003233, "loss": 3.5626, "step": 21350 }, { "epoch": 2.3072776280323453, "grad_norm": 0.5795217156410217, "learning_rate": 0.0004621133297355639, "loss": 3.54, "step": 21400 }, { "epoch": 2.31266846361186, "grad_norm": 0.5368177890777588, "learning_rate": 0.0004617895304910955, "loss": 3.552, "step": 21450 }, { "epoch": 2.3180592991913747, "grad_norm": 0.6149646639823914, "learning_rate": 0.00046146573124662704, "loss": 3.557, "step": 21500 }, { "epoch": 2.3234501347708894, "grad_norm": 0.5619512796401978, "learning_rate": 0.00046114193200215864, "loss": 3.5438, "step": 21550 }, { "epoch": 2.3288409703504045, "grad_norm": 0.5701526403427124, "learning_rate": 0.0004608181327576902, "loss": 3.5537, "step": 21600 }, { "epoch": 2.334231805929919, "grad_norm": 0.5332589149475098, "learning_rate": 0.00046049433351322175, "loss": 3.5537, "step": 21650 }, { "epoch": 2.339622641509434, "grad_norm": 0.583706259727478, "learning_rate": 0.00046017053426875335, "loss": 
3.5544, "step": 21700 }, { "epoch": 2.3450134770889486, "grad_norm": 0.5967761278152466, "learning_rate": 0.0004598467350242849, "loss": 3.5528, "step": 21750 }, { "epoch": 2.3504043126684637, "grad_norm": 0.5579556822776794, "learning_rate": 0.0004595229357798165, "loss": 3.5632, "step": 21800 }, { "epoch": 2.3557951482479784, "grad_norm": 0.5610327124595642, "learning_rate": 0.00045919913653534806, "loss": 3.5557, "step": 21850 }, { "epoch": 2.361185983827493, "grad_norm": 0.5787166953086853, "learning_rate": 0.00045887533729087966, "loss": 3.5439, "step": 21900 }, { "epoch": 2.3665768194070083, "grad_norm": 0.540556013584137, "learning_rate": 0.00045855153804641116, "loss": 3.558, "step": 21950 }, { "epoch": 2.371967654986523, "grad_norm": 0.5531706809997559, "learning_rate": 0.0004582277388019427, "loss": 3.5562, "step": 22000 }, { "epoch": 2.371967654986523, "eval_accuracy": 0.3654050943644898, "eval_loss": 3.5464489459991455, "eval_runtime": 183.2178, "eval_samples_per_second": 98.304, "eval_steps_per_second": 6.146, "step": 22000 }, { "epoch": 2.3773584905660377, "grad_norm": 0.5978881120681763, "learning_rate": 0.0004579039395574743, "loss": 3.5342, "step": 22050 }, { "epoch": 2.382749326145553, "grad_norm": 0.5623920559883118, "learning_rate": 0.00045758014031300586, "loss": 3.5555, "step": 22100 }, { "epoch": 2.3881401617250675, "grad_norm": 0.5382367372512817, "learning_rate": 0.00045725634106853747, "loss": 3.5515, "step": 22150 }, { "epoch": 2.393530997304582, "grad_norm": 0.6158401370048523, "learning_rate": 0.000456932541824069, "loss": 3.5763, "step": 22200 }, { "epoch": 2.398921832884097, "grad_norm": 0.5851870775222778, "learning_rate": 0.0004566087425796006, "loss": 3.546, "step": 22250 }, { "epoch": 2.404312668463612, "grad_norm": 0.6621217131614685, "learning_rate": 0.0004562849433351322, "loss": 3.527, "step": 22300 }, { "epoch": 2.4097035040431267, "grad_norm": 0.5817117094993591, "learning_rate": 0.0004559611440906638, "loss": 3.55, "step": 
22350 }, { "epoch": 2.4150943396226414, "grad_norm": 0.5394627451896667, "learning_rate": 0.00045563734484619533, "loss": 3.5395, "step": 22400 }, { "epoch": 2.420485175202156, "grad_norm": 0.6029559969902039, "learning_rate": 0.0004553135456017269, "loss": 3.569, "step": 22450 }, { "epoch": 2.4258760107816713, "grad_norm": 0.6211172342300415, "learning_rate": 0.0004549897463572585, "loss": 3.5407, "step": 22500 }, { "epoch": 2.431266846361186, "grad_norm": 0.5414947867393494, "learning_rate": 0.00045466594711279, "loss": 3.5477, "step": 22550 }, { "epoch": 2.4366576819407006, "grad_norm": 0.5086063742637634, "learning_rate": 0.00045434214786832164, "loss": 3.5477, "step": 22600 }, { "epoch": 2.442048517520216, "grad_norm": 0.5853644609451294, "learning_rate": 0.00045401834862385314, "loss": 3.5445, "step": 22650 }, { "epoch": 2.4474393530997305, "grad_norm": 0.5894696116447449, "learning_rate": 0.00045369454937938474, "loss": 3.5327, "step": 22700 }, { "epoch": 2.452830188679245, "grad_norm": 0.5951809883117676, "learning_rate": 0.0004533707501349163, "loss": 3.5572, "step": 22750 }, { "epoch": 2.4582210242587603, "grad_norm": 0.5352622270584106, "learning_rate": 0.0004530469508904479, "loss": 3.5671, "step": 22800 }, { "epoch": 2.463611859838275, "grad_norm": 0.6040215492248535, "learning_rate": 0.00045272315164597945, "loss": 3.5349, "step": 22850 }, { "epoch": 2.4690026954177897, "grad_norm": 0.573526918888092, "learning_rate": 0.000452399352401511, "loss": 3.5441, "step": 22900 }, { "epoch": 2.4743935309973044, "grad_norm": 0.5855453610420227, "learning_rate": 0.0004520755531570426, "loss": 3.5475, "step": 22950 }, { "epoch": 2.4797843665768196, "grad_norm": 0.5997734665870667, "learning_rate": 0.00045175175391257415, "loss": 3.5392, "step": 23000 }, { "epoch": 2.4797843665768196, "eval_accuracy": 0.36637493004112187, "eval_loss": 3.5344290733337402, "eval_runtime": 183.3961, "eval_samples_per_second": 98.208, "eval_steps_per_second": 6.14, "step": 23000 }, { 
"epoch": 2.4851752021563343, "grad_norm": 0.5770864486694336, "learning_rate": 0.00045142795466810576, "loss": 3.5379, "step": 23050 }, { "epoch": 2.490566037735849, "grad_norm": 0.5228535532951355, "learning_rate": 0.0004511041554236373, "loss": 3.5355, "step": 23100 }, { "epoch": 2.4959568733153636, "grad_norm": 0.6147720217704773, "learning_rate": 0.0004507803561791689, "loss": 3.5329, "step": 23150 }, { "epoch": 2.501347708894879, "grad_norm": 0.5598917007446289, "learning_rate": 0.00045045655693470046, "loss": 3.5474, "step": 23200 }, { "epoch": 2.5067385444743935, "grad_norm": 0.545137345790863, "learning_rate": 0.00045013275769023207, "loss": 3.5582, "step": 23250 }, { "epoch": 2.512129380053908, "grad_norm": 0.5641994476318359, "learning_rate": 0.00044980895844576356, "loss": 3.5553, "step": 23300 }, { "epoch": 2.5175202156334233, "grad_norm": 0.5789488554000854, "learning_rate": 0.0004494851592012951, "loss": 3.5306, "step": 23350 }, { "epoch": 2.522911051212938, "grad_norm": 0.5591729283332825, "learning_rate": 0.0004491613599568267, "loss": 3.5349, "step": 23400 }, { "epoch": 2.5283018867924527, "grad_norm": 0.5694953799247742, "learning_rate": 0.00044883756071235827, "loss": 3.539, "step": 23450 }, { "epoch": 2.533692722371968, "grad_norm": 0.5788413286209106, "learning_rate": 0.0004485137614678899, "loss": 3.5186, "step": 23500 }, { "epoch": 2.5390835579514826, "grad_norm": 0.5994324088096619, "learning_rate": 0.0004481899622234214, "loss": 3.5555, "step": 23550 }, { "epoch": 2.5444743935309972, "grad_norm": 0.6380375623703003, "learning_rate": 0.00044786616297895303, "loss": 3.5488, "step": 23600 }, { "epoch": 2.5498652291105124, "grad_norm": 0.5977026224136353, "learning_rate": 0.0004475423637344846, "loss": 3.5424, "step": 23650 }, { "epoch": 2.555256064690027, "grad_norm": 0.5507897734642029, "learning_rate": 0.00044721856449001613, "loss": 3.5557, "step": 23700 }, { "epoch": 2.560646900269542, "grad_norm": 0.5803244709968567, "learning_rate": 
0.00044689476524554774, "loss": 3.5641, "step": 23750 }, { "epoch": 2.5660377358490565, "grad_norm": 0.609472930431366, "learning_rate": 0.0004465709660010793, "loss": 3.5369, "step": 23800 }, { "epoch": 2.571428571428571, "grad_norm": 0.5822898745536804, "learning_rate": 0.0004462471667566109, "loss": 3.5363, "step": 23850 }, { "epoch": 2.5768194070080863, "grad_norm": 0.5829198360443115, "learning_rate": 0.00044592336751214244, "loss": 3.534, "step": 23900 }, { "epoch": 2.582210242587601, "grad_norm": 0.5264182686805725, "learning_rate": 0.00044560604425256334, "loss": 3.5611, "step": 23950 }, { "epoch": 2.5876010781671157, "grad_norm": 0.5896156430244446, "learning_rate": 0.000445282245008095, "loss": 3.541, "step": 24000 }, { "epoch": 2.5876010781671157, "eval_accuracy": 0.3671398463713358, "eval_loss": 3.5273725986480713, "eval_runtime": 183.5998, "eval_samples_per_second": 98.099, "eval_steps_per_second": 6.133, "step": 24000 }, { "epoch": 2.592991913746631, "grad_norm": 0.6344112157821655, "learning_rate": 0.0004449584457636265, "loss": 3.5453, "step": 24050 }, { "epoch": 2.5983827493261455, "grad_norm": 0.6354188919067383, "learning_rate": 0.00044463464651915805, "loss": 3.5216, "step": 24100 }, { "epoch": 2.6037735849056602, "grad_norm": 0.5827237963676453, "learning_rate": 0.00044431084727468965, "loss": 3.5458, "step": 24150 }, { "epoch": 2.6091644204851754, "grad_norm": 0.5500607490539551, "learning_rate": 0.0004439870480302212, "loss": 3.5379, "step": 24200 }, { "epoch": 2.61455525606469, "grad_norm": 0.5530035495758057, "learning_rate": 0.0004436632487857528, "loss": 3.5422, "step": 24250 }, { "epoch": 2.6199460916442048, "grad_norm": 0.585949182510376, "learning_rate": 0.00044333944954128436, "loss": 3.5475, "step": 24300 }, { "epoch": 2.62533692722372, "grad_norm": 0.6067348718643188, "learning_rate": 0.00044301565029681596, "loss": 3.5488, "step": 24350 }, { "epoch": 2.6307277628032346, "grad_norm": 0.5916098356246948, "learning_rate": 
0.0004426918510523475, "loss": 3.5251, "step": 24400 }, { "epoch": 2.6361185983827493, "grad_norm": 0.5653256177902222, "learning_rate": 0.0004423680518078791, "loss": 3.5518, "step": 24450 }, { "epoch": 2.641509433962264, "grad_norm": 0.6101019978523254, "learning_rate": 0.00044204425256341067, "loss": 3.5162, "step": 24500 }, { "epoch": 2.6469002695417787, "grad_norm": 0.5783448815345764, "learning_rate": 0.0004417204533189422, "loss": 3.5196, "step": 24550 }, { "epoch": 2.652291105121294, "grad_norm": 0.5608351230621338, "learning_rate": 0.0004413966540744738, "loss": 3.5275, "step": 24600 }, { "epoch": 2.6576819407008085, "grad_norm": 0.533299446105957, "learning_rate": 0.0004410728548300053, "loss": 3.5358, "step": 24650 }, { "epoch": 2.6630727762803232, "grad_norm": 0.5338042378425598, "learning_rate": 0.0004407490555855369, "loss": 3.5365, "step": 24700 }, { "epoch": 2.6684636118598384, "grad_norm": 0.5940648913383484, "learning_rate": 0.0004404252563410685, "loss": 3.5506, "step": 24750 }, { "epoch": 2.673854447439353, "grad_norm": 0.5929331183433533, "learning_rate": 0.0004401014570966001, "loss": 3.5268, "step": 24800 }, { "epoch": 2.6792452830188678, "grad_norm": 0.5341362953186035, "learning_rate": 0.00043977765785213163, "loss": 3.5281, "step": 24850 }, { "epoch": 2.684636118598383, "grad_norm": 0.5370336174964905, "learning_rate": 0.0004394538586076632, "loss": 3.5383, "step": 24900 }, { "epoch": 2.6900269541778976, "grad_norm": 0.5800198316574097, "learning_rate": 0.0004391300593631948, "loss": 3.5321, "step": 24950 }, { "epoch": 2.6954177897574123, "grad_norm": 0.5767235159873962, "learning_rate": 0.00043880626011872634, "loss": 3.5477, "step": 25000 }, { "epoch": 2.6954177897574123, "eval_accuracy": 0.36847790668477115, "eval_loss": 3.51676607131958, "eval_runtime": 183.255, "eval_samples_per_second": 98.284, "eval_steps_per_second": 6.144, "step": 25000 }, { "epoch": 2.7008086253369274, "grad_norm": 0.6077183485031128, "learning_rate": 
0.00043848246087425794, "loss": 3.5476, "step": 25050 }, { "epoch": 2.706199460916442, "grad_norm": 0.5808961391448975, "learning_rate": 0.0004381586616297895, "loss": 3.5578, "step": 25100 }, { "epoch": 2.711590296495957, "grad_norm": 0.6259551048278809, "learning_rate": 0.0004378348623853211, "loss": 3.5405, "step": 25150 }, { "epoch": 2.7169811320754715, "grad_norm": 0.6004087328910828, "learning_rate": 0.00043751106314085265, "loss": 3.5239, "step": 25200 }, { "epoch": 2.7223719676549867, "grad_norm": 0.5496945381164551, "learning_rate": 0.00043718726389638425, "loss": 3.5258, "step": 25250 }, { "epoch": 2.7277628032345014, "grad_norm": 0.6094425916671753, "learning_rate": 0.00043686346465191575, "loss": 3.536, "step": 25300 }, { "epoch": 2.733153638814016, "grad_norm": 0.5401579737663269, "learning_rate": 0.0004365396654074473, "loss": 3.5275, "step": 25350 }, { "epoch": 2.7385444743935308, "grad_norm": 0.5293307304382324, "learning_rate": 0.00043622234214786825, "loss": 3.5194, "step": 25400 }, { "epoch": 2.743935309973046, "grad_norm": 0.5775792598724365, "learning_rate": 0.00043589854290339985, "loss": 3.535, "step": 25450 }, { "epoch": 2.7493261455525606, "grad_norm": 0.5909881591796875, "learning_rate": 0.0004355747436589314, "loss": 3.5507, "step": 25500 }, { "epoch": 2.7547169811320753, "grad_norm": 0.5832464098930359, "learning_rate": 0.000435250944414463, "loss": 3.5444, "step": 25550 }, { "epoch": 2.7601078167115904, "grad_norm": 0.5698075294494629, "learning_rate": 0.00043492714516999456, "loss": 3.535, "step": 25600 }, { "epoch": 2.765498652291105, "grad_norm": 0.5500668883323669, "learning_rate": 0.0004346033459255261, "loss": 3.5232, "step": 25650 }, { "epoch": 2.77088948787062, "grad_norm": 0.5673616528511047, "learning_rate": 0.0004342795466810577, "loss": 3.5455, "step": 25700 }, { "epoch": 2.776280323450135, "grad_norm": 0.5733495354652405, "learning_rate": 0.00043395574743658927, "loss": 3.5295, "step": 25750 }, { "epoch": 
2.7816711590296497, "grad_norm": 0.5701019763946533, "learning_rate": 0.00043363194819212087, "loss": 3.5307, "step": 25800 }, { "epoch": 2.7870619946091644, "grad_norm": 0.5702261924743652, "learning_rate": 0.0004333081489476524, "loss": 3.516, "step": 25850 }, { "epoch": 2.7924528301886795, "grad_norm": 0.5822378396987915, "learning_rate": 0.000432984349703184, "loss": 3.5372, "step": 25900 }, { "epoch": 2.797843665768194, "grad_norm": 0.550873875617981, "learning_rate": 0.0004326605504587155, "loss": 3.5085, "step": 25950 }, { "epoch": 2.803234501347709, "grad_norm": 0.5707508325576782, "learning_rate": 0.0004323367512142472, "loss": 3.5275, "step": 26000 }, { "epoch": 2.803234501347709, "eval_accuracy": 0.36893414016070414, "eval_loss": 3.510256290435791, "eval_runtime": 183.5418, "eval_samples_per_second": 98.13, "eval_steps_per_second": 6.135, "step": 26000 }, { "epoch": 2.8086253369272236, "grad_norm": 0.5530628561973572, "learning_rate": 0.0004320129519697787, "loss": 3.5324, "step": 26050 }, { "epoch": 2.8140161725067383, "grad_norm": 0.536308228969574, "learning_rate": 0.00043168915272531023, "loss": 3.5192, "step": 26100 }, { "epoch": 2.8194070080862534, "grad_norm": 0.5508840680122375, "learning_rate": 0.00043136535348084183, "loss": 3.5308, "step": 26150 }, { "epoch": 2.824797843665768, "grad_norm": 0.661864697933197, "learning_rate": 0.0004310415542363734, "loss": 3.5415, "step": 26200 }, { "epoch": 2.830188679245283, "grad_norm": 0.5606989860534668, "learning_rate": 0.000430717754991905, "loss": 3.5281, "step": 26250 }, { "epoch": 2.835579514824798, "grad_norm": 0.5549330711364746, "learning_rate": 0.00043039395574743654, "loss": 3.5263, "step": 26300 }, { "epoch": 2.8409703504043127, "grad_norm": 0.5238580107688904, "learning_rate": 0.00043007015650296814, "loss": 3.5394, "step": 26350 }, { "epoch": 2.8463611859838274, "grad_norm": 0.5641552805900574, "learning_rate": 0.0004297463572584997, "loss": 3.5218, "step": 26400 }, { "epoch": 
2.8517520215633425, "grad_norm": 0.5834696888923645, "learning_rate": 0.0004294225580140313, "loss": 3.5309, "step": 26450 }, { "epoch": 2.857142857142857, "grad_norm": 0.6112784147262573, "learning_rate": 0.00042909875876956285, "loss": 3.52, "step": 26500 }, { "epoch": 2.862533692722372, "grad_norm": 0.5841647386550903, "learning_rate": 0.0004287749595250944, "loss": 3.5032, "step": 26550 }, { "epoch": 2.867924528301887, "grad_norm": 0.5762807726860046, "learning_rate": 0.000428451160280626, "loss": 3.5271, "step": 26600 }, { "epoch": 2.8733153638814017, "grad_norm": 0.5195743441581726, "learning_rate": 0.0004281273610361575, "loss": 3.5221, "step": 26650 }, { "epoch": 2.8787061994609164, "grad_norm": 0.5398359298706055, "learning_rate": 0.0004278035617916891, "loss": 3.5163, "step": 26700 }, { "epoch": 2.884097035040431, "grad_norm": 0.5633315443992615, "learning_rate": 0.00042747976254722066, "loss": 3.5195, "step": 26750 }, { "epoch": 2.889487870619946, "grad_norm": 0.5828991532325745, "learning_rate": 0.00042715596330275226, "loss": 3.5012, "step": 26800 }, { "epoch": 2.894878706199461, "grad_norm": 0.6273923516273499, "learning_rate": 0.0004268321640582838, "loss": 3.5202, "step": 26850 }, { "epoch": 2.9002695417789757, "grad_norm": 0.5381441712379456, "learning_rate": 0.0004265083648138154, "loss": 3.5087, "step": 26900 }, { "epoch": 2.9056603773584904, "grad_norm": 0.596377432346344, "learning_rate": 0.00042618456556934697, "loss": 3.5073, "step": 26950 }, { "epoch": 2.9110512129380055, "grad_norm": 0.5934976935386658, "learning_rate": 0.0004258607663248785, "loss": 3.5341, "step": 27000 }, { "epoch": 2.9110512129380055, "eval_accuracy": 0.3700002422959398, "eval_loss": 3.500957727432251, "eval_runtime": 183.1714, "eval_samples_per_second": 98.329, "eval_steps_per_second": 6.147, "step": 27000 }, { "epoch": 2.91644204851752, "grad_norm": 0.6017852425575256, "learning_rate": 0.0004255369670804101, "loss": 3.5455, "step": 27050 }, { "epoch": 
2.921832884097035, "grad_norm": 0.5310950875282288, "learning_rate": 0.0004252131678359417, "loss": 3.5272, "step": 27100 }, { "epoch": 2.92722371967655, "grad_norm": 0.5664175152778625, "learning_rate": 0.0004248893685914733, "loss": 3.5418, "step": 27150 }, { "epoch": 2.9326145552560647, "grad_norm": 0.6421070694923401, "learning_rate": 0.00042456556934700483, "loss": 3.5271, "step": 27200 }, { "epoch": 2.9380053908355794, "grad_norm": 0.5426092743873596, "learning_rate": 0.00042424177010253643, "loss": 3.542, "step": 27250 }, { "epoch": 2.9433962264150946, "grad_norm": 0.597486138343811, "learning_rate": 0.00042391797085806793, "loss": 3.5224, "step": 27300 }, { "epoch": 2.9487870619946093, "grad_norm": 0.6039035320281982, "learning_rate": 0.0004235941716135995, "loss": 3.5114, "step": 27350 }, { "epoch": 2.954177897574124, "grad_norm": 0.5943632125854492, "learning_rate": 0.0004232703723691311, "loss": 3.5101, "step": 27400 }, { "epoch": 2.9595687331536387, "grad_norm": 0.5611184239387512, "learning_rate": 0.00042294657312466264, "loss": 3.527, "step": 27450 }, { "epoch": 2.964959568733154, "grad_norm": 0.5667484402656555, "learning_rate": 0.00042262277388019424, "loss": 3.5303, "step": 27500 }, { "epoch": 2.9703504043126685, "grad_norm": 0.5914982557296753, "learning_rate": 0.0004222989746357258, "loss": 3.5148, "step": 27550 }, { "epoch": 2.975741239892183, "grad_norm": 0.5547810792922974, "learning_rate": 0.0004219751753912574, "loss": 3.5162, "step": 27600 }, { "epoch": 2.981132075471698, "grad_norm": 0.5683316588401794, "learning_rate": 0.00042165137614678895, "loss": 3.5384, "step": 27650 }, { "epoch": 2.986522911051213, "grad_norm": 0.5850192904472351, "learning_rate": 0.00042132757690232055, "loss": 3.5048, "step": 27700 }, { "epoch": 2.9919137466307277, "grad_norm": 0.5472140908241272, "learning_rate": 0.0004210037776578521, "loss": 3.4964, "step": 27750 }, { "epoch": 2.9973045822102424, "grad_norm": 0.6173052787780762, "learning_rate": 
0.00042067997841338365, "loss": 3.5167, "step": 27800 }, { "epoch": 3.0026954177897576, "grad_norm": 0.5687317848205566, "learning_rate": 0.00042035617916891526, "loss": 3.4765, "step": 27850 }, { "epoch": 3.0080862533692723, "grad_norm": 0.5316717624664307, "learning_rate": 0.0004200323799244468, "loss": 3.4207, "step": 27900 }, { "epoch": 3.013477088948787, "grad_norm": 0.6342532634735107, "learning_rate": 0.0004197085806799784, "loss": 3.3998, "step": 27950 }, { "epoch": 3.018867924528302, "grad_norm": 0.5721489787101746, "learning_rate": 0.0004193847814355099, "loss": 3.4218, "step": 28000 }, { "epoch": 3.018867924528302, "eval_accuracy": 0.37088456814986187, "eval_loss": 3.4964616298675537, "eval_runtime": 183.4604, "eval_samples_per_second": 98.174, "eval_steps_per_second": 6.138, "step": 28000 }, { "epoch": 3.024258760107817, "grad_norm": 0.5991702079772949, "learning_rate": 0.0004190609821910415, "loss": 3.4134, "step": 28050 }, { "epoch": 3.0296495956873315, "grad_norm": 0.5457400679588318, "learning_rate": 0.00041873718294657306, "loss": 3.4297, "step": 28100 }, { "epoch": 3.035040431266846, "grad_norm": 0.5663076043128967, "learning_rate": 0.00041841338370210467, "loss": 3.431, "step": 28150 }, { "epoch": 3.0404312668463613, "grad_norm": 0.6150392889976501, "learning_rate": 0.0004180895844576362, "loss": 3.4296, "step": 28200 }, { "epoch": 3.045822102425876, "grad_norm": 0.5990293622016907, "learning_rate": 0.00041776578521316777, "loss": 3.435, "step": 28250 }, { "epoch": 3.0512129380053907, "grad_norm": 0.5907416939735413, "learning_rate": 0.0004174419859686994, "loss": 3.4342, "step": 28300 }, { "epoch": 3.056603773584906, "grad_norm": 0.5918059945106506, "learning_rate": 0.0004171181867242309, "loss": 3.4428, "step": 28350 }, { "epoch": 3.0619946091644206, "grad_norm": 0.6297253966331482, "learning_rate": 0.00041679438747976253, "loss": 3.4174, "step": 28400 }, { "epoch": 3.0673854447439353, "grad_norm": 0.6004771590232849, "learning_rate": 
0.0004164705882352941, "loss": 3.4398, "step": 28450 }, { "epoch": 3.07277628032345, "grad_norm": 0.6342185139656067, "learning_rate": 0.0004161467889908257, "loss": 3.4255, "step": 28500 }, { "epoch": 3.078167115902965, "grad_norm": 0.5759187340736389, "learning_rate": 0.00041582298974635724, "loss": 3.4223, "step": 28550 }, { "epoch": 3.08355795148248, "grad_norm": 0.5629438757896423, "learning_rate": 0.00041549919050188884, "loss": 3.4434, "step": 28600 }, { "epoch": 3.0889487870619945, "grad_norm": 0.6492778658866882, "learning_rate": 0.00041517539125742034, "loss": 3.4449, "step": 28650 }, { "epoch": 3.0943396226415096, "grad_norm": 0.5692011713981628, "learning_rate": 0.0004148515920129519, "loss": 3.447, "step": 28700 }, { "epoch": 3.0997304582210243, "grad_norm": 0.9911279678344727, "learning_rate": 0.0004145277927684835, "loss": 3.443, "step": 28750 }, { "epoch": 3.105121293800539, "grad_norm": 0.6052446365356445, "learning_rate": 0.00041420399352401504, "loss": 3.4396, "step": 28800 }, { "epoch": 3.1105121293800537, "grad_norm": 0.5958963632583618, "learning_rate": 0.00041388019427954665, "loss": 3.4439, "step": 28850 }, { "epoch": 3.115902964959569, "grad_norm": 0.5762249231338501, "learning_rate": 0.0004135563950350782, "loss": 3.4425, "step": 28900 }, { "epoch": 3.1212938005390836, "grad_norm": 0.5552219152450562, "learning_rate": 0.0004132325957906098, "loss": 3.4359, "step": 28950 }, { "epoch": 3.1266846361185983, "grad_norm": 0.59885573387146, "learning_rate": 0.00041290879654614135, "loss": 3.4461, "step": 29000 }, { "epoch": 3.1266846361185983, "eval_accuracy": 0.37161156462222855, "eval_loss": 3.489034414291382, "eval_runtime": 183.4689, "eval_samples_per_second": 98.169, "eval_steps_per_second": 6.137, "step": 29000 }, { "epoch": 3.1320754716981134, "grad_norm": 0.5445829033851624, "learning_rate": 0.00041258499730167296, "loss": 3.4258, "step": 29050 }, { "epoch": 3.137466307277628, "grad_norm": 0.5851534008979797, "learning_rate": 
0.0004122611980572045, "loss": 3.4514, "step": 29100 }, { "epoch": 3.142857142857143, "grad_norm": 0.5788666605949402, "learning_rate": 0.00041193739881273606, "loss": 3.4466, "step": 29150 }, { "epoch": 3.1482479784366575, "grad_norm": 0.6072415113449097, "learning_rate": 0.00041161359956826766, "loss": 3.4602, "step": 29200 }, { "epoch": 3.1536388140161726, "grad_norm": 0.6163262724876404, "learning_rate": 0.0004112898003237992, "loss": 3.4614, "step": 29250 }, { "epoch": 3.1590296495956873, "grad_norm": 0.612508237361908, "learning_rate": 0.0004109660010793308, "loss": 3.4541, "step": 29300 }, { "epoch": 3.164420485175202, "grad_norm": 0.5615676045417786, "learning_rate": 0.0004106422018348623, "loss": 3.4416, "step": 29350 }, { "epoch": 3.169811320754717, "grad_norm": 0.6066969633102417, "learning_rate": 0.0004103184025903939, "loss": 3.4516, "step": 29400 }, { "epoch": 3.175202156334232, "grad_norm": 0.603179395198822, "learning_rate": 0.00040999460334592547, "loss": 3.4643, "step": 29450 }, { "epoch": 3.1805929919137466, "grad_norm": 0.5823822021484375, "learning_rate": 0.000409670804101457, "loss": 3.4381, "step": 29500 }, { "epoch": 3.1859838274932613, "grad_norm": 0.5800785422325134, "learning_rate": 0.000409353480841878, "loss": 3.4505, "step": 29550 }, { "epoch": 3.1913746630727764, "grad_norm": 0.5989530086517334, "learning_rate": 0.0004090296815974096, "loss": 3.4613, "step": 29600 }, { "epoch": 3.196765498652291, "grad_norm": 0.5782071352005005, "learning_rate": 0.00040870588235294113, "loss": 3.4469, "step": 29650 }, { "epoch": 3.202156334231806, "grad_norm": 0.6211130023002625, "learning_rate": 0.00040838208310847273, "loss": 3.4652, "step": 29700 }, { "epoch": 3.207547169811321, "grad_norm": 0.6505089998245239, "learning_rate": 0.0004080582838640043, "loss": 3.4618, "step": 29750 }, { "epoch": 3.2129380053908356, "grad_norm": 0.676662027835846, "learning_rate": 0.0004077344846195359, "loss": 3.437, "step": 29800 }, { "epoch": 3.2183288409703503, 
"grad_norm": 0.6046519875526428, "learning_rate": 0.00040741068537506744, "loss": 3.4592, "step": 29850 }, { "epoch": 3.223719676549865, "grad_norm": 0.6096304655075073, "learning_rate": 0.000407086886130599, "loss": 3.4508, "step": 29900 }, { "epoch": 3.22911051212938, "grad_norm": 0.5660011172294617, "learning_rate": 0.0004067630868861306, "loss": 3.4471, "step": 29950 }, { "epoch": 3.234501347708895, "grad_norm": 0.6080735921859741, "learning_rate": 0.0004064392876416621, "loss": 3.4554, "step": 30000 }, { "epoch": 3.234501347708895, "eval_accuracy": 0.3721109332946239, "eval_loss": 3.484154224395752, "eval_runtime": 183.5489, "eval_samples_per_second": 98.126, "eval_steps_per_second": 6.135, "step": 30000 }, { "epoch": 3.2398921832884096, "grad_norm": 0.5689899325370789, "learning_rate": 0.0004061154883971937, "loss": 3.4345, "step": 30050 }, { "epoch": 3.2452830188679247, "grad_norm": 0.6104323267936707, "learning_rate": 0.00040579168915272525, "loss": 3.451, "step": 30100 }, { "epoch": 3.2506738544474394, "grad_norm": 0.5824128985404968, "learning_rate": 0.00040546788990825685, "loss": 3.4518, "step": 30150 }, { "epoch": 3.256064690026954, "grad_norm": 0.5787423849105835, "learning_rate": 0.0004051440906637884, "loss": 3.4673, "step": 30200 }, { "epoch": 3.2614555256064692, "grad_norm": 0.5913103222846985, "learning_rate": 0.00040482029141931995, "loss": 3.4537, "step": 30250 }, { "epoch": 3.266846361185984, "grad_norm": 0.6089011430740356, "learning_rate": 0.00040449649217485156, "loss": 3.4319, "step": 30300 }, { "epoch": 3.2722371967654986, "grad_norm": 0.5752812623977661, "learning_rate": 0.0004041726929303831, "loss": 3.4487, "step": 30350 }, { "epoch": 3.2776280323450133, "grad_norm": 0.6006338596343994, "learning_rate": 0.0004038488936859147, "loss": 3.4607, "step": 30400 }, { "epoch": 3.2830188679245285, "grad_norm": 0.7376881241798401, "learning_rate": 0.00040352509444144626, "loss": 3.45, "step": 30450 }, { "epoch": 3.288409703504043, "grad_norm": 
0.6478651165962219, "learning_rate": 0.00040320129519697787, "loss": 3.4595, "step": 30500 }, { "epoch": 3.293800539083558, "grad_norm": 0.6603450775146484, "learning_rate": 0.0004028774959525094, "loss": 3.4558, "step": 30550 }, { "epoch": 3.2991913746630726, "grad_norm": 0.5964590311050415, "learning_rate": 0.000402553696708041, "loss": 3.4487, "step": 30600 }, { "epoch": 3.3045822102425877, "grad_norm": 0.5935302376747131, "learning_rate": 0.0004022298974635726, "loss": 3.4468, "step": 30650 }, { "epoch": 3.3099730458221024, "grad_norm": 0.6866962313652039, "learning_rate": 0.00040190609821910407, "loss": 3.4606, "step": 30700 }, { "epoch": 3.315363881401617, "grad_norm": 0.5697274804115295, "learning_rate": 0.0004015822989746357, "loss": 3.4381, "step": 30750 }, { "epoch": 3.3207547169811322, "grad_norm": 0.5480004549026489, "learning_rate": 0.0004012584997301672, "loss": 3.4342, "step": 30800 }, { "epoch": 3.326145552560647, "grad_norm": 0.591159999370575, "learning_rate": 0.00040093470048569883, "loss": 3.4336, "step": 30850 }, { "epoch": 3.3315363881401616, "grad_norm": 0.564811646938324, "learning_rate": 0.0004006109012412304, "loss": 3.4658, "step": 30900 }, { "epoch": 3.3369272237196768, "grad_norm": 0.6144384145736694, "learning_rate": 0.000400287101996762, "loss": 3.4619, "step": 30950 }, { "epoch": 3.3423180592991915, "grad_norm": 0.5722789168357849, "learning_rate": 0.00039996330275229354, "loss": 3.4404, "step": 31000 }, { "epoch": 3.3423180592991915, "eval_accuracy": 0.37290725030941624, "eval_loss": 3.476534605026245, "eval_runtime": 183.314, "eval_samples_per_second": 98.252, "eval_steps_per_second": 6.142, "step": 31000 }, { "epoch": 3.347708894878706, "grad_norm": 0.5752816200256348, "learning_rate": 0.00039963950350782514, "loss": 3.4498, "step": 31050 }, { "epoch": 3.353099730458221, "grad_norm": 0.6340233087539673, "learning_rate": 0.0003993157042633567, "loss": 3.4681, "step": 31100 }, { "epoch": 3.358490566037736, "grad_norm": 
0.6339342594146729, "learning_rate": 0.00039899190501888824, "loss": 3.4499, "step": 31150 }, { "epoch": 3.3638814016172507, "grad_norm": 0.5568013191223145, "learning_rate": 0.00039866810577441985, "loss": 3.4723, "step": 31200 }, { "epoch": 3.3692722371967654, "grad_norm": 0.5924887657165527, "learning_rate": 0.0003983443065299514, "loss": 3.4685, "step": 31250 }, { "epoch": 3.37466307277628, "grad_norm": 0.6027880907058716, "learning_rate": 0.000398020507285483, "loss": 3.4603, "step": 31300 }, { "epoch": 3.3800539083557952, "grad_norm": 0.5847180485725403, "learning_rate": 0.0003976967080410145, "loss": 3.4554, "step": 31350 }, { "epoch": 3.38544474393531, "grad_norm": 0.6057071089744568, "learning_rate": 0.0003973729087965461, "loss": 3.4646, "step": 31400 }, { "epoch": 3.3908355795148246, "grad_norm": 0.6064717769622803, "learning_rate": 0.00039704910955207765, "loss": 3.4559, "step": 31450 }, { "epoch": 3.3962264150943398, "grad_norm": 0.7252898216247559, "learning_rate": 0.00039672531030760926, "loss": 3.4556, "step": 31500 }, { "epoch": 3.4016172506738545, "grad_norm": 0.5483014583587646, "learning_rate": 0.00039640798704803016, "loss": 3.4635, "step": 31550 }, { "epoch": 3.407008086253369, "grad_norm": 0.5969088673591614, "learning_rate": 0.00039608418780356176, "loss": 3.4526, "step": 31600 }, { "epoch": 3.4123989218328843, "grad_norm": 0.6174740791320801, "learning_rate": 0.0003957603885590933, "loss": 3.4417, "step": 31650 }, { "epoch": 3.417789757412399, "grad_norm": 0.5688740611076355, "learning_rate": 0.0003954365893146249, "loss": 3.4588, "step": 31700 }, { "epoch": 3.4231805929919137, "grad_norm": 0.6028859615325928, "learning_rate": 0.00039511279007015647, "loss": 3.4622, "step": 31750 }, { "epoch": 3.4285714285714284, "grad_norm": 0.5561661124229431, "learning_rate": 0.00039478899082568807, "loss": 3.4419, "step": 31800 }, { "epoch": 3.4339622641509435, "grad_norm": 0.6191286444664001, "learning_rate": 0.0003944651915812196, "loss": 3.4556, 
"step": 31850 }, { "epoch": 3.439353099730458, "grad_norm": 0.6074486374855042, "learning_rate": 0.0003941413923367512, "loss": 3.468, "step": 31900 }, { "epoch": 3.444743935309973, "grad_norm": 0.5516985058784485, "learning_rate": 0.0003938175930922828, "loss": 3.4587, "step": 31950 }, { "epoch": 3.450134770889488, "grad_norm": 0.5780602097511292, "learning_rate": 0.0003934937938478143, "loss": 3.4539, "step": 32000 }, { "epoch": 3.450134770889488, "eval_accuracy": 0.3738237374181314, "eval_loss": 3.4704768657684326, "eval_runtime": 183.3826, "eval_samples_per_second": 98.215, "eval_steps_per_second": 6.14, "step": 32000 }, { "epoch": 3.4555256064690028, "grad_norm": 0.5890681147575378, "learning_rate": 0.0003931699946033459, "loss": 3.4567, "step": 32050 }, { "epoch": 3.4609164420485174, "grad_norm": 0.6089186668395996, "learning_rate": 0.00039284619535887743, "loss": 3.4616, "step": 32100 }, { "epoch": 3.466307277628032, "grad_norm": 0.6278788447380066, "learning_rate": 0.00039252239611440904, "loss": 3.4596, "step": 32150 }, { "epoch": 3.4716981132075473, "grad_norm": 0.6116218566894531, "learning_rate": 0.0003921985968699406, "loss": 3.439, "step": 32200 }, { "epoch": 3.477088948787062, "grad_norm": 0.5771948099136353, "learning_rate": 0.0003918747976254722, "loss": 3.4533, "step": 32250 }, { "epoch": 3.4824797843665767, "grad_norm": 0.5892335176467896, "learning_rate": 0.00039155099838100374, "loss": 3.4757, "step": 32300 }, { "epoch": 3.487870619946092, "grad_norm": 0.6450402140617371, "learning_rate": 0.0003912271991365353, "loss": 3.4477, "step": 32350 }, { "epoch": 3.4932614555256065, "grad_norm": 0.585907518863678, "learning_rate": 0.0003909033998920669, "loss": 3.4573, "step": 32400 }, { "epoch": 3.498652291105121, "grad_norm": 0.6161054372787476, "learning_rate": 0.00039057960064759845, "loss": 3.4465, "step": 32450 }, { "epoch": 3.5040431266846364, "grad_norm": 0.5852574110031128, "learning_rate": 0.00039025580140313005, "loss": 3.4551, "step": 32500 
}, { "epoch": 3.509433962264151, "grad_norm": 0.6066722273826599, "learning_rate": 0.0003899320021586616, "loss": 3.4501, "step": 32550 }, { "epoch": 3.5148247978436657, "grad_norm": 0.5780394077301025, "learning_rate": 0.0003896082029141932, "loss": 3.4549, "step": 32600 }, { "epoch": 3.5202156334231804, "grad_norm": 0.6035788059234619, "learning_rate": 0.00038928440366972476, "loss": 3.448, "step": 32650 }, { "epoch": 3.525606469002695, "grad_norm": 0.6049900650978088, "learning_rate": 0.00038896060442525636, "loss": 3.4605, "step": 32700 }, { "epoch": 3.5309973045822103, "grad_norm": 0.6193360686302185, "learning_rate": 0.00038863680518078786, "loss": 3.4481, "step": 32750 }, { "epoch": 3.536388140161725, "grad_norm": 0.6224262714385986, "learning_rate": 0.0003883130059363194, "loss": 3.4517, "step": 32800 }, { "epoch": 3.5417789757412397, "grad_norm": 0.6505341529846191, "learning_rate": 0.000387989206691851, "loss": 3.4337, "step": 32850 }, { "epoch": 3.547169811320755, "grad_norm": 0.5855464935302734, "learning_rate": 0.00038766540744738256, "loss": 3.4484, "step": 32900 }, { "epoch": 3.5525606469002695, "grad_norm": 0.5817187428474426, "learning_rate": 0.00038734160820291417, "loss": 3.4388, "step": 32950 }, { "epoch": 3.557951482479784, "grad_norm": 0.6249986290931702, "learning_rate": 0.0003870178089584457, "loss": 3.4407, "step": 33000 }, { "epoch": 3.557951482479784, "eval_accuracy": 0.3741863121047683, "eval_loss": 3.4653964042663574, "eval_runtime": 183.4614, "eval_samples_per_second": 98.173, "eval_steps_per_second": 6.138, "step": 33000 }, { "epoch": 3.5633423180592994, "grad_norm": 0.6825558543205261, "learning_rate": 0.0003866940097139773, "loss": 3.4488, "step": 33050 }, { "epoch": 3.568733153638814, "grad_norm": 0.6727454662322998, "learning_rate": 0.0003863702104695089, "loss": 3.4594, "step": 33100 }, { "epoch": 3.5741239892183287, "grad_norm": 0.5715111494064331, "learning_rate": 0.0003860464112250404, "loss": 3.4315, "step": 33150 }, { 
"epoch": 3.579514824797844, "grad_norm": 0.5947990417480469, "learning_rate": 0.00038572261198057203, "loss": 3.4395, "step": 33200 }, { "epoch": 3.5849056603773586, "grad_norm": 0.6582793593406677, "learning_rate": 0.0003853988127361036, "loss": 3.4704, "step": 33250 }, { "epoch": 3.5902964959568733, "grad_norm": 0.6481404900550842, "learning_rate": 0.0003850750134916352, "loss": 3.4585, "step": 33300 }, { "epoch": 3.595687331536388, "grad_norm": 0.5785871148109436, "learning_rate": 0.0003847512142471667, "loss": 3.446, "step": 33350 }, { "epoch": 3.601078167115903, "grad_norm": 0.6385973691940308, "learning_rate": 0.0003844274150026983, "loss": 3.4475, "step": 33400 }, { "epoch": 3.606469002695418, "grad_norm": 0.6048685908317566, "learning_rate": 0.00038410361575822984, "loss": 3.4571, "step": 33450 }, { "epoch": 3.6118598382749325, "grad_norm": 0.5991024971008301, "learning_rate": 0.00038377981651376144, "loss": 3.4378, "step": 33500 }, { "epoch": 3.617250673854447, "grad_norm": 0.6056102514266968, "learning_rate": 0.00038346249325418234, "loss": 3.4473, "step": 33550 }, { "epoch": 3.6226415094339623, "grad_norm": 0.6247057914733887, "learning_rate": 0.00038313869400971395, "loss": 3.4526, "step": 33600 }, { "epoch": 3.628032345013477, "grad_norm": 0.6171483397483826, "learning_rate": 0.0003828148947652455, "loss": 3.4612, "step": 33650 }, { "epoch": 3.6334231805929917, "grad_norm": 0.5629798769950867, "learning_rate": 0.0003824910955207771, "loss": 3.4562, "step": 33700 }, { "epoch": 3.638814016172507, "grad_norm": 0.5999249815940857, "learning_rate": 0.00038216729627630865, "loss": 3.4415, "step": 33750 }, { "epoch": 3.6442048517520216, "grad_norm": 0.5683233737945557, "learning_rate": 0.00038184349703184026, "loss": 3.4568, "step": 33800 }, { "epoch": 3.6495956873315363, "grad_norm": 0.6162096858024597, "learning_rate": 0.0003815196977873718, "loss": 3.4557, "step": 33850 }, { "epoch": 3.6549865229110514, "grad_norm": 0.6410558223724365, "learning_rate": 
0.0003811958985429034, "loss": 3.4458, "step": 33900 }, { "epoch": 3.660377358490566, "grad_norm": 0.6777019500732422, "learning_rate": 0.00038087209929843496, "loss": 3.4405, "step": 33950 }, { "epoch": 3.665768194070081, "grad_norm": 0.6722299456596375, "learning_rate": 0.00038054830005396646, "loss": 3.4573, "step": 34000 }, { "epoch": 3.665768194070081, "eval_accuracy": 0.37471262669334165, "eval_loss": 3.458420991897583, "eval_runtime": 183.3402, "eval_samples_per_second": 98.238, "eval_steps_per_second": 6.142, "step": 34000 }, { "epoch": 3.671159029649596, "grad_norm": 0.6154153943061829, "learning_rate": 0.00038022450080949806, "loss": 3.4406, "step": 34050 }, { "epoch": 3.6765498652291106, "grad_norm": 0.5900728702545166, "learning_rate": 0.0003799007015650296, "loss": 3.4567, "step": 34100 }, { "epoch": 3.6819407008086253, "grad_norm": 0.6302698850631714, "learning_rate": 0.0003795769023205612, "loss": 3.4545, "step": 34150 }, { "epoch": 3.68733153638814, "grad_norm": 0.5984343886375427, "learning_rate": 0.00037925310307609277, "loss": 3.4368, "step": 34200 }, { "epoch": 3.6927223719676547, "grad_norm": 0.6989060044288635, "learning_rate": 0.0003789293038316244, "loss": 3.4501, "step": 34250 }, { "epoch": 3.69811320754717, "grad_norm": 0.6396805644035339, "learning_rate": 0.0003786055045871559, "loss": 3.4404, "step": 34300 }, { "epoch": 3.7035040431266846, "grad_norm": 0.560369074344635, "learning_rate": 0.0003782817053426875, "loss": 3.4632, "step": 34350 }, { "epoch": 3.7088948787061993, "grad_norm": 0.6261559128761292, "learning_rate": 0.0003779579060982191, "loss": 3.4469, "step": 34400 }, { "epoch": 3.7142857142857144, "grad_norm": 0.6551036238670349, "learning_rate": 0.00037763410685375063, "loss": 3.433, "step": 34450 }, { "epoch": 3.719676549865229, "grad_norm": 0.7240360975265503, "learning_rate": 0.00037731030760928223, "loss": 3.4508, "step": 34500 }, { "epoch": 3.725067385444744, "grad_norm": 0.6055688858032227, "learning_rate": 
0.0003769865083648138, "loss": 3.4537, "step": 34550 }, { "epoch": 3.730458221024259, "grad_norm": 0.6346741318702698, "learning_rate": 0.0003766627091203454, "loss": 3.4566, "step": 34600 }, { "epoch": 3.7358490566037736, "grad_norm": 0.5869565010070801, "learning_rate": 0.00037633890987587694, "loss": 3.4455, "step": 34650 }, { "epoch": 3.7412398921832883, "grad_norm": 0.606854259967804, "learning_rate": 0.00037601511063140855, "loss": 3.4511, "step": 34700 }, { "epoch": 3.7466307277628035, "grad_norm": 0.6614265441894531, "learning_rate": 0.00037569131138694004, "loss": 3.4331, "step": 34750 }, { "epoch": 3.752021563342318, "grad_norm": 0.6091951727867126, "learning_rate": 0.0003753675121424716, "loss": 3.4687, "step": 34800 }, { "epoch": 3.757412398921833, "grad_norm": 0.6363885402679443, "learning_rate": 0.0003750437128980032, "loss": 3.454, "step": 34850 }, { "epoch": 3.7628032345013476, "grad_norm": 0.6237848997116089, "learning_rate": 0.00037471991365353475, "loss": 3.4517, "step": 34900 }, { "epoch": 3.7681940700808623, "grad_norm": 0.6054222583770752, "learning_rate": 0.00037439611440906635, "loss": 3.4515, "step": 34950 }, { "epoch": 3.7735849056603774, "grad_norm": 0.6497372984886169, "learning_rate": 0.0003740723151645979, "loss": 3.4392, "step": 35000 }, { "epoch": 3.7735849056603774, "eval_accuracy": 0.3750332613652822, "eval_loss": 3.4538044929504395, "eval_runtime": 183.7049, "eval_samples_per_second": 98.043, "eval_steps_per_second": 6.129, "step": 35000 }, { "epoch": 3.778975741239892, "grad_norm": 0.7446255683898926, "learning_rate": 0.0003737485159201295, "loss": 3.4523, "step": 35050 }, { "epoch": 3.784366576819407, "grad_norm": 0.5870745778083801, "learning_rate": 0.00037342471667566106, "loss": 3.4448, "step": 35100 }, { "epoch": 3.789757412398922, "grad_norm": 0.6146149635314941, "learning_rate": 0.00037310091743119266, "loss": 3.4419, "step": 35150 }, { "epoch": 3.7951482479784366, "grad_norm": 0.7114392518997192, "learning_rate": 
0.0003727771181867242, "loss": 3.4663, "step": 35200 }, { "epoch": 3.8005390835579513, "grad_norm": 0.6365261077880859, "learning_rate": 0.00037245331894225576, "loss": 3.4394, "step": 35250 }, { "epoch": 3.8059299191374665, "grad_norm": 0.6266224980354309, "learning_rate": 0.00037212951969778737, "loss": 3.4338, "step": 35300 }, { "epoch": 3.811320754716981, "grad_norm": 0.5985236167907715, "learning_rate": 0.0003718121964382083, "loss": 3.4362, "step": 35350 }, { "epoch": 3.816711590296496, "grad_norm": 0.6070831418037415, "learning_rate": 0.0003714883971937398, "loss": 3.4484, "step": 35400 }, { "epoch": 3.822102425876011, "grad_norm": 0.556121826171875, "learning_rate": 0.0003711645979492714, "loss": 3.4579, "step": 35450 }, { "epoch": 3.8274932614555257, "grad_norm": 0.5937209129333496, "learning_rate": 0.00037084079870480297, "loss": 3.4385, "step": 35500 }, { "epoch": 3.8328840970350404, "grad_norm": 0.5834438800811768, "learning_rate": 0.0003705169994603345, "loss": 3.4426, "step": 35550 }, { "epoch": 3.838274932614555, "grad_norm": 0.6240514516830444, "learning_rate": 0.00037019320021586613, "loss": 3.4437, "step": 35600 }, { "epoch": 3.8436657681940702, "grad_norm": 0.6120893955230713, "learning_rate": 0.0003698694009713977, "loss": 3.4429, "step": 35650 }, { "epoch": 3.849056603773585, "grad_norm": 0.6248950958251953, "learning_rate": 0.0003695456017269293, "loss": 3.4407, "step": 35700 }, { "epoch": 3.8544474393530996, "grad_norm": 0.5935930013656616, "learning_rate": 0.00036922180248246083, "loss": 3.45, "step": 35750 }, { "epoch": 3.8598382749326143, "grad_norm": 0.643417239189148, "learning_rate": 0.00036889800323799244, "loss": 3.4408, "step": 35800 }, { "epoch": 3.8652291105121295, "grad_norm": 0.6277262568473816, "learning_rate": 0.000368574203993524, "loss": 3.4397, "step": 35850 }, { "epoch": 3.870619946091644, "grad_norm": 0.6452029347419739, "learning_rate": 0.0003682504047490556, "loss": 3.4507, "step": 35900 }, { "epoch": 3.876010781671159, 
"grad_norm": 0.645775556564331, "learning_rate": 0.00036792660550458714, "loss": 3.4488, "step": 35950 }, { "epoch": 3.881401617250674, "grad_norm": 0.6617048382759094, "learning_rate": 0.00036760280626011864, "loss": 3.4464, "step": 36000 }, { "epoch": 3.881401617250674, "eval_accuracy": 0.37615086496934197, "eval_loss": 3.445986032485962, "eval_runtime": 183.1416, "eval_samples_per_second": 98.345, "eval_steps_per_second": 6.148, "step": 36000 }, { "epoch": 3.8867924528301887, "grad_norm": 0.6022095084190369, "learning_rate": 0.0003672790070156503, "loss": 3.4428, "step": 36050 }, { "epoch": 3.8921832884097034, "grad_norm": 0.5766339302062988, "learning_rate": 0.0003669552077711818, "loss": 3.4499, "step": 36100 }, { "epoch": 3.8975741239892185, "grad_norm": 0.6311817765235901, "learning_rate": 0.0003666314085267134, "loss": 3.4279, "step": 36150 }, { "epoch": 3.9029649595687332, "grad_norm": 0.6049012541770935, "learning_rate": 0.00036630760928224495, "loss": 3.4384, "step": 36200 }, { "epoch": 3.908355795148248, "grad_norm": 0.6929218769073486, "learning_rate": 0.00036598381003777656, "loss": 3.4577, "step": 36250 }, { "epoch": 3.913746630727763, "grad_norm": 0.6345332264900208, "learning_rate": 0.0003656600107933081, "loss": 3.4422, "step": 36300 }, { "epoch": 3.9191374663072778, "grad_norm": 0.6367397308349609, "learning_rate": 0.0003653362115488397, "loss": 3.4492, "step": 36350 }, { "epoch": 3.9245283018867925, "grad_norm": 0.6182951331138611, "learning_rate": 0.00036501241230437126, "loss": 3.4355, "step": 36400 }, { "epoch": 3.929919137466307, "grad_norm": 0.5866326093673706, "learning_rate": 0.0003646886130599028, "loss": 3.458, "step": 36450 }, { "epoch": 3.935309973045822, "grad_norm": 0.6007795333862305, "learning_rate": 0.0003643648138154344, "loss": 3.4518, "step": 36500 }, { "epoch": 3.940700808625337, "grad_norm": 0.575502336025238, "learning_rate": 0.00036404101457096597, "loss": 3.4513, "step": 36550 }, { "epoch": 3.9460916442048517, 
"grad_norm": 0.5908992290496826, "learning_rate": 0.00036371721532649757, "loss": 3.452, "step": 36600 }, { "epoch": 3.9514824797843664, "grad_norm": 0.6322728395462036, "learning_rate": 0.0003633934160820291, "loss": 3.4509, "step": 36650 }, { "epoch": 3.9568733153638815, "grad_norm": 0.7075260877609253, "learning_rate": 0.00036306961683756073, "loss": 3.4427, "step": 36700 }, { "epoch": 3.9622641509433962, "grad_norm": 0.6351099610328674, "learning_rate": 0.0003627458175930922, "loss": 3.474, "step": 36750 }, { "epoch": 3.967654986522911, "grad_norm": 0.6212638020515442, "learning_rate": 0.0003624220183486238, "loss": 3.4598, "step": 36800 }, { "epoch": 3.973045822102426, "grad_norm": 0.6004298329353333, "learning_rate": 0.0003620982191041554, "loss": 3.4321, "step": 36850 }, { "epoch": 3.9784366576819408, "grad_norm": 0.6405156254768372, "learning_rate": 0.00036177441985968693, "loss": 3.4454, "step": 36900 }, { "epoch": 3.9838274932614555, "grad_norm": 0.6019781231880188, "learning_rate": 0.00036145062061521854, "loss": 3.424, "step": 36950 }, { "epoch": 3.9892183288409706, "grad_norm": 0.6448103785514832, "learning_rate": 0.0003611268213707501, "loss": 3.4344, "step": 37000 }, { "epoch": 3.9892183288409706, "eval_accuracy": 0.37672487817566586, "eval_loss": 3.4396376609802246, "eval_runtime": 183.5533, "eval_samples_per_second": 98.124, "eval_steps_per_second": 6.134, "step": 37000 }, { "epoch": 3.9946091644204853, "grad_norm": 0.6120318174362183, "learning_rate": 0.0003608030221262817, "loss": 3.4419, "step": 37050 }, { "epoch": 4.0, "grad_norm": 1.1697500944137573, "learning_rate": 0.00036047922288181324, "loss": 3.4236, "step": 37100 }, { "epoch": 4.005390835579515, "grad_norm": 0.6020103693008423, "learning_rate": 0.00036015542363734485, "loss": 3.3492, "step": 37150 }, { "epoch": 4.010781671159029, "grad_norm": 0.5951403975486755, "learning_rate": 0.0003598316243928764, "loss": 3.3509, "step": 37200 }, { "epoch": 4.0161725067385445, "grad_norm": 
0.57025545835495, "learning_rate": 0.00035950782514840795, "loss": 3.3444, "step": 37250 }, { "epoch": 4.02156334231806, "grad_norm": 0.6555331349372864, "learning_rate": 0.00035918402590393955, "loss": 3.3651, "step": 37300 }, { "epoch": 4.026954177897574, "grad_norm": 0.5830883979797363, "learning_rate": 0.00035886022665947105, "loss": 3.3525, "step": 37350 }, { "epoch": 4.032345013477089, "grad_norm": 0.6118924021720886, "learning_rate": 0.0003585364274150027, "loss": 3.3481, "step": 37400 }, { "epoch": 4.037735849056604, "grad_norm": 0.6575861573219299, "learning_rate": 0.0003582126281705342, "loss": 3.3609, "step": 37450 }, { "epoch": 4.0431266846361185, "grad_norm": 0.664242684841156, "learning_rate": 0.0003578888289260658, "loss": 3.3625, "step": 37500 }, { "epoch": 4.048517520215634, "grad_norm": 0.5893779993057251, "learning_rate": 0.00035756502968159736, "loss": 3.3602, "step": 37550 }, { "epoch": 4.053908355795148, "grad_norm": 0.662519633769989, "learning_rate": 0.00035724123043712896, "loss": 3.3475, "step": 37600 }, { "epoch": 4.059299191374663, "grad_norm": 0.609507143497467, "learning_rate": 0.0003569174311926605, "loss": 3.3682, "step": 37650 }, { "epoch": 4.064690026954178, "grad_norm": 0.6340688467025757, "learning_rate": 0.00035659363194819206, "loss": 3.3747, "step": 37700 }, { "epoch": 4.070080862533692, "grad_norm": 0.6548017263412476, "learning_rate": 0.00035626983270372367, "loss": 3.3722, "step": 37750 }, { "epoch": 4.0754716981132075, "grad_norm": 0.6166157126426697, "learning_rate": 0.0003559460334592552, "loss": 3.3431, "step": 37800 }, { "epoch": 4.080862533692723, "grad_norm": 0.6283932328224182, "learning_rate": 0.0003556222342147868, "loss": 3.3594, "step": 37850 }, { "epoch": 4.086253369272237, "grad_norm": 0.6251431703567505, "learning_rate": 0.0003552984349703184, "loss": 3.3586, "step": 37900 }, { "epoch": 4.091644204851752, "grad_norm": 0.6172202825546265, "learning_rate": 0.00035497463572585, "loss": 3.3612, "step": 37950 }, { 
"epoch": 4.097035040431267, "grad_norm": 0.655680775642395, "learning_rate": 0.00035465083648138153, "loss": 3.3725, "step": 38000 }, { "epoch": 4.097035040431267, "eval_accuracy": 0.37726716473874794, "eval_loss": 3.4422571659088135, "eval_runtime": 183.6362, "eval_samples_per_second": 98.08, "eval_steps_per_second": 6.132, "step": 38000 }, { "epoch": 4.1024258760107815, "grad_norm": 0.6230634450912476, "learning_rate": 0.00035432703723691314, "loss": 3.3661, "step": 38050 }, { "epoch": 4.107816711590297, "grad_norm": 0.6373627781867981, "learning_rate": 0.00035400323799244463, "loss": 3.3758, "step": 38100 }, { "epoch": 4.113207547169812, "grad_norm": 0.6033377051353455, "learning_rate": 0.0003536794387479762, "loss": 3.3743, "step": 38150 }, { "epoch": 4.118598382749326, "grad_norm": 0.6158263683319092, "learning_rate": 0.0003533556395035078, "loss": 3.3557, "step": 38200 }, { "epoch": 4.123989218328841, "grad_norm": 0.688728928565979, "learning_rate": 0.00035303184025903934, "loss": 3.3649, "step": 38250 }, { "epoch": 4.129380053908355, "grad_norm": 0.6338489055633545, "learning_rate": 0.00035270804101457094, "loss": 3.3688, "step": 38300 }, { "epoch": 4.1347708894878705, "grad_norm": 0.6235848069190979, "learning_rate": 0.0003523842417701025, "loss": 3.3523, "step": 38350 }, { "epoch": 4.140161725067386, "grad_norm": 0.6039944887161255, "learning_rate": 0.0003520604425256341, "loss": 3.3666, "step": 38400 }, { "epoch": 4.1455525606469, "grad_norm": 0.6759158372879028, "learning_rate": 0.00035173664328116565, "loss": 3.3516, "step": 38450 }, { "epoch": 4.150943396226415, "grad_norm": 0.6314166188240051, "learning_rate": 0.00035141284403669725, "loss": 3.3835, "step": 38500 }, { "epoch": 4.15633423180593, "grad_norm": 0.6417436003684998, "learning_rate": 0.0003510890447922288, "loss": 3.3594, "step": 38550 }, { "epoch": 4.1617250673854445, "grad_norm": 0.6634682416915894, "learning_rate": 0.00035076524554776035, "loss": 3.368, "step": 38600 }, { "epoch": 
4.16711590296496, "grad_norm": 0.6313776969909668, "learning_rate": 0.00035044144630329196, "loss": 3.3669, "step": 38650 }, { "epoch": 4.172506738544475, "grad_norm": 0.616958737373352, "learning_rate": 0.00035011764705882346, "loss": 3.3885, "step": 38700 }, { "epoch": 4.177897574123989, "grad_norm": 0.7168732285499573, "learning_rate": 0.0003497938478143551, "loss": 3.3589, "step": 38750 }, { "epoch": 4.183288409703504, "grad_norm": 0.7030311226844788, "learning_rate": 0.0003494700485698866, "loss": 3.3839, "step": 38800 }, { "epoch": 4.188679245283019, "grad_norm": 0.6451276540756226, "learning_rate": 0.0003491462493254182, "loss": 3.374, "step": 38850 }, { "epoch": 4.1940700808625335, "grad_norm": 0.6581581234931946, "learning_rate": 0.00034882245008094977, "loss": 3.3815, "step": 38900 }, { "epoch": 4.199460916442049, "grad_norm": 0.6221701502799988, "learning_rate": 0.0003484986508364813, "loss": 3.3756, "step": 38950 }, { "epoch": 4.204851752021563, "grad_norm": 0.7239381074905396, "learning_rate": 0.0003481748515920129, "loss": 3.371, "step": 39000 }, { "epoch": 4.204851752021563, "eval_accuracy": 0.37740939136889706, "eval_loss": 3.438514232635498, "eval_runtime": 183.1338, "eval_samples_per_second": 98.349, "eval_steps_per_second": 6.149, "step": 39000 }, { "epoch": 4.210242587601078, "grad_norm": 0.6811919212341309, "learning_rate": 0.00034785105234754447, "loss": 3.3755, "step": 39050 }, { "epoch": 4.215633423180593, "grad_norm": 0.6211300492286682, "learning_rate": 0.0003475272531030761, "loss": 3.3774, "step": 39100 }, { "epoch": 4.2210242587601075, "grad_norm": 0.6302964091300964, "learning_rate": 0.00034720345385860763, "loss": 3.3873, "step": 39150 }, { "epoch": 4.226415094339623, "grad_norm": 0.6861122250556946, "learning_rate": 0.00034687965461413923, "loss": 3.3823, "step": 39200 }, { "epoch": 4.231805929919138, "grad_norm": 0.6081944108009338, "learning_rate": 0.0003465558553696708, "loss": 3.38, "step": 39250 }, { "epoch": 4.237196765498652, 
"grad_norm": 0.6504591703414917, "learning_rate": 0.0003462320561252024, "loss": 3.375, "step": 39300 }, { "epoch": 4.242587601078167, "grad_norm": 0.6743220090866089, "learning_rate": 0.00034590825688073394, "loss": 3.3905, "step": 39350 }, { "epoch": 4.247978436657682, "grad_norm": 0.6235594749450684, "learning_rate": 0.0003455909336211549, "loss": 3.3754, "step": 39400 }, { "epoch": 4.2533692722371965, "grad_norm": 0.6357383728027344, "learning_rate": 0.0003452671343766864, "loss": 3.3785, "step": 39450 }, { "epoch": 4.258760107816712, "grad_norm": 0.6747174859046936, "learning_rate": 0.000344943335132218, "loss": 3.377, "step": 39500 }, { "epoch": 4.264150943396227, "grad_norm": 0.6351191997528076, "learning_rate": 0.00034461953588774954, "loss": 3.3805, "step": 39550 }, { "epoch": 4.269541778975741, "grad_norm": 0.5940690636634827, "learning_rate": 0.00034429573664328115, "loss": 3.3745, "step": 39600 }, { "epoch": 4.274932614555256, "grad_norm": 0.5747135281562805, "learning_rate": 0.0003439719373988127, "loss": 3.3735, "step": 39650 }, { "epoch": 4.280323450134771, "grad_norm": 0.6357155442237854, "learning_rate": 0.00034364813815434425, "loss": 3.3891, "step": 39700 }, { "epoch": 4.285714285714286, "grad_norm": 0.6541160345077515, "learning_rate": 0.00034332433890987585, "loss": 3.3779, "step": 39750 }, { "epoch": 4.291105121293801, "grad_norm": 0.6745419502258301, "learning_rate": 0.0003430005396654074, "loss": 3.384, "step": 39800 }, { "epoch": 4.296495956873315, "grad_norm": 0.641961395740509, "learning_rate": 0.000342676740420939, "loss": 3.3535, "step": 39850 }, { "epoch": 4.30188679245283, "grad_norm": 0.6052359342575073, "learning_rate": 0.00034235294117647056, "loss": 3.3919, "step": 39900 }, { "epoch": 4.307277628032345, "grad_norm": 0.6415327787399292, "learning_rate": 0.00034202914193200216, "loss": 3.3692, "step": 39950 }, { "epoch": 4.3126684636118595, "grad_norm": 0.6692045331001282, "learning_rate": 0.0003417053426875337, "loss": 3.3901, 
"step": 40000 }, { "epoch": 4.3126684636118595, "eval_accuracy": 0.3779461366347006, "eval_loss": 3.4338619709014893, "eval_runtime": 183.59, "eval_samples_per_second": 98.104, "eval_steps_per_second": 6.133, "step": 40000 }, { "epoch": 4.318059299191375, "grad_norm": 0.659393846988678, "learning_rate": 0.0003413815434430653, "loss": 3.4026, "step": 40050 }, { "epoch": 4.32345013477089, "grad_norm": 0.6064386963844299, "learning_rate": 0.0003410577441985968, "loss": 3.3698, "step": 40100 }, { "epoch": 4.328840970350404, "grad_norm": 0.6234272718429565, "learning_rate": 0.00034073394495412837, "loss": 3.3865, "step": 40150 }, { "epoch": 4.334231805929919, "grad_norm": 0.6467458009719849, "learning_rate": 0.00034041014570965997, "loss": 3.4039, "step": 40200 }, { "epoch": 4.339622641509434, "grad_norm": 0.5988481640815735, "learning_rate": 0.0003400863464651915, "loss": 3.3805, "step": 40250 }, { "epoch": 4.345013477088949, "grad_norm": 0.660705029964447, "learning_rate": 0.0003397625472207231, "loss": 3.3948, "step": 40300 }, { "epoch": 4.350404312668464, "grad_norm": 0.6406025886535645, "learning_rate": 0.0003394387479762547, "loss": 3.401, "step": 40350 }, { "epoch": 4.355795148247978, "grad_norm": 0.6672471761703491, "learning_rate": 0.0003391149487317863, "loss": 3.3882, "step": 40400 }, { "epoch": 4.361185983827493, "grad_norm": 0.6221401691436768, "learning_rate": 0.00033879114948731783, "loss": 3.3945, "step": 40450 }, { "epoch": 4.366576819407008, "grad_norm": 0.6297544240951538, "learning_rate": 0.00033846735024284944, "loss": 3.3631, "step": 40500 }, { "epoch": 4.3719676549865225, "grad_norm": 0.6644108295440674, "learning_rate": 0.000338143550998381, "loss": 3.3732, "step": 40550 }, { "epoch": 4.377358490566038, "grad_norm": 0.633611798286438, "learning_rate": 0.00033781975175391254, "loss": 3.3804, "step": 40600 }, { "epoch": 4.382749326145553, "grad_norm": 0.641505241394043, "learning_rate": 0.00033749595250944414, "loss": 3.3789, "step": 40650 }, { 
"epoch": 4.388140161725067, "grad_norm": 0.6398807764053345, "learning_rate": 0.00033717215326497564, "loss": 3.3847, "step": 40700 }, { "epoch": 4.393530997304582, "grad_norm": 0.6694504022598267, "learning_rate": 0.0003368483540205073, "loss": 3.3853, "step": 40750 }, { "epoch": 4.398921832884097, "grad_norm": 0.6023000478744507, "learning_rate": 0.0003365245547760388, "loss": 3.3843, "step": 40800 }, { "epoch": 4.404312668463612, "grad_norm": 0.6796262860298157, "learning_rate": 0.0003362007555315704, "loss": 3.3754, "step": 40850 }, { "epoch": 4.409703504043127, "grad_norm": 0.6454067230224609, "learning_rate": 0.00033587695628710195, "loss": 3.3825, "step": 40900 }, { "epoch": 4.415094339622642, "grad_norm": 0.6159910559654236, "learning_rate": 0.00033555315704263355, "loss": 3.3901, "step": 40950 }, { "epoch": 4.420485175202156, "grad_norm": 0.6237287521362305, "learning_rate": 0.0003352293577981651, "loss": 3.3831, "step": 41000 }, { "epoch": 4.420485175202156, "eval_accuracy": 0.378417364209152, "eval_loss": 3.4264814853668213, "eval_runtime": 183.3491, "eval_samples_per_second": 98.233, "eval_steps_per_second": 6.141, "step": 41000 }, { "epoch": 4.425876010781671, "grad_norm": 0.6706305742263794, "learning_rate": 0.00033490555855369665, "loss": 3.3701, "step": 41050 }, { "epoch": 4.431266846361186, "grad_norm": 0.6600586175918579, "learning_rate": 0.00033458175930922826, "loss": 3.3875, "step": 41100 }, { "epoch": 4.436657681940701, "grad_norm": 0.5768634080886841, "learning_rate": 0.0003342579600647598, "loss": 3.383, "step": 41150 }, { "epoch": 4.442048517520216, "grad_norm": 0.6009304523468018, "learning_rate": 0.0003339341608202914, "loss": 3.3686, "step": 41200 }, { "epoch": 4.44743935309973, "grad_norm": 0.6819839477539062, "learning_rate": 0.00033361036157582297, "loss": 3.3808, "step": 41250 }, { "epoch": 4.452830188679245, "grad_norm": 0.63370281457901, "learning_rate": 0.00033328656233135457, "loss": 3.3742, "step": 41300 }, { "epoch": 
4.45822102425876, "grad_norm": 0.6805122494697571, "learning_rate": 0.0003329627630868861, "loss": 3.3999, "step": 41350 }, { "epoch": 4.463611859838275, "grad_norm": 0.6374489068984985, "learning_rate": 0.0003326454398273071, "loss": 3.3923, "step": 41400 }, { "epoch": 4.46900269541779, "grad_norm": 0.632504403591156, "learning_rate": 0.00033232164058283857, "loss": 3.3802, "step": 41450 }, { "epoch": 4.474393530997305, "grad_norm": 0.6276558637619019, "learning_rate": 0.0003319978413383702, "loss": 3.371, "step": 41500 }, { "epoch": 4.479784366576819, "grad_norm": 0.6790854334831238, "learning_rate": 0.0003316740420939017, "loss": 3.3857, "step": 41550 }, { "epoch": 4.485175202156334, "grad_norm": 0.6570220589637756, "learning_rate": 0.00033135024284943333, "loss": 3.3908, "step": 41600 }, { "epoch": 4.490566037735849, "grad_norm": 0.6306306719779968, "learning_rate": 0.0003310264436049649, "loss": 3.3758, "step": 41650 }, { "epoch": 4.495956873315364, "grad_norm": 0.6574743986129761, "learning_rate": 0.0003307026443604965, "loss": 3.3988, "step": 41700 }, { "epoch": 4.501347708894879, "grad_norm": 0.6541933417320251, "learning_rate": 0.00033037884511602804, "loss": 3.379, "step": 41750 }, { "epoch": 4.506738544474393, "grad_norm": 0.6046768426895142, "learning_rate": 0.0003300550458715596, "loss": 3.3775, "step": 41800 }, { "epoch": 4.512129380053908, "grad_norm": 0.6767274737358093, "learning_rate": 0.0003297312466270912, "loss": 3.3878, "step": 41850 }, { "epoch": 4.517520215633423, "grad_norm": 0.6689441204071045, "learning_rate": 0.00032940744738262274, "loss": 3.3872, "step": 41900 }, { "epoch": 4.5229110512129385, "grad_norm": 0.6398838758468628, "learning_rate": 0.00032908364813815435, "loss": 3.3898, "step": 41950 }, { "epoch": 4.528301886792453, "grad_norm": 0.6058347225189209, "learning_rate": 0.0003287598488936859, "loss": 3.3797, "step": 42000 }, { "epoch": 4.528301886792453, "eval_accuracy": 0.37927583067577425, "eval_loss": 3.4238638877868652, 
"eval_runtime": 183.4895, "eval_samples_per_second": 98.158, "eval_steps_per_second": 6.137, "step": 42000 }, { "epoch": 4.533692722371968, "grad_norm": 0.5938351154327393, "learning_rate": 0.0003284360496492175, "loss": 3.3765, "step": 42050 }, { "epoch": 4.539083557951482, "grad_norm": 0.6558327674865723, "learning_rate": 0.000328112250404749, "loss": 3.4059, "step": 42100 }, { "epoch": 4.544474393530997, "grad_norm": 0.64605313539505, "learning_rate": 0.00032778845116028066, "loss": 3.3917, "step": 42150 }, { "epoch": 4.549865229110512, "grad_norm": 0.5833234190940857, "learning_rate": 0.00032746465191581215, "loss": 3.3712, "step": 42200 }, { "epoch": 4.555256064690027, "grad_norm": 0.822323203086853, "learning_rate": 0.0003271408526713437, "loss": 3.3953, "step": 42250 }, { "epoch": 4.560646900269542, "grad_norm": 0.6433358192443848, "learning_rate": 0.0003268170534268753, "loss": 3.3939, "step": 42300 }, { "epoch": 4.566037735849057, "grad_norm": 0.7177082300186157, "learning_rate": 0.00032649325418240686, "loss": 3.3757, "step": 42350 }, { "epoch": 4.571428571428571, "grad_norm": 0.6373773217201233, "learning_rate": 0.00032616945493793846, "loss": 3.4017, "step": 42400 }, { "epoch": 4.576819407008086, "grad_norm": 0.6122283935546875, "learning_rate": 0.00032584565569347, "loss": 3.3703, "step": 42450 }, { "epoch": 4.5822102425876015, "grad_norm": 0.6813517212867737, "learning_rate": 0.0003255218564490016, "loss": 3.3709, "step": 42500 }, { "epoch": 4.587601078167116, "grad_norm": 0.6224530935287476, "learning_rate": 0.00032519805720453317, "loss": 3.3845, "step": 42550 }, { "epoch": 4.592991913746631, "grad_norm": 0.6742523312568665, "learning_rate": 0.0003248742579600647, "loss": 3.3823, "step": 42600 }, { "epoch": 4.598382749326145, "grad_norm": 0.7158631682395935, "learning_rate": 0.0003245504587155963, "loss": 3.3956, "step": 42650 }, { "epoch": 4.60377358490566, "grad_norm": 0.6356601715087891, "learning_rate": 0.0003242266594711278, "loss": 3.3887, 
"step": 42700 }, { "epoch": 4.609164420485175, "grad_norm": 0.6447049975395203, "learning_rate": 0.0003239028602266595, "loss": 3.3758, "step": 42750 }, { "epoch": 4.6145552560646905, "grad_norm": 0.6369867324829102, "learning_rate": 0.000323579060982191, "loss": 3.3985, "step": 42800 }, { "epoch": 4.619946091644205, "grad_norm": 0.6150829195976257, "learning_rate": 0.0003232552617377226, "loss": 3.3909, "step": 42850 }, { "epoch": 4.62533692722372, "grad_norm": 0.5987628102302551, "learning_rate": 0.00032293146249325413, "loss": 3.378, "step": 42900 }, { "epoch": 4.630727762803234, "grad_norm": 0.6027454733848572, "learning_rate": 0.00032260766324878574, "loss": 3.382, "step": 42950 }, { "epoch": 4.636118598382749, "grad_norm": 0.6479946374893188, "learning_rate": 0.0003222838640043173, "loss": 3.3968, "step": 43000 }, { "epoch": 4.636118598382749, "eval_accuracy": 0.3795472455895348, "eval_loss": 3.419617176055908, "eval_runtime": 183.1797, "eval_samples_per_second": 98.324, "eval_steps_per_second": 6.147, "step": 43000 }, { "epoch": 4.6415094339622645, "grad_norm": 0.6356445550918579, "learning_rate": 0.00032196006475984884, "loss": 3.3766, "step": 43050 }, { "epoch": 4.646900269541779, "grad_norm": 0.6246060132980347, "learning_rate": 0.00032163626551538044, "loss": 3.3818, "step": 43100 }, { "epoch": 4.652291105121294, "grad_norm": 0.6289001107215881, "learning_rate": 0.000321312466270912, "loss": 3.3773, "step": 43150 }, { "epoch": 4.657681940700809, "grad_norm": 0.6760928630828857, "learning_rate": 0.0003209886670264436, "loss": 3.3847, "step": 43200 }, { "epoch": 4.663072776280323, "grad_norm": 0.6528955698013306, "learning_rate": 0.00032066486778197515, "loss": 3.3837, "step": 43250 }, { "epoch": 4.668463611859838, "grad_norm": 0.6557238101959229, "learning_rate": 0.00032034106853750675, "loss": 3.3919, "step": 43300 }, { "epoch": 4.6738544474393535, "grad_norm": 0.6291058659553528, "learning_rate": 0.0003200172692930383, "loss": 3.3674, "step": 43350 }, { 
"epoch": 4.679245283018868, "grad_norm": 0.7018590569496155, "learning_rate": 0.0003196934700485699, "loss": 3.377, "step": 43400 }, { "epoch": 4.684636118598383, "grad_norm": 0.6758108139038086, "learning_rate": 0.00031937614678899075, "loss": 3.3833, "step": 43450 }, { "epoch": 4.690026954177897, "grad_norm": 0.6351593136787415, "learning_rate": 0.00031905234754452236, "loss": 3.383, "step": 43500 }, { "epoch": 4.695417789757412, "grad_norm": 0.639167308807373, "learning_rate": 0.0003187285483000539, "loss": 3.3697, "step": 43550 }, { "epoch": 4.7008086253369274, "grad_norm": 0.6248657703399658, "learning_rate": 0.0003184047490555855, "loss": 3.3745, "step": 43600 }, { "epoch": 4.706199460916442, "grad_norm": 0.633437991142273, "learning_rate": 0.00031808094981111706, "loss": 3.3931, "step": 43650 }, { "epoch": 4.711590296495957, "grad_norm": 0.627947211265564, "learning_rate": 0.00031775715056664867, "loss": 3.384, "step": 43700 }, { "epoch": 4.716981132075472, "grad_norm": 0.6561819911003113, "learning_rate": 0.0003174333513221802, "loss": 3.3817, "step": 43750 }, { "epoch": 4.722371967654986, "grad_norm": 0.6273093819618225, "learning_rate": 0.00031710955207771177, "loss": 3.3727, "step": 43800 }, { "epoch": 4.727762803234501, "grad_norm": 0.6125487089157104, "learning_rate": 0.0003167857528332434, "loss": 3.3952, "step": 43850 }, { "epoch": 4.7331536388140165, "grad_norm": 0.6168906092643738, "learning_rate": 0.0003164619535887749, "loss": 3.4017, "step": 43900 }, { "epoch": 4.738544474393531, "grad_norm": 0.631703794002533, "learning_rate": 0.00031613815434430653, "loss": 3.4001, "step": 43950 }, { "epoch": 4.743935309973046, "grad_norm": 0.6847707629203796, "learning_rate": 0.0003158143550998381, "loss": 3.3652, "step": 44000 }, { "epoch": 4.743935309973046, "eval_accuracy": 0.38033150213377975, "eval_loss": 3.4119884967803955, "eval_runtime": 183.7579, "eval_samples_per_second": 98.015, "eval_steps_per_second": 6.128, "step": 44000 }, { "epoch": 
4.74932614555256, "grad_norm": 0.6523143649101257, "learning_rate": 0.0003154905558553697, "loss": 3.3999, "step": 44050 }, { "epoch": 4.754716981132075, "grad_norm": 0.6526023149490356, "learning_rate": 0.0003151667566109012, "loss": 3.3786, "step": 44100 }, { "epoch": 4.7601078167115904, "grad_norm": 0.6396304368972778, "learning_rate": 0.00031484295736643284, "loss": 3.3723, "step": 44150 }, { "epoch": 4.765498652291106, "grad_norm": 0.6429187059402466, "learning_rate": 0.00031451915812196434, "loss": 3.3932, "step": 44200 }, { "epoch": 4.77088948787062, "grad_norm": 0.6210240721702576, "learning_rate": 0.0003141953588774959, "loss": 3.3862, "step": 44250 }, { "epoch": 4.776280323450135, "grad_norm": 0.6462406516075134, "learning_rate": 0.0003138715596330275, "loss": 3.3807, "step": 44300 }, { "epoch": 4.781671159029649, "grad_norm": 0.6584225296974182, "learning_rate": 0.00031354776038855904, "loss": 3.3916, "step": 44350 }, { "epoch": 4.787061994609164, "grad_norm": 0.6552765369415283, "learning_rate": 0.00031322396114409065, "loss": 3.3723, "step": 44400 }, { "epoch": 4.7924528301886795, "grad_norm": 0.6333467364311218, "learning_rate": 0.0003129001618996222, "loss": 3.39, "step": 44450 }, { "epoch": 4.797843665768194, "grad_norm": 0.6195804476737976, "learning_rate": 0.0003125763626551538, "loss": 3.3721, "step": 44500 }, { "epoch": 4.803234501347709, "grad_norm": 0.6451464891433716, "learning_rate": 0.00031225256341068535, "loss": 3.3719, "step": 44550 }, { "epoch": 4.808625336927224, "grad_norm": 0.6498815417289734, "learning_rate": 0.00031192876416621696, "loss": 3.3853, "step": 44600 }, { "epoch": 4.814016172506738, "grad_norm": 0.6606132984161377, "learning_rate": 0.0003116049649217485, "loss": 3.375, "step": 44650 }, { "epoch": 4.819407008086253, "grad_norm": 0.6065442562103271, "learning_rate": 0.00031128116567728, "loss": 3.3695, "step": 44700 }, { "epoch": 4.824797843665769, "grad_norm": 0.6994802355766296, "learning_rate": 0.00031095736643281166, 
"loss": 3.3718, "step": 44750 }, { "epoch": 4.830188679245283, "grad_norm": 0.6376444697380066, "learning_rate": 0.00031063356718834316, "loss": 3.3855, "step": 44800 }, { "epoch": 4.835579514824798, "grad_norm": 0.6514849662780762, "learning_rate": 0.00031030976794387476, "loss": 3.3803, "step": 44850 }, { "epoch": 4.840970350404312, "grad_norm": 0.6474654078483582, "learning_rate": 0.0003099859686994063, "loss": 3.3833, "step": 44900 }, { "epoch": 4.846361185983827, "grad_norm": 0.6419128179550171, "learning_rate": 0.0003096621694549379, "loss": 3.3786, "step": 44950 }, { "epoch": 4.8517520215633425, "grad_norm": 0.6200140714645386, "learning_rate": 0.00030933837021046947, "loss": 3.3886, "step": 45000 }, { "epoch": 4.8517520215633425, "eval_accuracy": 0.38061834575760994, "eval_loss": 3.4101431369781494, "eval_runtime": 183.1471, "eval_samples_per_second": 98.342, "eval_steps_per_second": 6.148, "step": 45000 }, { "epoch": 4.857142857142857, "grad_norm": 0.6373332738876343, "learning_rate": 0.0003090145709660011, "loss": 3.3643, "step": 45050 }, { "epoch": 4.862533692722372, "grad_norm": 0.6662492752075195, "learning_rate": 0.0003086907717215326, "loss": 3.3895, "step": 45100 }, { "epoch": 4.867924528301887, "grad_norm": 0.7503253817558289, "learning_rate": 0.0003083669724770642, "loss": 3.3651, "step": 45150 }, { "epoch": 4.873315363881401, "grad_norm": 0.6341877579689026, "learning_rate": 0.0003080431732325958, "loss": 3.3864, "step": 45200 }, { "epoch": 4.878706199460916, "grad_norm": 0.6354518532752991, "learning_rate": 0.00030771937398812733, "loss": 3.3839, "step": 45250 }, { "epoch": 4.884097035040432, "grad_norm": 0.6411907076835632, "learning_rate": 0.00030739557474365894, "loss": 3.389, "step": 45300 }, { "epoch": 4.889487870619946, "grad_norm": 0.6452317833900452, "learning_rate": 0.0003070717754991905, "loss": 3.3803, "step": 45350 }, { "epoch": 4.894878706199461, "grad_norm": 0.61771160364151, "learning_rate": 0.0003067479762547221, "loss": 3.3768, 
"step": 45400 }, { "epoch": 4.900269541778976, "grad_norm": 0.6439537405967712, "learning_rate": 0.00030643065299514294, "loss": 3.3857, "step": 45450 }, { "epoch": 4.90566037735849, "grad_norm": 0.6568528413772583, "learning_rate": 0.00030610685375067454, "loss": 3.3843, "step": 45500 }, { "epoch": 4.9110512129380055, "grad_norm": 0.6745019555091858, "learning_rate": 0.0003057830545062061, "loss": 3.3737, "step": 45550 }, { "epoch": 4.916442048517521, "grad_norm": 0.6615657806396484, "learning_rate": 0.0003054592552617377, "loss": 3.3996, "step": 45600 }, { "epoch": 4.921832884097035, "grad_norm": 0.6774879097938538, "learning_rate": 0.00030513545601726925, "loss": 3.3726, "step": 45650 }, { "epoch": 4.92722371967655, "grad_norm": 0.6349773406982422, "learning_rate": 0.00030481165677280085, "loss": 3.3848, "step": 45700 }, { "epoch": 4.932614555256064, "grad_norm": 0.6601877808570862, "learning_rate": 0.0003044878575283324, "loss": 3.3864, "step": 45750 }, { "epoch": 4.938005390835579, "grad_norm": 0.7116173505783081, "learning_rate": 0.000304164058283864, "loss": 3.3814, "step": 45800 }, { "epoch": 4.943396226415095, "grad_norm": 0.7644145488739014, "learning_rate": 0.00030384025903939556, "loss": 3.382, "step": 45850 }, { "epoch": 4.948787061994609, "grad_norm": 0.6855260729789734, "learning_rate": 0.0003035164597949271, "loss": 3.3844, "step": 45900 }, { "epoch": 4.954177897574124, "grad_norm": 0.6151649951934814, "learning_rate": 0.0003031926605504587, "loss": 3.3674, "step": 45950 }, { "epoch": 4.959568733153639, "grad_norm": 0.6668580174446106, "learning_rate": 0.00030286886130599026, "loss": 3.3804, "step": 46000 }, { "epoch": 4.959568733153639, "eval_accuracy": 0.3810672994900594, "eval_loss": 3.4056036472320557, "eval_runtime": 183.5372, "eval_samples_per_second": 98.133, "eval_steps_per_second": 6.135, "step": 46000 }, { "epoch": 4.964959568733153, "grad_norm": 0.6548580527305603, "learning_rate": 0.00030254506206152187, "loss": 3.387, "step": 46050 }, { 
"epoch": 4.9703504043126685, "grad_norm": 0.6375557780265808, "learning_rate": 0.00030222126281705336, "loss": 3.3795, "step": 46100 }, { "epoch": 4.975741239892184, "grad_norm": 0.6740067601203918, "learning_rate": 0.000301897463572585, "loss": 3.3812, "step": 46150 }, { "epoch": 4.981132075471698, "grad_norm": 0.6744655966758728, "learning_rate": 0.0003015736643281165, "loss": 3.3782, "step": 46200 }, { "epoch": 4.986522911051213, "grad_norm": 0.6689214110374451, "learning_rate": 0.00030124986508364807, "loss": 3.4003, "step": 46250 }, { "epoch": 4.991913746630727, "grad_norm": 0.6952337026596069, "learning_rate": 0.0003009260658391797, "loss": 3.3866, "step": 46300 }, { "epoch": 4.997304582210242, "grad_norm": 0.6591714024543762, "learning_rate": 0.0003006022665947112, "loss": 3.3759, "step": 46350 }, { "epoch": 5.002695417789758, "grad_norm": 0.6524887084960938, "learning_rate": 0.00030027846735024283, "loss": 3.3332, "step": 46400 }, { "epoch": 5.008086253369272, "grad_norm": 0.6842234134674072, "learning_rate": 0.00029995466810577443, "loss": 3.2811, "step": 46450 }, { "epoch": 5.013477088948787, "grad_norm": 0.6671633720397949, "learning_rate": 0.00029963086886130593, "loss": 3.2851, "step": 46500 }, { "epoch": 5.018867924528302, "grad_norm": 0.6657042503356934, "learning_rate": 0.00029930706961683754, "loss": 3.2927, "step": 46550 }, { "epoch": 5.024258760107816, "grad_norm": 0.6508234739303589, "learning_rate": 0.0002989832703723691, "loss": 3.2926, "step": 46600 }, { "epoch": 5.0296495956873315, "grad_norm": 0.7433764934539795, "learning_rate": 0.0002986594711279007, "loss": 3.2969, "step": 46650 }, { "epoch": 5.035040431266847, "grad_norm": 0.6221592426300049, "learning_rate": 0.00029833567188343224, "loss": 3.2891, "step": 46700 }, { "epoch": 5.040431266846361, "grad_norm": 0.6607252955436707, "learning_rate": 0.00029801187263896385, "loss": 3.3061, "step": 46750 }, { "epoch": 5.045822102425876, "grad_norm": 0.7137832045555115, "learning_rate": 
0.0002976880733944954, "loss": 3.2899, "step": 46800 }, { "epoch": 5.051212938005391, "grad_norm": 0.6454117894172668, "learning_rate": 0.00029736427415002695, "loss": 3.308, "step": 46850 }, { "epoch": 5.056603773584905, "grad_norm": 0.6954532265663147, "learning_rate": 0.0002970404749055585, "loss": 3.296, "step": 46900 }, { "epoch": 5.061994609164421, "grad_norm": 0.6978551149368286, "learning_rate": 0.0002967166756610901, "loss": 3.2855, "step": 46950 }, { "epoch": 5.067385444743936, "grad_norm": 0.7152319550514221, "learning_rate": 0.00029639287641662165, "loss": 3.3041, "step": 47000 }, { "epoch": 5.067385444743936, "eval_accuracy": 0.3810717542584598, "eval_loss": 3.4087512493133545, "eval_runtime": 183.5405, "eval_samples_per_second": 98.131, "eval_steps_per_second": 6.135, "step": 47000 }, { "epoch": 5.07277628032345, "grad_norm": 0.6855849623680115, "learning_rate": 0.00029606907717215326, "loss": 3.3057, "step": 47050 }, { "epoch": 5.078167115902965, "grad_norm": 0.6449735164642334, "learning_rate": 0.0002957452779276848, "loss": 3.3095, "step": 47100 }, { "epoch": 5.083557951482479, "grad_norm": 0.668123722076416, "learning_rate": 0.00029542147868321636, "loss": 3.304, "step": 47150 }, { "epoch": 5.0889487870619945, "grad_norm": 0.6669653058052063, "learning_rate": 0.00029509767943874796, "loss": 3.2993, "step": 47200 }, { "epoch": 5.09433962264151, "grad_norm": 0.6730850338935852, "learning_rate": 0.0002947738801942795, "loss": 3.3039, "step": 47250 }, { "epoch": 5.099730458221024, "grad_norm": 0.6958271265029907, "learning_rate": 0.0002944500809498111, "loss": 3.2981, "step": 47300 }, { "epoch": 5.105121293800539, "grad_norm": 0.6824030876159668, "learning_rate": 0.00029412628170534267, "loss": 3.2891, "step": 47350 }, { "epoch": 5.110512129380054, "grad_norm": 0.6391562223434448, "learning_rate": 0.0002938024824608742, "loss": 3.3064, "step": 47400 }, { "epoch": 5.115902964959568, "grad_norm": 0.6703659296035767, "learning_rate": 
0.00029347868321640577, "loss": 3.3213, "step": 47450 }, { "epoch": 5.121293800539084, "grad_norm": 0.6760079860687256, "learning_rate": 0.0002931548839719374, "loss": 3.3149, "step": 47500 }, { "epoch": 5.126684636118599, "grad_norm": 0.6816515326499939, "learning_rate": 0.00029283756071235833, "loss": 3.3225, "step": 47550 }, { "epoch": 5.132075471698113, "grad_norm": 0.6747937202453613, "learning_rate": 0.0002925137614678899, "loss": 3.314, "step": 47600 }, { "epoch": 5.137466307277628, "grad_norm": 0.6805335879325867, "learning_rate": 0.00029218996222342143, "loss": 3.3041, "step": 47650 }, { "epoch": 5.142857142857143, "grad_norm": 0.6687912940979004, "learning_rate": 0.00029186616297895303, "loss": 3.3185, "step": 47700 }, { "epoch": 5.1482479784366575, "grad_norm": 0.6776725649833679, "learning_rate": 0.0002915423637344846, "loss": 3.311, "step": 47750 }, { "epoch": 5.153638814016173, "grad_norm": 0.6747115850448608, "learning_rate": 0.00029121856449001613, "loss": 3.3006, "step": 47800 }, { "epoch": 5.159029649595688, "grad_norm": 0.6835746169090271, "learning_rate": 0.00029089476524554774, "loss": 3.3243, "step": 47850 }, { "epoch": 5.164420485175202, "grad_norm": 0.6365933418273926, "learning_rate": 0.0002905709660010793, "loss": 3.3159, "step": 47900 }, { "epoch": 5.169811320754717, "grad_norm": 0.8081470727920532, "learning_rate": 0.0002902471667566109, "loss": 3.3033, "step": 47950 }, { "epoch": 5.175202156334231, "grad_norm": 0.6865705251693726, "learning_rate": 0.00028992336751214245, "loss": 3.3075, "step": 48000 }, { "epoch": 5.175202156334231, "eval_accuracy": 0.3815551509563464, "eval_loss": 3.405240297317505, "eval_runtime": 183.5473, "eval_samples_per_second": 98.127, "eval_steps_per_second": 6.135, "step": 48000 }, { "epoch": 5.180592991913747, "grad_norm": 0.6026649475097656, "learning_rate": 0.00028959956826767405, "loss": 3.3132, "step": 48050 }, { "epoch": 5.185983827493262, "grad_norm": 0.7102674245834351, "learning_rate": 
0.00028927576902320555, "loss": 3.3234, "step": 48100 }, { "epoch": 5.191374663072776, "grad_norm": 0.6844705939292908, "learning_rate": 0.00028895196977873715, "loss": 3.319, "step": 48150 }, { "epoch": 5.196765498652291, "grad_norm": 0.733917236328125, "learning_rate": 0.0002886281705342687, "loss": 3.329, "step": 48200 }, { "epoch": 5.202156334231806, "grad_norm": 0.6499011516571045, "learning_rate": 0.0002883043712898003, "loss": 3.3221, "step": 48250 }, { "epoch": 5.2075471698113205, "grad_norm": 0.6283177733421326, "learning_rate": 0.00028798057204533186, "loss": 3.2958, "step": 48300 }, { "epoch": 5.212938005390836, "grad_norm": 0.6744676232337952, "learning_rate": 0.00028765677280086346, "loss": 3.3312, "step": 48350 }, { "epoch": 5.218328840970351, "grad_norm": 0.6724770665168762, "learning_rate": 0.000287332973556395, "loss": 3.3213, "step": 48400 }, { "epoch": 5.223719676549865, "grad_norm": 0.7276390790939331, "learning_rate": 0.0002870091743119266, "loss": 3.3264, "step": 48450 }, { "epoch": 5.22911051212938, "grad_norm": 0.6782853007316589, "learning_rate": 0.00028668537506745817, "loss": 3.3324, "step": 48500 }, { "epoch": 5.234501347708895, "grad_norm": 0.7115477323532104, "learning_rate": 0.0002863615758229897, "loss": 3.3095, "step": 48550 }, { "epoch": 5.2398921832884096, "grad_norm": 0.6378598213195801, "learning_rate": 0.00028603777657852127, "loss": 3.3119, "step": 48600 }, { "epoch": 5.245283018867925, "grad_norm": 0.678990364074707, "learning_rate": 0.0002857139773340529, "loss": 3.3342, "step": 48650 }, { "epoch": 5.250673854447439, "grad_norm": 0.7339763045310974, "learning_rate": 0.0002853901780895844, "loss": 3.3229, "step": 48700 }, { "epoch": 5.256064690026954, "grad_norm": 0.7002367973327637, "learning_rate": 0.00028506637884511603, "loss": 3.3221, "step": 48750 }, { "epoch": 5.261455525606469, "grad_norm": 0.6659910082817078, "learning_rate": 0.0002847425796006476, "loss": 3.3332, "step": 48800 }, { "epoch": 5.2668463611859835, 
"grad_norm": 0.6622328758239746, "learning_rate": 0.00028441878035617913, "loss": 3.3246, "step": 48850 }, { "epoch": 5.272237196765499, "grad_norm": 0.6887483596801758, "learning_rate": 0.00028409498111171073, "loss": 3.3156, "step": 48900 }, { "epoch": 5.277628032345014, "grad_norm": 0.6723185181617737, "learning_rate": 0.0002837711818672423, "loss": 3.326, "step": 48950 }, { "epoch": 5.283018867924528, "grad_norm": 0.6477317214012146, "learning_rate": 0.00028344738262277384, "loss": 3.3062, "step": 49000 }, { "epoch": 5.283018867924528, "eval_accuracy": 0.3819458667409273, "eval_loss": 3.401156425476074, "eval_runtime": 183.2869, "eval_samples_per_second": 98.267, "eval_steps_per_second": 6.143, "step": 49000 }, { "epoch": 5.288409703504043, "grad_norm": 0.7285588979721069, "learning_rate": 0.00028312358337830544, "loss": 3.3276, "step": 49050 }, { "epoch": 5.293800539083558, "grad_norm": 0.6508597731590271, "learning_rate": 0.000282799784133837, "loss": 3.3288, "step": 49100 }, { "epoch": 5.2991913746630726, "grad_norm": 0.6885204911231995, "learning_rate": 0.00028247598488936854, "loss": 3.3244, "step": 49150 }, { "epoch": 5.304582210242588, "grad_norm": 0.6657274961471558, "learning_rate": 0.00028215218564490015, "loss": 3.3197, "step": 49200 }, { "epoch": 5.309973045822103, "grad_norm": 0.6937828063964844, "learning_rate": 0.0002818283864004317, "loss": 3.2995, "step": 49250 }, { "epoch": 5.315363881401617, "grad_norm": 0.6877214312553406, "learning_rate": 0.0002815045871559633, "loss": 3.3058, "step": 49300 }, { "epoch": 5.320754716981132, "grad_norm": 0.7068207859992981, "learning_rate": 0.00028118078791149485, "loss": 3.3351, "step": 49350 }, { "epoch": 5.3261455525606465, "grad_norm": 0.6211855411529541, "learning_rate": 0.0002808569886670264, "loss": 3.3069, "step": 49400 }, { "epoch": 5.331536388140162, "grad_norm": 0.7042295932769775, "learning_rate": 0.00028053318942255795, "loss": 3.322, "step": 49450 }, { "epoch": 5.336927223719677, "grad_norm": 
0.6894974708557129, "learning_rate": 0.00028020939017808956, "loss": 3.3321, "step": 49500 }, { "epoch": 5.342318059299191, "grad_norm": 0.645094633102417, "learning_rate": 0.0002798920669185105, "loss": 3.3203, "step": 49550 }, { "epoch": 5.347708894878706, "grad_norm": 0.6860967874526978, "learning_rate": 0.00027956826767404206, "loss": 3.3143, "step": 49600 }, { "epoch": 5.353099730458221, "grad_norm": 0.7084428668022156, "learning_rate": 0.00027924446842957367, "loss": 3.3198, "step": 49650 }, { "epoch": 5.3584905660377355, "grad_norm": 0.6508020162582397, "learning_rate": 0.0002789206691851052, "loss": 3.2959, "step": 49700 }, { "epoch": 5.363881401617251, "grad_norm": 0.651019811630249, "learning_rate": 0.00027859686994063677, "loss": 3.3288, "step": 49750 }, { "epoch": 5.369272237196766, "grad_norm": 0.6705125570297241, "learning_rate": 0.0002782730706961683, "loss": 3.3182, "step": 49800 }, { "epoch": 5.37466307277628, "grad_norm": 0.6464545130729675, "learning_rate": 0.0002779492714516999, "loss": 3.3219, "step": 49850 }, { "epoch": 5.380053908355795, "grad_norm": 0.6712095737457275, "learning_rate": 0.00027762547220723147, "loss": 3.3218, "step": 49900 }, { "epoch": 5.38544474393531, "grad_norm": 0.7088808417320251, "learning_rate": 0.0002773016729627631, "loss": 3.32, "step": 49950 }, { "epoch": 5.390835579514825, "grad_norm": 0.6843079924583435, "learning_rate": 0.00027697787371829463, "loss": 3.331, "step": 50000 }, { "epoch": 5.390835579514825, "eval_accuracy": 0.3825648622428064, "eval_loss": 3.397858142852783, "eval_runtime": 183.2184, "eval_samples_per_second": 98.303, "eval_steps_per_second": 6.146, "step": 50000 }, { "epoch": 5.39622641509434, "grad_norm": 0.6787253618240356, "learning_rate": 0.00027665407447382623, "loss": 3.324, "step": 50050 }, { "epoch": 5.401617250673855, "grad_norm": 0.7045350074768066, "learning_rate": 0.0002763302752293578, "loss": 3.3343, "step": 50100 }, { "epoch": 5.407008086253369, "grad_norm": 0.6545983552932739, 
"learning_rate": 0.00027600647598488933, "loss": 3.3073, "step": 50150 }, { "epoch": 5.412398921832884, "grad_norm": 0.7040044665336609, "learning_rate": 0.0002756826767404209, "loss": 3.3056, "step": 50200 }, { "epoch": 5.4177897574123985, "grad_norm": 0.682117760181427, "learning_rate": 0.0002753588774959525, "loss": 3.3306, "step": 50250 }, { "epoch": 5.423180592991914, "grad_norm": 0.6643507480621338, "learning_rate": 0.00027503507825148404, "loss": 3.3126, "step": 50300 }, { "epoch": 5.428571428571429, "grad_norm": 0.6264309287071228, "learning_rate": 0.00027471127900701564, "loss": 3.3265, "step": 50350 }, { "epoch": 5.433962264150943, "grad_norm": 0.6541180610656738, "learning_rate": 0.0002743874797625472, "loss": 3.3153, "step": 50400 }, { "epoch": 5.439353099730458, "grad_norm": 0.7172690629959106, "learning_rate": 0.0002740636805180788, "loss": 3.327, "step": 50450 }, { "epoch": 5.444743935309973, "grad_norm": 0.6622113585472107, "learning_rate": 0.00027373988127361035, "loss": 3.3246, "step": 50500 }, { "epoch": 5.450134770889488, "grad_norm": 0.6717416644096375, "learning_rate": 0.0002734160820291419, "loss": 3.3573, "step": 50550 }, { "epoch": 5.455525606469003, "grad_norm": 0.6707860231399536, "learning_rate": 0.00027309228278467345, "loss": 3.3132, "step": 50600 }, { "epoch": 5.460916442048518, "grad_norm": 0.6682204008102417, "learning_rate": 0.00027276848354020506, "loss": 3.3331, "step": 50650 }, { "epoch": 5.466307277628032, "grad_norm": 0.6391171813011169, "learning_rate": 0.0002724446842957366, "loss": 3.3307, "step": 50700 }, { "epoch": 5.471698113207547, "grad_norm": 0.681674063205719, "learning_rate": 0.0002721208850512682, "loss": 3.3208, "step": 50750 }, { "epoch": 5.4770889487870615, "grad_norm": 0.6989498138427734, "learning_rate": 0.00027179708580679976, "loss": 3.3355, "step": 50800 }, { "epoch": 5.482479784366577, "grad_norm": 0.6760035753250122, "learning_rate": 0.0002714732865623313, "loss": 3.336, "step": 50850 }, { "epoch": 
5.487870619946092, "grad_norm": 0.6494047045707703, "learning_rate": 0.0002711494873178629, "loss": 3.328, "step": 50900 }, { "epoch": 5.493261455525606, "grad_norm": 0.7357152104377747, "learning_rate": 0.00027082568807339447, "loss": 3.3348, "step": 50950 }, { "epoch": 5.498652291105121, "grad_norm": 0.6467098593711853, "learning_rate": 0.000270501888828926, "loss": 3.3158, "step": 51000 }, { "epoch": 5.498652291105121, "eval_accuracy": 0.3829662260103931, "eval_loss": 3.393737316131592, "eval_runtime": 183.6994, "eval_samples_per_second": 98.046, "eval_steps_per_second": 6.13, "step": 51000 }, { "epoch": 5.504043126684636, "grad_norm": 0.6844212412834167, "learning_rate": 0.0002701780895844576, "loss": 3.3327, "step": 51050 }, { "epoch": 5.509433962264151, "grad_norm": 0.6608554124832153, "learning_rate": 0.0002698542903399892, "loss": 3.3068, "step": 51100 }, { "epoch": 5.514824797843666, "grad_norm": 0.7180224061012268, "learning_rate": 0.0002695304910955207, "loss": 3.3203, "step": 51150 }, { "epoch": 5.520215633423181, "grad_norm": 0.7351436614990234, "learning_rate": 0.00026920669185105233, "loss": 3.3291, "step": 51200 }, { "epoch": 5.525606469002695, "grad_norm": 0.6572839021682739, "learning_rate": 0.0002688828926065839, "loss": 3.3288, "step": 51250 }, { "epoch": 5.53099730458221, "grad_norm": 0.7058694958686829, "learning_rate": 0.0002685590933621155, "loss": 3.3398, "step": 51300 }, { "epoch": 5.536388140161725, "grad_norm": 0.6541451811790466, "learning_rate": 0.00026823529411764704, "loss": 3.3093, "step": 51350 }, { "epoch": 5.54177897574124, "grad_norm": 0.7691888809204102, "learning_rate": 0.0002679114948731786, "loss": 3.3288, "step": 51400 }, { "epoch": 5.547169811320755, "grad_norm": 0.6912489533424377, "learning_rate": 0.0002675876956287102, "loss": 3.3276, "step": 51450 }, { "epoch": 5.55256064690027, "grad_norm": 0.6621717810630798, "learning_rate": 0.00026726389638424174, "loss": 3.3131, "step": 51500 }, { "epoch": 5.557951482479784, 
"grad_norm": 0.674435555934906, "learning_rate": 0.0002669400971397733, "loss": 3.3392, "step": 51550 }, { "epoch": 5.563342318059299, "grad_norm": 0.7033376097679138, "learning_rate": 0.00026662277388019424, "loss": 3.3262, "step": 51600 }, { "epoch": 5.568733153638814, "grad_norm": 0.6880776882171631, "learning_rate": 0.00026629897463572585, "loss": 3.3215, "step": 51650 }, { "epoch": 5.574123989218329, "grad_norm": 0.6662737727165222, "learning_rate": 0.0002659751753912574, "loss": 3.3392, "step": 51700 }, { "epoch": 5.579514824797844, "grad_norm": 0.6773297190666199, "learning_rate": 0.00026565137614678895, "loss": 3.328, "step": 51750 }, { "epoch": 5.584905660377358, "grad_norm": 0.7141923308372498, "learning_rate": 0.0002653275769023205, "loss": 3.3329, "step": 51800 }, { "epoch": 5.590296495956873, "grad_norm": 0.6370801329612732, "learning_rate": 0.0002650037776578521, "loss": 3.3387, "step": 51850 }, { "epoch": 5.595687331536388, "grad_norm": 0.7268838286399841, "learning_rate": 0.00026468645439827306, "loss": 3.3367, "step": 51900 }, { "epoch": 5.601078167115903, "grad_norm": 0.6624352335929871, "learning_rate": 0.0002643626551538046, "loss": 3.3439, "step": 51950 }, { "epoch": 5.606469002695418, "grad_norm": 0.6981469988822937, "learning_rate": 0.0002640388559093362, "loss": 3.3354, "step": 52000 }, { "epoch": 5.606469002695418, "eval_accuracy": 0.38319341919881317, "eval_loss": 3.390268087387085, "eval_runtime": 183.3396, "eval_samples_per_second": 98.238, "eval_steps_per_second": 6.142, "step": 52000 }, { "epoch": 5.611859838274933, "grad_norm": 0.7103446125984192, "learning_rate": 0.00026371505666486776, "loss": 3.3376, "step": 52050 }, { "epoch": 5.617250673854447, "grad_norm": 0.6647046804428101, "learning_rate": 0.0002633912574203993, "loss": 3.3324, "step": 52100 }, { "epoch": 5.622641509433962, "grad_norm": 0.6633039712905884, "learning_rate": 0.00026306745817593086, "loss": 3.3342, "step": 52150 }, { "epoch": 5.628032345013477, "grad_norm": 
0.7168214917182922, "learning_rate": 0.00026274365893146247, "loss": 3.3202, "step": 52200 }, { "epoch": 5.633423180592992, "grad_norm": 0.6797349452972412, "learning_rate": 0.000262419859686994, "loss": 3.3205, "step": 52250 }, { "epoch": 5.638814016172507, "grad_norm": 0.7016252279281616, "learning_rate": 0.0002620960604425256, "loss": 3.341, "step": 52300 }, { "epoch": 5.644204851752022, "grad_norm": 0.6899751424789429, "learning_rate": 0.0002617722611980572, "loss": 3.3142, "step": 52350 }, { "epoch": 5.649595687331536, "grad_norm": 0.6652300953865051, "learning_rate": 0.0002614484619535888, "loss": 3.3168, "step": 52400 }, { "epoch": 5.654986522911051, "grad_norm": 0.6907793283462524, "learning_rate": 0.00026112466270912033, "loss": 3.3208, "step": 52450 }, { "epoch": 5.660377358490566, "grad_norm": 0.700583279132843, "learning_rate": 0.0002608008634646519, "loss": 3.3349, "step": 52500 }, { "epoch": 5.665768194070081, "grad_norm": 0.7041873335838318, "learning_rate": 0.00026047706422018343, "loss": 3.3405, "step": 52550 }, { "epoch": 5.671159029649596, "grad_norm": 0.7494111657142639, "learning_rate": 0.00026015326497571504, "loss": 3.3385, "step": 52600 }, { "epoch": 5.67654986522911, "grad_norm": 0.645006000995636, "learning_rate": 0.0002598294657312466, "loss": 3.3303, "step": 52650 }, { "epoch": 5.681940700808625, "grad_norm": 0.6822366118431091, "learning_rate": 0.0002595056664867782, "loss": 3.3312, "step": 52700 }, { "epoch": 5.6873315363881405, "grad_norm": 0.6748678088188171, "learning_rate": 0.00025918186724230974, "loss": 3.3285, "step": 52750 }, { "epoch": 5.692722371967655, "grad_norm": 0.6696112155914307, "learning_rate": 0.00025885806799784135, "loss": 3.3249, "step": 52800 }, { "epoch": 5.69811320754717, "grad_norm": 0.6802454590797424, "learning_rate": 0.0002585342687533729, "loss": 3.3251, "step": 52850 }, { "epoch": 5.703504043126685, "grad_norm": 0.7200360894203186, "learning_rate": 0.00025821046950890445, "loss": 3.3194, "step": 52900 }, 
{ "epoch": 5.708894878706199, "grad_norm": 0.6851761937141418, "learning_rate": 0.000257886670264436, "loss": 3.3145, "step": 52950 }, { "epoch": 5.714285714285714, "grad_norm": 0.6522359251976013, "learning_rate": 0.0002575628710199676, "loss": 3.3422, "step": 53000 }, { "epoch": 5.714285714285714, "eval_accuracy": 0.38380991568427253, "eval_loss": 3.3851399421691895, "eval_runtime": 183.4753, "eval_samples_per_second": 98.166, "eval_steps_per_second": 6.137, "step": 53000 }, { "epoch": 5.719676549865229, "grad_norm": 0.6580410003662109, "learning_rate": 0.00025723907177549915, "loss": 3.3208, "step": 53050 }, { "epoch": 5.725067385444744, "grad_norm": 0.6901311278343201, "learning_rate": 0.00025691527253103076, "loss": 3.314, "step": 53100 }, { "epoch": 5.730458221024259, "grad_norm": 0.7142326235771179, "learning_rate": 0.0002565914732865623, "loss": 3.3408, "step": 53150 }, { "epoch": 5.735849056603773, "grad_norm": 0.6859725117683411, "learning_rate": 0.00025626767404209386, "loss": 3.3055, "step": 53200 }, { "epoch": 5.741239892183288, "grad_norm": 0.6842724680900574, "learning_rate": 0.00025594387479762546, "loss": 3.3248, "step": 53250 }, { "epoch": 5.7466307277628035, "grad_norm": 0.6995661854743958, "learning_rate": 0.000255620075553157, "loss": 3.3282, "step": 53300 }, { "epoch": 5.752021563342318, "grad_norm": 0.6869672536849976, "learning_rate": 0.00025529627630868857, "loss": 3.3208, "step": 53350 }, { "epoch": 5.757412398921833, "grad_norm": 0.6568567156791687, "learning_rate": 0.00025497247706422017, "loss": 3.3467, "step": 53400 }, { "epoch": 5.762803234501348, "grad_norm": 0.6627490520477295, "learning_rate": 0.0002546486778197517, "loss": 3.3176, "step": 53450 }, { "epoch": 5.768194070080862, "grad_norm": 0.6492809653282166, "learning_rate": 0.00025432487857528327, "loss": 3.3171, "step": 53500 }, { "epoch": 5.773584905660377, "grad_norm": 0.7379694581031799, "learning_rate": 0.0002540010793308149, "loss": 3.3445, "step": 53550 }, { "epoch": 
5.7789757412398925, "grad_norm": 0.7134669423103333, "learning_rate": 0.0002536772800863464, "loss": 3.3258, "step": 53600 }, { "epoch": 5.784366576819407, "grad_norm": 0.7085931897163391, "learning_rate": 0.00025335348084187803, "loss": 3.3441, "step": 53650 }, { "epoch": 5.789757412398922, "grad_norm": 0.7263320088386536, "learning_rate": 0.0002530296815974096, "loss": 3.3446, "step": 53700 }, { "epoch": 5.795148247978437, "grad_norm": 0.7074159383773804, "learning_rate": 0.0002527058823529412, "loss": 3.3256, "step": 53750 }, { "epoch": 5.800539083557951, "grad_norm": 0.6964407563209534, "learning_rate": 0.0002523820831084727, "loss": 3.3323, "step": 53800 }, { "epoch": 5.8059299191374665, "grad_norm": 0.6850724220275879, "learning_rate": 0.0002520582838640043, "loss": 3.3159, "step": 53850 }, { "epoch": 5.811320754716981, "grad_norm": 0.6719462871551514, "learning_rate": 0.00025173448461953584, "loss": 3.3493, "step": 53900 }, { "epoch": 5.816711590296496, "grad_norm": 0.6726512908935547, "learning_rate": 0.00025141068537506744, "loss": 3.3207, "step": 53950 }, { "epoch": 5.822102425876011, "grad_norm": 0.6462234258651733, "learning_rate": 0.000251086886130599, "loss": 3.3243, "step": 54000 }, { "epoch": 5.822102425876011, "eval_accuracy": 0.38394768754602127, "eval_loss": 3.3802895545959473, "eval_runtime": 183.3426, "eval_samples_per_second": 98.237, "eval_steps_per_second": 6.142, "step": 54000 }, { "epoch": 5.827493261455525, "grad_norm": 0.7041391134262085, "learning_rate": 0.0002507630868861306, "loss": 3.319, "step": 54050 }, { "epoch": 5.83288409703504, "grad_norm": 0.671320915222168, "learning_rate": 0.00025043928764166215, "loss": 3.3248, "step": 54100 }, { "epoch": 5.8382749326145555, "grad_norm": 0.6599231958389282, "learning_rate": 0.00025011548839719375, "loss": 3.342, "step": 54150 }, { "epoch": 5.84366576819407, "grad_norm": 0.6870060563087463, "learning_rate": 0.0002497916891527253, "loss": 3.3316, "step": 54200 }, { "epoch": 5.849056603773585, 
"grad_norm": 0.6556626558303833, "learning_rate": 0.00024946788990825686, "loss": 3.3394, "step": 54250 }, { "epoch": 5.8544474393531, "grad_norm": 0.7121956944465637, "learning_rate": 0.0002491440906637884, "loss": 3.3378, "step": 54300 }, { "epoch": 5.859838274932614, "grad_norm": 0.6935588717460632, "learning_rate": 0.00024882029141932, "loss": 3.3287, "step": 54350 }, { "epoch": 5.8652291105121295, "grad_norm": 0.6818873882293701, "learning_rate": 0.00024849649217485156, "loss": 3.3147, "step": 54400 }, { "epoch": 5.870619946091644, "grad_norm": 0.6630216836929321, "learning_rate": 0.00024817269293038317, "loss": 3.3099, "step": 54450 }, { "epoch": 5.876010781671159, "grad_norm": 0.7189406752586365, "learning_rate": 0.0002478488936859147, "loss": 3.3263, "step": 54500 }, { "epoch": 5.881401617250674, "grad_norm": 0.708378791809082, "learning_rate": 0.00024752509444144627, "loss": 3.3216, "step": 54550 }, { "epoch": 5.886792452830189, "grad_norm": 0.696700394153595, "learning_rate": 0.00024720129519697787, "loss": 3.3531, "step": 54600 }, { "epoch": 5.892183288409703, "grad_norm": 0.6870036125183105, "learning_rate": 0.0002468774959525094, "loss": 3.3366, "step": 54650 }, { "epoch": 5.8975741239892185, "grad_norm": 0.7101106643676758, "learning_rate": 0.00024655369670804097, "loss": 3.3365, "step": 54700 }, { "epoch": 5.902964959568733, "grad_norm": 0.7199245691299438, "learning_rate": 0.0002462298974635726, "loss": 3.3377, "step": 54750 }, { "epoch": 5.908355795148248, "grad_norm": 0.714850127696991, "learning_rate": 0.00024590609821910413, "loss": 3.3418, "step": 54800 }, { "epoch": 5.913746630727763, "grad_norm": 0.7096270322799683, "learning_rate": 0.0002455822989746357, "loss": 3.3353, "step": 54850 }, { "epoch": 5.919137466307277, "grad_norm": 0.691370964050293, "learning_rate": 0.0002452584997301673, "loss": 3.3158, "step": 54900 }, { "epoch": 5.9245283018867925, "grad_norm": 0.6646804213523865, "learning_rate": 0.00024493470048569883, "loss": 3.3219, 
"step": 54950 }, { "epoch": 5.929919137466308, "grad_norm": 0.6864519119262695, "learning_rate": 0.00024461090124123044, "loss": 3.3245, "step": 55000 }, { "epoch": 5.929919137466308, "eval_accuracy": 0.3845000788276701, "eval_loss": 3.375938892364502, "eval_runtime": 183.427, "eval_samples_per_second": 98.192, "eval_steps_per_second": 6.139, "step": 55000 }, { "epoch": 5.935309973045822, "grad_norm": 0.6417977809906006, "learning_rate": 0.000244287101996762, "loss": 3.3377, "step": 55050 }, { "epoch": 5.940700808625337, "grad_norm": 0.7369255423545837, "learning_rate": 0.00024396330275229354, "loss": 3.3254, "step": 55100 }, { "epoch": 5.946091644204852, "grad_norm": 0.6987436413764954, "learning_rate": 0.00024363950350782512, "loss": 3.3251, "step": 55150 }, { "epoch": 5.951482479784366, "grad_norm": 0.689603865146637, "learning_rate": 0.0002433157042633567, "loss": 3.3442, "step": 55200 }, { "epoch": 5.9568733153638815, "grad_norm": 0.7123790979385376, "learning_rate": 0.00024299190501888827, "loss": 3.3154, "step": 55250 }, { "epoch": 5.962264150943396, "grad_norm": 0.666307270526886, "learning_rate": 0.00024266810577441985, "loss": 3.3173, "step": 55300 }, { "epoch": 5.967654986522911, "grad_norm": 0.6958657503128052, "learning_rate": 0.00024234430652995143, "loss": 3.3256, "step": 55350 }, { "epoch": 5.973045822102426, "grad_norm": 0.7178813219070435, "learning_rate": 0.00024202050728548298, "loss": 3.3286, "step": 55400 }, { "epoch": 5.97843665768194, "grad_norm": 0.690979540348053, "learning_rate": 0.00024169670804101456, "loss": 3.3219, "step": 55450 }, { "epoch": 5.9838274932614555, "grad_norm": 0.6696831583976746, "learning_rate": 0.0002413729087965461, "loss": 3.3019, "step": 55500 }, { "epoch": 5.989218328840971, "grad_norm": 0.6762212514877319, "learning_rate": 0.00024104910955207768, "loss": 3.3312, "step": 55550 }, { "epoch": 5.994609164420485, "grad_norm": 0.6979408264160156, "learning_rate": 0.00024072531030760926, "loss": 3.3208, "step": 55600 }, 
{ "epoch": 6.0, "grad_norm": 1.4232317209243774, "learning_rate": 0.00024040151106314084, "loss": 3.3492, "step": 55650 }, { "epoch": 6.005390835579515, "grad_norm": 0.6714563965797424, "learning_rate": 0.0002400777118186724, "loss": 3.2286, "step": 55700 }, { "epoch": 6.010781671159029, "grad_norm": 0.6843574047088623, "learning_rate": 0.00023975391257420397, "loss": 3.2305, "step": 55750 }, { "epoch": 6.0161725067385445, "grad_norm": 0.7195513248443604, "learning_rate": 0.00023943011332973555, "loss": 3.2422, "step": 55800 }, { "epoch": 6.02156334231806, "grad_norm": 0.6866133809089661, "learning_rate": 0.00023910631408526712, "loss": 3.2445, "step": 55850 }, { "epoch": 6.026954177897574, "grad_norm": 0.7409161925315857, "learning_rate": 0.0002387825148407987, "loss": 3.2428, "step": 55900 }, { "epoch": 6.032345013477089, "grad_norm": 0.7081006169319153, "learning_rate": 0.00023845871559633025, "loss": 3.2432, "step": 55950 }, { "epoch": 6.037735849056604, "grad_norm": 0.736912727355957, "learning_rate": 0.0002381413923367512, "loss": 3.2569, "step": 56000 }, { "epoch": 6.037735849056604, "eval_accuracy": 0.38479583198830114, "eval_loss": 3.3801846504211426, "eval_runtime": 183.4392, "eval_samples_per_second": 98.185, "eval_steps_per_second": 6.138, "step": 56000 }, { "epoch": 6.0431266846361185, "grad_norm": 0.7223060131072998, "learning_rate": 0.00023781759309228275, "loss": 3.2541, "step": 56050 }, { "epoch": 6.048517520215634, "grad_norm": 0.853127658367157, "learning_rate": 0.00023749379384781433, "loss": 3.2419, "step": 56100 }, { "epoch": 6.053908355795148, "grad_norm": 0.7139176726341248, "learning_rate": 0.0002371699946033459, "loss": 3.2564, "step": 56150 }, { "epoch": 6.059299191374663, "grad_norm": 0.7075513005256653, "learning_rate": 0.0002368461953588775, "loss": 3.2494, "step": 56200 }, { "epoch": 6.064690026954178, "grad_norm": 0.7035233974456787, "learning_rate": 0.00023652239611440904, "loss": 3.2336, "step": 56250 }, { "epoch": 
6.070080862533692, "grad_norm": 0.7118614912033081, "learning_rate": 0.00023619859686994062, "loss": 3.2595, "step": 56300 }, { "epoch": 6.0754716981132075, "grad_norm": 0.7053843140602112, "learning_rate": 0.00023587479762547217, "loss": 3.2453, "step": 56350 }, { "epoch": 6.080862533692723, "grad_norm": 0.7055346369743347, "learning_rate": 0.00023555099838100374, "loss": 3.2494, "step": 56400 }, { "epoch": 6.086253369272237, "grad_norm": 0.739841878414154, "learning_rate": 0.00023522719913653532, "loss": 3.2442, "step": 56450 }, { "epoch": 6.091644204851752, "grad_norm": 0.7180924415588379, "learning_rate": 0.0002349033998920669, "loss": 3.2631, "step": 56500 }, { "epoch": 6.097035040431267, "grad_norm": 0.7231001853942871, "learning_rate": 0.00023457960064759848, "loss": 3.2418, "step": 56550 }, { "epoch": 6.1024258760107815, "grad_norm": 0.706662118434906, "learning_rate": 0.00023425580140313005, "loss": 3.2493, "step": 56600 }, { "epoch": 6.107816711590297, "grad_norm": 0.6997068524360657, "learning_rate": 0.00023393200215866163, "loss": 3.2453, "step": 56650 }, { "epoch": 6.113207547169812, "grad_norm": 0.6997630000114441, "learning_rate": 0.00023360820291419316, "loss": 3.2687, "step": 56700 }, { "epoch": 6.118598382749326, "grad_norm": 0.7305110692977905, "learning_rate": 0.00023328440366972473, "loss": 3.2583, "step": 56750 }, { "epoch": 6.123989218328841, "grad_norm": 0.7409155368804932, "learning_rate": 0.0002329606044252563, "loss": 3.2608, "step": 56800 }, { "epoch": 6.129380053908355, "grad_norm": 0.6931422352790833, "learning_rate": 0.0002326368051807879, "loss": 3.2628, "step": 56850 }, { "epoch": 6.1347708894878705, "grad_norm": 0.7668306827545166, "learning_rate": 0.00023231300593631947, "loss": 3.2696, "step": 56900 }, { "epoch": 6.140161725067386, "grad_norm": 0.7603601217269897, "learning_rate": 0.00023198920669185104, "loss": 3.2466, "step": 56950 }, { "epoch": 6.1455525606469, "grad_norm": 0.7167591452598572, "learning_rate": 
0.00023166540744738262, "loss": 3.2452, "step": 57000 }, { "epoch": 6.1455525606469, "eval_accuracy": 0.38466523121714813, "eval_loss": 3.3798298835754395, "eval_runtime": 183.6772, "eval_samples_per_second": 98.058, "eval_steps_per_second": 6.13, "step": 57000 }, { "epoch": 6.150943396226415, "grad_norm": 0.6705722808837891, "learning_rate": 0.0002313416082029142, "loss": 3.2731, "step": 57050 }, { "epoch": 6.15633423180593, "grad_norm": 0.691725492477417, "learning_rate": 0.00023101780895844572, "loss": 3.2644, "step": 57100 }, { "epoch": 6.1617250673854445, "grad_norm": 0.6815258264541626, "learning_rate": 0.0002306940097139773, "loss": 3.247, "step": 57150 }, { "epoch": 6.16711590296496, "grad_norm": 0.6972390413284302, "learning_rate": 0.00023037021046950888, "loss": 3.2524, "step": 57200 }, { "epoch": 6.172506738544475, "grad_norm": 0.695257842540741, "learning_rate": 0.00023004641122504046, "loss": 3.256, "step": 57250 }, { "epoch": 6.177897574123989, "grad_norm": 0.8920659422874451, "learning_rate": 0.00022972261198057203, "loss": 3.255, "step": 57300 }, { "epoch": 6.183288409703504, "grad_norm": 0.7532164454460144, "learning_rate": 0.0002293988127361036, "loss": 3.271, "step": 57350 }, { "epoch": 6.188679245283019, "grad_norm": 0.6974930167198181, "learning_rate": 0.00022907501349163516, "loss": 3.2621, "step": 57400 }, { "epoch": 6.1940700808625335, "grad_norm": 0.7437677979469299, "learning_rate": 0.00022875121424716674, "loss": 3.2682, "step": 57450 }, { "epoch": 6.199460916442049, "grad_norm": 0.7247287631034851, "learning_rate": 0.00022842741500269832, "loss": 3.2651, "step": 57500 }, { "epoch": 6.204851752021563, "grad_norm": 0.7260898947715759, "learning_rate": 0.00022810361575822987, "loss": 3.2551, "step": 57550 }, { "epoch": 6.210242587601078, "grad_norm": 0.783756673336029, "learning_rate": 0.00022777981651376145, "loss": 3.2437, "step": 57600 }, { "epoch": 6.215633423180593, "grad_norm": 0.7135037779808044, "learning_rate": 
0.00022745601726929302, "loss": 3.2631, "step": 57650 }, { "epoch": 6.2210242587601075, "grad_norm": 0.7173543572425842, "learning_rate": 0.00022713221802482457, "loss": 3.2685, "step": 57700 }, { "epoch": 6.226415094339623, "grad_norm": 0.7847826480865479, "learning_rate": 0.00022680841878035615, "loss": 3.283, "step": 57750 }, { "epoch": 6.231805929919138, "grad_norm": 0.7096986770629883, "learning_rate": 0.00022648461953588773, "loss": 3.28, "step": 57800 }, { "epoch": 6.237196765498652, "grad_norm": 0.7000459432601929, "learning_rate": 0.0002261608202914193, "loss": 3.2509, "step": 57850 }, { "epoch": 6.242587601078167, "grad_norm": 0.6716017127037048, "learning_rate": 0.00022583702104695088, "loss": 3.2718, "step": 57900 }, { "epoch": 6.247978436657682, "grad_norm": 0.712790310382843, "learning_rate": 0.00022551322180248246, "loss": 3.2622, "step": 57950 }, { "epoch": 6.2533692722371965, "grad_norm": 0.7777376770973206, "learning_rate": 0.00022518942255801399, "loss": 3.2473, "step": 58000 }, { "epoch": 6.2533692722371965, "eval_accuracy": 0.3850733314637793, "eval_loss": 3.378077983856201, "eval_runtime": 183.3483, "eval_samples_per_second": 98.234, "eval_steps_per_second": 6.141, "step": 58000 }, { "epoch": 6.258760107816712, "grad_norm": 0.7233048677444458, "learning_rate": 0.00022486562331354556, "loss": 3.2652, "step": 58050 }, { "epoch": 6.264150943396227, "grad_norm": 0.7433217167854309, "learning_rate": 0.00022454182406907714, "loss": 3.2765, "step": 58100 }, { "epoch": 6.269541778975741, "grad_norm": 0.7410855889320374, "learning_rate": 0.00022421802482460872, "loss": 3.2542, "step": 58150 }, { "epoch": 6.274932614555256, "grad_norm": 0.7226539850234985, "learning_rate": 0.00022390070156502967, "loss": 3.2707, "step": 58200 }, { "epoch": 6.280323450134771, "grad_norm": 0.7170883417129517, "learning_rate": 0.00022357690232056125, "loss": 3.2763, "step": 58250 }, { "epoch": 6.285714285714286, "grad_norm": 0.7725226879119873, "learning_rate": 
0.0002232531030760928, "loss": 3.2611, "step": 58300 }, { "epoch": 6.291105121293801, "grad_norm": 0.766638457775116, "learning_rate": 0.00022292930383162435, "loss": 3.2514, "step": 58350 }, { "epoch": 6.296495956873315, "grad_norm": 0.7184855341911316, "learning_rate": 0.00022260550458715593, "loss": 3.2592, "step": 58400 }, { "epoch": 6.30188679245283, "grad_norm": 0.7369925379753113, "learning_rate": 0.0002222817053426875, "loss": 3.2782, "step": 58450 }, { "epoch": 6.307277628032345, "grad_norm": 0.6875489354133606, "learning_rate": 0.00022195790609821908, "loss": 3.2501, "step": 58500 }, { "epoch": 6.3126684636118595, "grad_norm": 0.7300560474395752, "learning_rate": 0.00022163410685375066, "loss": 3.2723, "step": 58550 }, { "epoch": 6.318059299191375, "grad_norm": 0.7411653995513916, "learning_rate": 0.00022131030760928224, "loss": 3.2799, "step": 58600 }, { "epoch": 6.32345013477089, "grad_norm": 0.7001060843467712, "learning_rate": 0.00022098650836481382, "loss": 3.2727, "step": 58650 }, { "epoch": 6.328840970350404, "grad_norm": 0.7271462678909302, "learning_rate": 0.0002206627091203454, "loss": 3.2883, "step": 58700 }, { "epoch": 6.334231805929919, "grad_norm": 0.7320724129676819, "learning_rate": 0.00022033890987587692, "loss": 3.2852, "step": 58750 }, { "epoch": 6.339622641509434, "grad_norm": 0.7684124112129211, "learning_rate": 0.0002200151106314085, "loss": 3.2706, "step": 58800 }, { "epoch": 6.345013477088949, "grad_norm": 0.7218632102012634, "learning_rate": 0.00021969131138694007, "loss": 3.2767, "step": 58850 }, { "epoch": 6.350404312668464, "grad_norm": 0.7296848297119141, "learning_rate": 0.00021936751214247165, "loss": 3.2619, "step": 58900 }, { "epoch": 6.355795148247978, "grad_norm": 0.7030173540115356, "learning_rate": 0.00021904371289800323, "loss": 3.2828, "step": 58950 }, { "epoch": 6.361185983827493, "grad_norm": 0.6917963624000549, "learning_rate": 0.0002187199136535348, "loss": 3.2807, "step": 59000 }, { "epoch": 6.361185983827493, 
"eval_accuracy": 0.38561768243172984, "eval_loss": 3.3727059364318848, "eval_runtime": 183.1739, "eval_samples_per_second": 98.327, "eval_steps_per_second": 6.147, "step": 59000 }, { "epoch": 6.366576819407008, "grad_norm": 0.7581689357757568, "learning_rate": 0.00021839611440906638, "loss": 3.2838, "step": 59050 }, { "epoch": 6.3719676549865225, "grad_norm": 0.7609435319900513, "learning_rate": 0.00021807231516459793, "loss": 3.2745, "step": 59100 }, { "epoch": 6.377358490566038, "grad_norm": 0.747968316078186, "learning_rate": 0.00021774851592012948, "loss": 3.2765, "step": 59150 }, { "epoch": 6.382749326145553, "grad_norm": 0.733305037021637, "learning_rate": 0.00021742471667566106, "loss": 3.2944, "step": 59200 }, { "epoch": 6.388140161725067, "grad_norm": 0.7445916533470154, "learning_rate": 0.00021710091743119264, "loss": 3.2692, "step": 59250 }, { "epoch": 6.393530997304582, "grad_norm": 0.7506402730941772, "learning_rate": 0.00021677711818672422, "loss": 3.2604, "step": 59300 }, { "epoch": 6.398921832884097, "grad_norm": 0.7963356971740723, "learning_rate": 0.0002164533189422558, "loss": 3.2751, "step": 59350 }, { "epoch": 6.404312668463612, "grad_norm": 0.7167596817016602, "learning_rate": 0.00021612951969778734, "loss": 3.2961, "step": 59400 }, { "epoch": 6.409703504043127, "grad_norm": 0.7183796763420105, "learning_rate": 0.00021580572045331892, "loss": 3.2764, "step": 59450 }, { "epoch": 6.415094339622642, "grad_norm": 0.6938195824623108, "learning_rate": 0.0002154819212088505, "loss": 3.2624, "step": 59500 }, { "epoch": 6.420485175202156, "grad_norm": 0.702420175075531, "learning_rate": 0.00021515812196438208, "loss": 3.2971, "step": 59550 }, { "epoch": 6.425876010781671, "grad_norm": 0.7254041433334351, "learning_rate": 0.00021483432271991363, "loss": 3.2668, "step": 59600 }, { "epoch": 6.431266846361186, "grad_norm": 0.7375361323356628, "learning_rate": 0.0002145105234754452, "loss": 3.2671, "step": 59650 }, { "epoch": 6.436657681940701, "grad_norm": 
0.7460655570030212, "learning_rate": 0.00021418672423097676, "loss": 3.305, "step": 59700 }, { "epoch": 6.442048517520216, "grad_norm": 0.7111104130744934, "learning_rate": 0.00021386292498650833, "loss": 3.286, "step": 59750 }, { "epoch": 6.44743935309973, "grad_norm": 0.7258356809616089, "learning_rate": 0.0002135391257420399, "loss": 3.2734, "step": 59800 }, { "epoch": 6.452830188679245, "grad_norm": 0.7276678085327148, "learning_rate": 0.0002132153264975715, "loss": 3.2684, "step": 59850 }, { "epoch": 6.45822102425876, "grad_norm": 0.7468143701553345, "learning_rate": 0.00021289152725310307, "loss": 3.2648, "step": 59900 }, { "epoch": 6.463611859838275, "grad_norm": 0.7350443005561829, "learning_rate": 0.00021256772800863464, "loss": 3.2631, "step": 59950 }, { "epoch": 6.46900269541779, "grad_norm": 0.6993089318275452, "learning_rate": 0.0002122439287641662, "loss": 3.2817, "step": 60000 }, { "epoch": 6.46900269541779, "eval_accuracy": 0.38607826202317547, "eval_loss": 3.366489887237549, "eval_runtime": 183.7834, "eval_samples_per_second": 98.001, "eval_steps_per_second": 6.127, "step": 60000 }, { "epoch": 6.474393530997305, "grad_norm": 0.7127420902252197, "learning_rate": 0.00021192012951969775, "loss": 3.2576, "step": 60050 }, { "epoch": 6.479784366576819, "grad_norm": 0.7203495502471924, "learning_rate": 0.00021159633027522932, "loss": 3.2822, "step": 60100 }, { "epoch": 6.485175202156334, "grad_norm": 0.7426835298538208, "learning_rate": 0.0002112725310307609, "loss": 3.2841, "step": 60150 }, { "epoch": 6.490566037735849, "grad_norm": 0.7064744234085083, "learning_rate": 0.00021094873178629248, "loss": 3.2526, "step": 60200 }, { "epoch": 6.495956873315364, "grad_norm": 0.7300220131874084, "learning_rate": 0.00021063140852671343, "loss": 3.2619, "step": 60250 }, { "epoch": 6.501347708894879, "grad_norm": 0.7384023666381836, "learning_rate": 0.000210307609282245, "loss": 3.297, "step": 60300 }, { "epoch": 6.506738544474393, "grad_norm": 0.757164478302002, 
"learning_rate": 0.00020998381003777653, "loss": 3.2567, "step": 60350 }, { "epoch": 6.512129380053908, "grad_norm": 0.7513059377670288, "learning_rate": 0.0002096600107933081, "loss": 3.2715, "step": 60400 }, { "epoch": 6.517520215633423, "grad_norm": 0.7424511313438416, "learning_rate": 0.0002093362115488397, "loss": 3.275, "step": 60450 }, { "epoch": 6.5229110512129385, "grad_norm": 0.704623281955719, "learning_rate": 0.00020901888828926064, "loss": 3.28, "step": 60500 }, { "epoch": 6.528301886792453, "grad_norm": 0.7265028953552246, "learning_rate": 0.00020869508904479222, "loss": 3.2796, "step": 60550 }, { "epoch": 6.533692722371968, "grad_norm": 0.7257518172264099, "learning_rate": 0.0002083712898003238, "loss": 3.2759, "step": 60600 }, { "epoch": 6.539083557951482, "grad_norm": 0.7516768574714661, "learning_rate": 0.00020804749055585537, "loss": 3.2809, "step": 60650 }, { "epoch": 6.544474393530997, "grad_norm": 0.7546630501747131, "learning_rate": 0.0002077236913113869, "loss": 3.2721, "step": 60700 }, { "epoch": 6.549865229110512, "grad_norm": 0.8262805342674255, "learning_rate": 0.00020739989206691847, "loss": 3.2756, "step": 60750 }, { "epoch": 6.555256064690027, "grad_norm": 0.7627807259559631, "learning_rate": 0.00020707609282245005, "loss": 3.2692, "step": 60800 }, { "epoch": 6.560646900269542, "grad_norm": 0.7664798498153687, "learning_rate": 0.00020675229357798163, "loss": 3.2687, "step": 60850 }, { "epoch": 6.566037735849057, "grad_norm": 0.75236576795578, "learning_rate": 0.0002064284943335132, "loss": 3.266, "step": 60900 }, { "epoch": 6.571428571428571, "grad_norm": 0.7668713331222534, "learning_rate": 0.00020610469508904478, "loss": 3.2746, "step": 60950 }, { "epoch": 6.576819407008086, "grad_norm": 0.6870357990264893, "learning_rate": 0.00020578089584457636, "loss": 3.2753, "step": 61000 }, { "epoch": 6.576819407008086, "eval_accuracy": 0.38637314596070393, "eval_loss": 3.363420248031616, "eval_runtime": 183.2196, "eval_samples_per_second": 
98.303, "eval_steps_per_second": 6.146, "step": 61000 }, { "epoch": 6.5822102425876015, "grad_norm": 0.7615857720375061, "learning_rate": 0.00020545709660010794, "loss": 3.2695, "step": 61050 }, { "epoch": 6.587601078167116, "grad_norm": 0.7549526691436768, "learning_rate": 0.00020513329735563946, "loss": 3.288, "step": 61100 }, { "epoch": 6.592991913746631, "grad_norm": 0.76313716173172, "learning_rate": 0.00020480949811117104, "loss": 3.2877, "step": 61150 }, { "epoch": 6.598382749326145, "grad_norm": 0.7764831185340881, "learning_rate": 0.00020448569886670262, "loss": 3.2824, "step": 61200 }, { "epoch": 6.60377358490566, "grad_norm": 0.7454062104225159, "learning_rate": 0.0002041618996222342, "loss": 3.2825, "step": 61250 }, { "epoch": 6.609164420485175, "grad_norm": 0.7208166718482971, "learning_rate": 0.00020383810037776577, "loss": 3.2659, "step": 61300 }, { "epoch": 6.6145552560646905, "grad_norm": 0.7168710827827454, "learning_rate": 0.00020351430113329735, "loss": 3.2976, "step": 61350 }, { "epoch": 6.619946091644205, "grad_norm": 0.7853072285652161, "learning_rate": 0.00020319050188882893, "loss": 3.2847, "step": 61400 }, { "epoch": 6.62533692722372, "grad_norm": 0.7360592484474182, "learning_rate": 0.00020286670264436048, "loss": 3.2946, "step": 61450 }, { "epoch": 6.630727762803234, "grad_norm": 0.7565436363220215, "learning_rate": 0.00020254290339989206, "loss": 3.281, "step": 61500 }, { "epoch": 6.636118598382749, "grad_norm": 0.7261871099472046, "learning_rate": 0.0002022191041554236, "loss": 3.2848, "step": 61550 }, { "epoch": 6.6415094339622645, "grad_norm": 0.7736920714378357, "learning_rate": 0.00020189530491095519, "loss": 3.2826, "step": 61600 }, { "epoch": 6.646900269541779, "grad_norm": 0.74223393201828, "learning_rate": 0.00020157150566648676, "loss": 3.28, "step": 61650 }, { "epoch": 6.652291105121294, "grad_norm": 0.777579128742218, "learning_rate": 0.00020124770642201834, "loss": 3.2633, "step": 61700 }, { "epoch": 6.657681940700809, 
"grad_norm": 0.764687180519104, "learning_rate": 0.0002009239071775499, "loss": 3.2872, "step": 61750 }, { "epoch": 6.663072776280323, "grad_norm": 0.7332888245582581, "learning_rate": 0.00020060010793308147, "loss": 3.2934, "step": 61800 }, { "epoch": 6.668463611859838, "grad_norm": 0.8394750356674194, "learning_rate": 0.00020027630868861305, "loss": 3.2917, "step": 61850 }, { "epoch": 6.6738544474393535, "grad_norm": 0.7316187620162964, "learning_rate": 0.00019995250944414462, "loss": 3.2639, "step": 61900 }, { "epoch": 6.679245283018868, "grad_norm": 0.7434121370315552, "learning_rate": 0.00019962871019967617, "loss": 3.2656, "step": 61950 }, { "epoch": 6.684636118598383, "grad_norm": 0.8291236162185669, "learning_rate": 0.00019930491095520775, "loss": 3.2679, "step": 62000 }, { "epoch": 6.684636118598383, "eval_accuracy": 0.3871309998532099, "eval_loss": 3.358170986175537, "eval_runtime": 183.5019, "eval_samples_per_second": 98.152, "eval_steps_per_second": 6.136, "step": 62000 }, { "epoch": 6.690026954177897, "grad_norm": 0.7411676645278931, "learning_rate": 0.0001989811117107393, "loss": 3.2909, "step": 62050 }, { "epoch": 6.695417789757412, "grad_norm": 0.7574923038482666, "learning_rate": 0.00019865731246627088, "loss": 3.2834, "step": 62100 }, { "epoch": 6.7008086253369274, "grad_norm": 0.7495836615562439, "learning_rate": 0.00019833351322180246, "loss": 3.2719, "step": 62150 }, { "epoch": 6.706199460916442, "grad_norm": 0.7210440635681152, "learning_rate": 0.00019800971397733404, "loss": 3.2732, "step": 62200 }, { "epoch": 6.711590296495957, "grad_norm": 0.7707087397575378, "learning_rate": 0.00019768591473286561, "loss": 3.2867, "step": 62250 }, { "epoch": 6.716981132075472, "grad_norm": 0.7962491512298584, "learning_rate": 0.0001973621154883972, "loss": 3.2703, "step": 62300 }, { "epoch": 6.722371967654986, "grad_norm": 0.7390112280845642, "learning_rate": 0.00019703831624392877, "loss": 3.2883, "step": 62350 }, { "epoch": 6.727762803234501, 
"grad_norm": 0.8107832670211792, "learning_rate": 0.0001967145169994603, "loss": 3.271, "step": 62400 }, { "epoch": 6.7331536388140165, "grad_norm": 0.741752028465271, "learning_rate": 0.00019639071775499187, "loss": 3.2754, "step": 62450 }, { "epoch": 6.738544474393531, "grad_norm": 0.7602443695068359, "learning_rate": 0.00019606691851052345, "loss": 3.2785, "step": 62500 }, { "epoch": 6.743935309973046, "grad_norm": 0.716080904006958, "learning_rate": 0.00019574311926605503, "loss": 3.2864, "step": 62550 }, { "epoch": 6.74932614555256, "grad_norm": 0.7163423299789429, "learning_rate": 0.0001954193200215866, "loss": 3.269, "step": 62600 }, { "epoch": 6.754716981132075, "grad_norm": 0.7376225590705872, "learning_rate": 0.00019509552077711818, "loss": 3.274, "step": 62650 }, { "epoch": 6.7601078167115904, "grad_norm": 0.7619684338569641, "learning_rate": 0.00019477172153264976, "loss": 3.2638, "step": 62700 }, { "epoch": 6.765498652291106, "grad_norm": 0.7855158448219299, "learning_rate": 0.00019444792228818134, "loss": 3.2921, "step": 62750 }, { "epoch": 6.77088948787062, "grad_norm": 0.7887711524963379, "learning_rate": 0.00019412412304371286, "loss": 3.2783, "step": 62800 }, { "epoch": 6.776280323450135, "grad_norm": 0.7664616107940674, "learning_rate": 0.00019380032379924444, "loss": 3.2934, "step": 62850 }, { "epoch": 6.781671159029649, "grad_norm": 0.7111510634422302, "learning_rate": 0.00019347652455477602, "loss": 3.2909, "step": 62900 }, { "epoch": 6.787061994609164, "grad_norm": 0.7316784262657166, "learning_rate": 0.0001931527253103076, "loss": 3.2634, "step": 62950 }, { "epoch": 6.7924528301886795, "grad_norm": 0.7745811939239502, "learning_rate": 0.00019282892606583917, "loss": 3.2727, "step": 63000 }, { "epoch": 6.7924528301886795, "eval_accuracy": 0.3873692756361871, "eval_loss": 3.3546576499938965, "eval_runtime": 183.7912, "eval_samples_per_second": 97.997, "eval_steps_per_second": 6.127, "step": 63000 }, { "epoch": 6.797843665768194, "grad_norm": 
0.7474355101585388, "learning_rate": 0.00019250512682137075, "loss": 3.2738, "step": 63050 }, { "epoch": 6.803234501347709, "grad_norm": 0.7750080823898315, "learning_rate": 0.0001921813275769023, "loss": 3.2733, "step": 63100 }, { "epoch": 6.808625336927224, "grad_norm": 0.7421109676361084, "learning_rate": 0.00019185752833243388, "loss": 3.2766, "step": 63150 }, { "epoch": 6.814016172506738, "grad_norm": 0.7296343445777893, "learning_rate": 0.00019153372908796545, "loss": 3.2777, "step": 63200 }, { "epoch": 6.819407008086253, "grad_norm": 0.7462671995162964, "learning_rate": 0.000191209929843497, "loss": 3.27, "step": 63250 }, { "epoch": 6.824797843665769, "grad_norm": 0.7307702898979187, "learning_rate": 0.00019088613059902858, "loss": 3.2594, "step": 63300 }, { "epoch": 6.830188679245283, "grad_norm": 0.751537561416626, "learning_rate": 0.00019056233135456016, "loss": 3.2779, "step": 63350 }, { "epoch": 6.835579514824798, "grad_norm": 0.7643833756446838, "learning_rate": 0.0001902385321100917, "loss": 3.2702, "step": 63400 }, { "epoch": 6.840970350404312, "grad_norm": 0.7389995455741882, "learning_rate": 0.0001899147328656233, "loss": 3.2886, "step": 63450 }, { "epoch": 6.846361185983827, "grad_norm": 0.7349757552146912, "learning_rate": 0.00018959093362115487, "loss": 3.2749, "step": 63500 }, { "epoch": 6.8517520215633425, "grad_norm": 0.7448405623435974, "learning_rate": 0.00018926713437668644, "loss": 3.2778, "step": 63550 }, { "epoch": 6.857142857142857, "grad_norm": 0.7943175435066223, "learning_rate": 0.00018894333513221802, "loss": 3.271, "step": 63600 }, { "epoch": 6.862533692722372, "grad_norm": 0.7844904661178589, "learning_rate": 0.00018861953588774957, "loss": 3.2804, "step": 63650 }, { "epoch": 6.867924528301887, "grad_norm": 0.7307280898094177, "learning_rate": 0.00018829573664328115, "loss": 3.2785, "step": 63700 }, { "epoch": 6.873315363881401, "grad_norm": 0.7532712817192078, "learning_rate": 0.0001879719373988127, "loss": 3.2801, "step": 63750 
}, { "epoch": 6.878706199460916, "grad_norm": 0.7567185759544373, "learning_rate": 0.00018764813815434428, "loss": 3.2661, "step": 63800 }, { "epoch": 6.884097035040432, "grad_norm": 0.7488755583763123, "learning_rate": 0.00018732433890987586, "loss": 3.2661, "step": 63850 }, { "epoch": 6.889487870619946, "grad_norm": 0.7576258778572083, "learning_rate": 0.00018700053966540743, "loss": 3.2932, "step": 63900 }, { "epoch": 6.894878706199461, "grad_norm": 0.7660182118415833, "learning_rate": 0.000186676740420939, "loss": 3.2827, "step": 63950 }, { "epoch": 6.900269541778976, "grad_norm": 0.7318092584609985, "learning_rate": 0.0001863529411764706, "loss": 3.2896, "step": 64000 }, { "epoch": 6.900269541778976, "eval_accuracy": 0.3878326802027159, "eval_loss": 3.352012872695923, "eval_runtime": 183.2112, "eval_samples_per_second": 98.307, "eval_steps_per_second": 6.146, "step": 64000 }, { "epoch": 6.90566037735849, "grad_norm": 0.7451673150062561, "learning_rate": 0.00018603561791689149, "loss": 3.273, "step": 64050 }, { "epoch": 6.9110512129380055, "grad_norm": 0.7861278653144836, "learning_rate": 0.00018571181867242306, "loss": 3.2591, "step": 64100 }, { "epoch": 6.916442048517521, "grad_norm": 0.7036826610565186, "learning_rate": 0.00018538801942795464, "loss": 3.2688, "step": 64150 }, { "epoch": 6.921832884097035, "grad_norm": 0.7634555697441101, "learning_rate": 0.00018506422018348622, "loss": 3.2769, "step": 64200 }, { "epoch": 6.92722371967655, "grad_norm": 0.7458739280700684, "learning_rate": 0.0001847404209390178, "loss": 3.2709, "step": 64250 }, { "epoch": 6.932614555256064, "grad_norm": 0.8086719512939453, "learning_rate": 0.00018441662169454937, "loss": 3.2676, "step": 64300 }, { "epoch": 6.938005390835579, "grad_norm": 0.716335654258728, "learning_rate": 0.00018409282245008095, "loss": 3.2789, "step": 64350 }, { "epoch": 6.943396226415095, "grad_norm": 0.7624198794364929, "learning_rate": 0.00018376902320561253, "loss": 3.2732, "step": 64400 }, { "epoch": 
6.948787061994609, "grad_norm": 0.77959144115448, "learning_rate": 0.00018344522396114405, "loss": 3.2667, "step": 64450 }, { "epoch": 6.954177897574124, "grad_norm": 0.766917884349823, "learning_rate": 0.00018312142471667563, "loss": 3.2833, "step": 64500 }, { "epoch": 6.959568733153639, "grad_norm": 0.7285516858100891, "learning_rate": 0.0001827976254722072, "loss": 3.275, "step": 64550 }, { "epoch": 6.964959568733153, "grad_norm": 0.7674565315246582, "learning_rate": 0.00018247382622773879, "loss": 3.2768, "step": 64600 }, { "epoch": 6.9703504043126685, "grad_norm": 0.7331922054290771, "learning_rate": 0.00018215002698327036, "loss": 3.2847, "step": 64650 }, { "epoch": 6.975741239892184, "grad_norm": 0.7738147377967834, "learning_rate": 0.00018182622773880194, "loss": 3.2892, "step": 64700 }, { "epoch": 6.981132075471698, "grad_norm": 0.7373318672180176, "learning_rate": 0.00018150242849433352, "loss": 3.2808, "step": 64750 }, { "epoch": 6.986522911051213, "grad_norm": 0.7882824540138245, "learning_rate": 0.00018117862924986507, "loss": 3.2797, "step": 64800 }, { "epoch": 6.991913746630727, "grad_norm": 0.718296468257904, "learning_rate": 0.00018085483000539662, "loss": 3.2775, "step": 64850 }, { "epoch": 6.997304582210242, "grad_norm": 0.7915265560150146, "learning_rate": 0.0001805310307609282, "loss": 3.2826, "step": 64900 }, { "epoch": 7.002695417789758, "grad_norm": 0.7813780903816223, "learning_rate": 0.00018020723151645978, "loss": 3.228, "step": 64950 }, { "epoch": 7.008086253369272, "grad_norm": 0.8058916926383972, "learning_rate": 0.00017988343227199135, "loss": 3.1934, "step": 65000 }, { "epoch": 7.008086253369272, "eval_accuracy": 0.387832028285389, "eval_loss": 3.354559898376465, "eval_runtime": 183.3424, "eval_samples_per_second": 98.237, "eval_steps_per_second": 6.142, "step": 65000 }, { "epoch": 7.013477088948787, "grad_norm": 0.7725360989570618, "learning_rate": 0.00017955963302752293, "loss": 3.1876, "step": 65050 }, { "epoch": 
7.018867924528302, "grad_norm": 0.7770508527755737, "learning_rate": 0.00017923583378305448, "loss": 3.1881, "step": 65100 }, { "epoch": 7.024258760107816, "grad_norm": 0.8097355961799622, "learning_rate": 0.00017891203453858606, "loss": 3.1979, "step": 65150 }, { "epoch": 7.0296495956873315, "grad_norm": 0.7231821417808533, "learning_rate": 0.00017858823529411764, "loss": 3.175, "step": 65200 }, { "epoch": 7.035040431266847, "grad_norm": 0.8374466300010681, "learning_rate": 0.00017826443604964921, "loss": 3.1885, "step": 65250 }, { "epoch": 7.040431266846361, "grad_norm": 0.8206698298454285, "learning_rate": 0.00017794063680518077, "loss": 3.1949, "step": 65300 }, { "epoch": 7.045822102425876, "grad_norm": 0.735885500907898, "learning_rate": 0.00017761683756071234, "loss": 3.1847, "step": 65350 }, { "epoch": 7.051212938005391, "grad_norm": 0.8094024062156677, "learning_rate": 0.00017729303831624392, "loss": 3.2144, "step": 65400 }, { "epoch": 7.056603773584905, "grad_norm": 0.7802730798721313, "learning_rate": 0.00017696923907177547, "loss": 3.2029, "step": 65450 }, { "epoch": 7.061994609164421, "grad_norm": 0.7770246863365173, "learning_rate": 0.00017664543982730705, "loss": 3.1913, "step": 65500 }, { "epoch": 7.067385444743936, "grad_norm": 0.7353746891021729, "learning_rate": 0.00017632164058283863, "loss": 3.2044, "step": 65550 }, { "epoch": 7.07277628032345, "grad_norm": 0.7481196522712708, "learning_rate": 0.0001759978413383702, "loss": 3.1822, "step": 65600 }, { "epoch": 7.078167115902965, "grad_norm": 0.7949692010879517, "learning_rate": 0.00017567404209390178, "loss": 3.2052, "step": 65650 }, { "epoch": 7.083557951482479, "grad_norm": 0.7633875012397766, "learning_rate": 0.00017535024284943333, "loss": 3.2092, "step": 65700 }, { "epoch": 7.0889487870619945, "grad_norm": 0.8302983641624451, "learning_rate": 0.00017502644360496488, "loss": 3.2087, "step": 65750 }, { "epoch": 7.09433962264151, "grad_norm": 0.794335126876831, "learning_rate": 
0.00017470264436049646, "loss": 3.2061, "step": 65800 }, { "epoch": 7.099730458221024, "grad_norm": 0.7204697132110596, "learning_rate": 0.00017437884511602804, "loss": 3.1899, "step": 65850 }, { "epoch": 7.105121293800539, "grad_norm": 0.7534080743789673, "learning_rate": 0.00017405504587155962, "loss": 3.2178, "step": 65900 }, { "epoch": 7.110512129380054, "grad_norm": 0.7708962559700012, "learning_rate": 0.0001737312466270912, "loss": 3.1813, "step": 65950 }, { "epoch": 7.115902964959568, "grad_norm": 0.7770982384681702, "learning_rate": 0.00017340744738262277, "loss": 3.2125, "step": 66000 }, { "epoch": 7.115902964959568, "eval_accuracy": 0.3879341619999346, "eval_loss": 3.3549797534942627, "eval_runtime": 183.6028, "eval_samples_per_second": 98.098, "eval_steps_per_second": 6.133, "step": 66000 }, { "epoch": 7.121293800539084, "grad_norm": 0.8400378823280334, "learning_rate": 0.00017308364813815435, "loss": 3.204, "step": 66050 }, { "epoch": 7.126684636118599, "grad_norm": 0.783427357673645, "learning_rate": 0.00017275984889368593, "loss": 3.1943, "step": 66100 }, { "epoch": 7.132075471698113, "grad_norm": 0.7737600207328796, "learning_rate": 0.00017243604964921745, "loss": 3.2306, "step": 66150 }, { "epoch": 7.137466307277628, "grad_norm": 0.7946293354034424, "learning_rate": 0.00017211225040474903, "loss": 3.2256, "step": 66200 }, { "epoch": 7.142857142857143, "grad_norm": 0.797230064868927, "learning_rate": 0.0001717884511602806, "loss": 3.2133, "step": 66250 }, { "epoch": 7.1482479784366575, "grad_norm": 0.817846953868866, "learning_rate": 0.00017146465191581218, "loss": 3.2025, "step": 66300 }, { "epoch": 7.153638814016173, "grad_norm": 0.786282479763031, "learning_rate": 0.00017114085267134376, "loss": 3.21, "step": 66350 }, { "epoch": 7.159029649595688, "grad_norm": 0.7998514771461487, "learning_rate": 0.00017081705342687534, "loss": 3.2079, "step": 66400 }, { "epoch": 7.164420485175202, "grad_norm": 0.7554528117179871, "learning_rate": 
0.0001704932541824069, "loss": 3.2061, "step": 66450 }, { "epoch": 7.169811320754717, "grad_norm": 0.7405669093132019, "learning_rate": 0.00017016945493793847, "loss": 3.2116, "step": 66500 }, { "epoch": 7.175202156334231, "grad_norm": 0.7673985958099365, "learning_rate": 0.00016984565569347002, "loss": 3.2082, "step": 66550 }, { "epoch": 7.180592991913747, "grad_norm": 0.7783784866333008, "learning_rate": 0.0001695218564490016, "loss": 3.2171, "step": 66600 }, { "epoch": 7.185983827493262, "grad_norm": 0.7714428901672363, "learning_rate": 0.00016919805720453317, "loss": 3.2155, "step": 66650 }, { "epoch": 7.191374663072776, "grad_norm": 0.7873608469963074, "learning_rate": 0.00016887425796006475, "loss": 3.2122, "step": 66700 }, { "epoch": 7.196765498652291, "grad_norm": 0.8219302892684937, "learning_rate": 0.00016855045871559633, "loss": 3.1983, "step": 66750 }, { "epoch": 7.202156334231806, "grad_norm": 0.802437961101532, "learning_rate": 0.00016822665947112788, "loss": 3.2066, "step": 66800 }, { "epoch": 7.2075471698113205, "grad_norm": 0.7578773498535156, "learning_rate": 0.00016790286022665946, "loss": 3.2035, "step": 66850 }, { "epoch": 7.212938005390836, "grad_norm": 0.7701559662818909, "learning_rate": 0.00016757906098219103, "loss": 3.2044, "step": 66900 }, { "epoch": 7.218328840970351, "grad_norm": 0.8019431829452515, "learning_rate": 0.0001672552617377226, "loss": 3.207, "step": 66950 }, { "epoch": 7.223719676549865, "grad_norm": 0.7901089787483215, "learning_rate": 0.00016693146249325416, "loss": 3.215, "step": 67000 }, { "epoch": 7.223719676549865, "eval_accuracy": 0.3883080365869042, "eval_loss": 3.351943016052246, "eval_runtime": 183.3094, "eval_samples_per_second": 98.255, "eval_steps_per_second": 6.143, "step": 67000 }, { "epoch": 7.22911051212938, "grad_norm": 0.7946869134902954, "learning_rate": 0.00016660766324878574, "loss": 3.2258, "step": 67050 }, { "epoch": 7.234501347708895, "grad_norm": 0.77195143699646, "learning_rate": 
0.0001662838640043173, "loss": 3.2286, "step": 67100 }, { "epoch": 7.2398921832884096, "grad_norm": 0.782701849937439, "learning_rate": 0.00016596006475984887, "loss": 3.186, "step": 67150 }, { "epoch": 7.245283018867925, "grad_norm": 0.756524384021759, "learning_rate": 0.00016563626551538045, "loss": 3.2187, "step": 67200 }, { "epoch": 7.250673854447439, "grad_norm": 0.7699859142303467, "learning_rate": 0.00016531246627091202, "loss": 3.2101, "step": 67250 }, { "epoch": 7.256064690026954, "grad_norm": 0.7793636322021484, "learning_rate": 0.0001649886670264436, "loss": 3.2176, "step": 67300 }, { "epoch": 7.261455525606469, "grad_norm": 0.7925220727920532, "learning_rate": 0.00016466486778197518, "loss": 3.232, "step": 67350 }, { "epoch": 7.2668463611859835, "grad_norm": 0.7722257375717163, "learning_rate": 0.0001643410685375067, "loss": 3.2253, "step": 67400 }, { "epoch": 7.272237196765499, "grad_norm": 0.8224971294403076, "learning_rate": 0.00016401726929303828, "loss": 3.237, "step": 67450 }, { "epoch": 7.277628032345014, "grad_norm": 0.7844073176383972, "learning_rate": 0.00016369347004856986, "loss": 3.2074, "step": 67500 }, { "epoch": 7.283018867924528, "grad_norm": 0.7575783133506775, "learning_rate": 0.00016336967080410143, "loss": 3.1997, "step": 67550 }, { "epoch": 7.288409703504043, "grad_norm": 0.7861126661300659, "learning_rate": 0.000163045871559633, "loss": 3.2356, "step": 67600 }, { "epoch": 7.293800539083558, "grad_norm": 0.8623127341270447, "learning_rate": 0.0001627220723151646, "loss": 3.2232, "step": 67650 }, { "epoch": 7.2991913746630726, "grad_norm": 0.7551688551902771, "learning_rate": 0.00016239827307069617, "loss": 3.2158, "step": 67700 }, { "epoch": 7.304582210242588, "grad_norm": 0.7738710641860962, "learning_rate": 0.00016207447382622775, "loss": 3.223, "step": 67750 }, { "epoch": 7.309973045822103, "grad_norm": 0.7757263779640198, "learning_rate": 0.00016175067458175932, "loss": 3.2344, "step": 67800 }, { "epoch": 7.315363881401617, 
"grad_norm": 0.7811141014099121, "learning_rate": 0.00016142687533729085, "loss": 3.219, "step": 67850 }, { "epoch": 7.320754716981132, "grad_norm": 0.8079759478569031, "learning_rate": 0.00016110307609282242, "loss": 3.2148, "step": 67900 }, { "epoch": 7.3261455525606465, "grad_norm": 0.8070315718650818, "learning_rate": 0.000160779276848354, "loss": 3.2236, "step": 67950 }, { "epoch": 7.331536388140162, "grad_norm": 0.8166431784629822, "learning_rate": 0.00016045547760388558, "loss": 3.2193, "step": 68000 }, { "epoch": 7.331536388140162, "eval_accuracy": 0.3887239598414581, "eval_loss": 3.3503804206848145, "eval_runtime": 183.4566, "eval_samples_per_second": 98.176, "eval_steps_per_second": 6.138, "step": 68000 }, { "epoch": 7.336927223719677, "grad_norm": 0.7583974003791809, "learning_rate": 0.00016013815434430653, "loss": 3.2139, "step": 68050 }, { "epoch": 7.342318059299191, "grad_norm": 0.757961630821228, "learning_rate": 0.0001598143550998381, "loss": 3.2173, "step": 68100 }, { "epoch": 7.347708894878706, "grad_norm": 0.7688447833061218, "learning_rate": 0.00015949055585536966, "loss": 3.2238, "step": 68150 }, { "epoch": 7.353099730458221, "grad_norm": 0.7891886830329895, "learning_rate": 0.0001591667566109012, "loss": 3.2162, "step": 68200 }, { "epoch": 7.3584905660377355, "grad_norm": 0.8004134297370911, "learning_rate": 0.0001588429573664328, "loss": 3.2212, "step": 68250 }, { "epoch": 7.363881401617251, "grad_norm": 0.8025893568992615, "learning_rate": 0.00015851915812196437, "loss": 3.2367, "step": 68300 }, { "epoch": 7.369272237196766, "grad_norm": 0.8140208125114441, "learning_rate": 0.00015819535887749594, "loss": 3.2145, "step": 68350 }, { "epoch": 7.37466307277628, "grad_norm": 0.7988510727882385, "learning_rate": 0.00015787155963302752, "loss": 3.2117, "step": 68400 }, { "epoch": 7.380053908355795, "grad_norm": 0.8174982666969299, "learning_rate": 0.0001575477603885591, "loss": 3.2268, "step": 68450 }, { "epoch": 7.38544474393531, "grad_norm": 
0.8240893483161926, "learning_rate": 0.00015722396114409065, "loss": 3.199, "step": 68500 }, { "epoch": 7.390835579514825, "grad_norm": 0.7737301588058472, "learning_rate": 0.00015690016189962223, "loss": 3.1935, "step": 68550 }, { "epoch": 7.39622641509434, "grad_norm": 0.7589561939239502, "learning_rate": 0.00015657636265515378, "loss": 3.2208, "step": 68600 }, { "epoch": 7.401617250673855, "grad_norm": 0.8396160006523132, "learning_rate": 0.00015625256341068536, "loss": 3.2331, "step": 68650 }, { "epoch": 7.407008086253369, "grad_norm": 0.7733933925628662, "learning_rate": 0.00015592876416621693, "loss": 3.2099, "step": 68700 }, { "epoch": 7.412398921832884, "grad_norm": 0.8380154967308044, "learning_rate": 0.0001556049649217485, "loss": 3.2235, "step": 68750 }, { "epoch": 7.4177897574123985, "grad_norm": 0.799769401550293, "learning_rate": 0.00015528116567728006, "loss": 3.2154, "step": 68800 }, { "epoch": 7.423180592991914, "grad_norm": 0.794215977191925, "learning_rate": 0.00015495736643281164, "loss": 3.2324, "step": 68850 }, { "epoch": 7.428571428571429, "grad_norm": 0.8496611714363098, "learning_rate": 0.00015463356718834322, "loss": 3.2261, "step": 68900 }, { "epoch": 7.433962264150943, "grad_norm": 0.7774752378463745, "learning_rate": 0.0001543097679438748, "loss": 3.2289, "step": 68950 }, { "epoch": 7.439353099730458, "grad_norm": 0.8549684286117554, "learning_rate": 0.00015398596869940637, "loss": 3.2403, "step": 69000 }, { "epoch": 7.439353099730458, "eval_accuracy": 0.38884412993538087, "eval_loss": 3.345785617828369, "eval_runtime": 183.4217, "eval_samples_per_second": 98.195, "eval_steps_per_second": 6.139, "step": 69000 }, { "epoch": 7.444743935309973, "grad_norm": 0.766400933265686, "learning_rate": 0.00015366216945493792, "loss": 3.2498, "step": 69050 }, { "epoch": 7.450134770889488, "grad_norm": 0.7826684713363647, "learning_rate": 0.00015333837021046947, "loss": 3.2286, "step": 69100 }, { "epoch": 7.455525606469003, "grad_norm": 
0.7703137993812561, "learning_rate": 0.00015301457096600105, "loss": 3.2104, "step": 69150 }, { "epoch": 7.460916442048518, "grad_norm": 0.7931674718856812, "learning_rate": 0.00015269077172153263, "loss": 3.2337, "step": 69200 }, { "epoch": 7.466307277628032, "grad_norm": 0.8100343346595764, "learning_rate": 0.0001523669724770642, "loss": 3.2269, "step": 69250 }, { "epoch": 7.471698113207547, "grad_norm": 0.7942655682563782, "learning_rate": 0.00015204317323259578, "loss": 3.2206, "step": 69300 }, { "epoch": 7.4770889487870615, "grad_norm": 0.8322016000747681, "learning_rate": 0.00015171937398812736, "loss": 3.221, "step": 69350 }, { "epoch": 7.482479784366577, "grad_norm": 0.7898223400115967, "learning_rate": 0.00015139557474365894, "loss": 3.248, "step": 69400 }, { "epoch": 7.487870619946092, "grad_norm": 0.8208786845207214, "learning_rate": 0.00015107177549919046, "loss": 3.2311, "step": 69450 }, { "epoch": 7.493261455525606, "grad_norm": 0.7814972400665283, "learning_rate": 0.00015074797625472204, "loss": 3.2196, "step": 69500 }, { "epoch": 7.498652291105121, "grad_norm": 0.8326282501220703, "learning_rate": 0.00015042417701025362, "loss": 3.2347, "step": 69550 }, { "epoch": 7.504043126684636, "grad_norm": 0.8042081594467163, "learning_rate": 0.0001501003777657852, "loss": 3.2242, "step": 69600 }, { "epoch": 7.509433962264151, "grad_norm": 0.7709265947341919, "learning_rate": 0.00014977657852131677, "loss": 3.2092, "step": 69650 }, { "epoch": 7.514824797843666, "grad_norm": 0.7810226678848267, "learning_rate": 0.0001494592552617377, "loss": 3.2464, "step": 69700 }, { "epoch": 7.520215633423181, "grad_norm": 0.8230692744255066, "learning_rate": 0.00014913545601726928, "loss": 3.2242, "step": 69750 }, { "epoch": 7.525606469002695, "grad_norm": 0.7870705127716064, "learning_rate": 0.00014881165677280085, "loss": 3.2335, "step": 69800 }, { "epoch": 7.53099730458221, "grad_norm": 0.7646998763084412, "learning_rate": 0.00014848785752833243, "loss": 3.219, "step": 
69850 }, { "epoch": 7.536388140161725, "grad_norm": 0.8329770565032959, "learning_rate": 0.00014816405828386398, "loss": 3.239, "step": 69900 }, { "epoch": 7.54177897574124, "grad_norm": 0.8003082275390625, "learning_rate": 0.00014784025903939556, "loss": 3.2322, "step": 69950 }, { "epoch": 7.547169811320755, "grad_norm": 0.7998071312904358, "learning_rate": 0.00014751645979492714, "loss": 3.2254, "step": 70000 }, { "epoch": 7.547169811320755, "eval_accuracy": 0.3892735261480237, "eval_loss": 3.343639373779297, "eval_runtime": 183.6901, "eval_samples_per_second": 98.051, "eval_steps_per_second": 6.13, "step": 70000 }, { "epoch": 7.55256064690027, "grad_norm": 0.7884393930435181, "learning_rate": 0.00014719266055045871, "loss": 3.2285, "step": 70050 }, { "epoch": 7.557951482479784, "grad_norm": 0.8215367197990417, "learning_rate": 0.00014686886130599027, "loss": 3.238, "step": 70100 }, { "epoch": 7.563342318059299, "grad_norm": 0.794144332408905, "learning_rate": 0.00014654506206152184, "loss": 3.2335, "step": 70150 }, { "epoch": 7.568733153638814, "grad_norm": 0.8532745838165283, "learning_rate": 0.00014622126281705342, "loss": 3.23, "step": 70200 }, { "epoch": 7.574123989218329, "grad_norm": 0.8423836827278137, "learning_rate": 0.000145897463572585, "loss": 3.2318, "step": 70250 }, { "epoch": 7.579514824797844, "grad_norm": 0.8306622505187988, "learning_rate": 0.00014557366432811658, "loss": 3.2334, "step": 70300 }, { "epoch": 7.584905660377358, "grad_norm": 0.8016265630722046, "learning_rate": 0.00014524986508364813, "loss": 3.2495, "step": 70350 }, { "epoch": 7.590296495956873, "grad_norm": 0.7804158329963684, "learning_rate": 0.0001449260658391797, "loss": 3.22, "step": 70400 }, { "epoch": 7.595687331536388, "grad_norm": 0.8219479322433472, "learning_rate": 0.00014460226659471128, "loss": 3.2402, "step": 70450 }, { "epoch": 7.601078167115903, "grad_norm": 0.8083851933479309, "learning_rate": 0.00014427846735024283, "loss": 3.2458, "step": 70500 }, { "epoch": 
7.606469002695418, "grad_norm": 0.7973158359527588, "learning_rate": 0.0001439546681057744, "loss": 3.2348, "step": 70550 }, { "epoch": 7.611859838274933, "grad_norm": 0.8355487585067749, "learning_rate": 0.000143630868861306, "loss": 3.2238, "step": 70600 }, { "epoch": 7.617250673854447, "grad_norm": 0.8130533695220947, "learning_rate": 0.00014330706961683754, "loss": 3.2209, "step": 70650 }, { "epoch": 7.622641509433962, "grad_norm": 0.824119508266449, "learning_rate": 0.00014298327037236912, "loss": 3.2475, "step": 70700 }, { "epoch": 7.628032345013477, "grad_norm": 0.8470996022224426, "learning_rate": 0.0001426594711279007, "loss": 3.2272, "step": 70750 }, { "epoch": 7.633423180592992, "grad_norm": 0.812836229801178, "learning_rate": 0.00014233567188343224, "loss": 3.2344, "step": 70800 }, { "epoch": 7.638814016172507, "grad_norm": 0.7536956667900085, "learning_rate": 0.00014201187263896382, "loss": 3.2221, "step": 70850 }, { "epoch": 7.644204851752022, "grad_norm": 0.8352275490760803, "learning_rate": 0.0001416880733944954, "loss": 3.2347, "step": 70900 }, { "epoch": 7.649595687331536, "grad_norm": 0.7672180533409119, "learning_rate": 0.00014136427415002695, "loss": 3.2151, "step": 70950 }, { "epoch": 7.654986522911051, "grad_norm": 0.8054909706115723, "learning_rate": 0.00014104047490555853, "loss": 3.2215, "step": 71000 }, { "epoch": 7.654986522911051, "eval_accuracy": 0.38935914462362153, "eval_loss": 3.3389949798583984, "eval_runtime": 183.3427, "eval_samples_per_second": 98.237, "eval_steps_per_second": 6.142, "step": 71000 }, { "epoch": 7.660377358490566, "grad_norm": 0.8233856558799744, "learning_rate": 0.0001407166756610901, "loss": 3.2494, "step": 71050 }, { "epoch": 7.665768194070081, "grad_norm": 0.8189040422439575, "learning_rate": 0.00014039287641662168, "loss": 3.2215, "step": 71100 }, { "epoch": 7.671159029649596, "grad_norm": 0.8202671408653259, "learning_rate": 0.00014006907717215326, "loss": 3.2173, "step": 71150 }, { "epoch": 
7.67654986522911, "grad_norm": 0.8153406977653503, "learning_rate": 0.0001397452779276848, "loss": 3.223, "step": 71200 }, { "epoch": 7.681940700808625, "grad_norm": 0.8444876670837402, "learning_rate": 0.0001394214786832164, "loss": 3.2304, "step": 71250 }, { "epoch": 7.6873315363881405, "grad_norm": 0.836453378200531, "learning_rate": 0.00013909767943874797, "loss": 3.2242, "step": 71300 }, { "epoch": 7.692722371967655, "grad_norm": 0.7985456585884094, "learning_rate": 0.00013877388019427954, "loss": 3.2336, "step": 71350 }, { "epoch": 7.69811320754717, "grad_norm": 0.8078007102012634, "learning_rate": 0.0001384500809498111, "loss": 3.2358, "step": 71400 }, { "epoch": 7.703504043126685, "grad_norm": 0.9007571935653687, "learning_rate": 0.00013812628170534267, "loss": 3.2288, "step": 71450 }, { "epoch": 7.708894878706199, "grad_norm": 0.7986894249916077, "learning_rate": 0.00013780248246087425, "loss": 3.2364, "step": 71500 }, { "epoch": 7.714285714285714, "grad_norm": 0.8218685388565063, "learning_rate": 0.00013747868321640583, "loss": 3.2235, "step": 71550 }, { "epoch": 7.719676549865229, "grad_norm": 0.8080571293830872, "learning_rate": 0.00013715488397193738, "loss": 3.2249, "step": 71600 }, { "epoch": 7.725067385444744, "grad_norm": 0.854092001914978, "learning_rate": 0.00013683108472746896, "loss": 3.2193, "step": 71650 }, { "epoch": 7.730458221024259, "grad_norm": 0.7832936644554138, "learning_rate": 0.00013650728548300053, "loss": 3.2439, "step": 71700 }, { "epoch": 7.735849056603773, "grad_norm": 0.779666006565094, "learning_rate": 0.0001361834862385321, "loss": 3.2301, "step": 71750 }, { "epoch": 7.741239892183288, "grad_norm": 0.8854478597640991, "learning_rate": 0.0001358596869940637, "loss": 3.2211, "step": 71800 }, { "epoch": 7.7466307277628035, "grad_norm": 0.7972925901412964, "learning_rate": 0.00013553588774959524, "loss": 3.2189, "step": 71850 }, { "epoch": 7.752021563342318, "grad_norm": 0.853445291519165, "learning_rate": 
0.00013521208850512682, "loss": 3.2392, "step": 71900 }, { "epoch": 7.757412398921833, "grad_norm": 0.8733102083206177, "learning_rate": 0.0001348882892606584, "loss": 3.2315, "step": 71950 }, { "epoch": 7.762803234501348, "grad_norm": 0.7951270937919617, "learning_rate": 0.00013456449001618995, "loss": 3.235, "step": 72000 }, { "epoch": 7.762803234501348, "eval_accuracy": 0.39014046753989545, "eval_loss": 3.3319358825683594, "eval_runtime": 183.3781, "eval_samples_per_second": 98.218, "eval_steps_per_second": 6.14, "step": 72000 }, { "epoch": 7.768194070080862, "grad_norm": 0.7769864201545715, "learning_rate": 0.00013424069077172152, "loss": 3.2239, "step": 72050 }, { "epoch": 7.773584905660377, "grad_norm": 0.7999830842018127, "learning_rate": 0.0001339168915272531, "loss": 3.2342, "step": 72100 }, { "epoch": 7.7789757412398925, "grad_norm": 0.781497597694397, "learning_rate": 0.00013359309228278465, "loss": 3.2331, "step": 72150 }, { "epoch": 7.784366576819407, "grad_norm": 0.8018577098846436, "learning_rate": 0.00013326929303831623, "loss": 3.2561, "step": 72200 }, { "epoch": 7.789757412398922, "grad_norm": 0.8189263939857483, "learning_rate": 0.0001329454937938478, "loss": 3.211, "step": 72250 }, { "epoch": 7.795148247978437, "grad_norm": 0.8497806191444397, "learning_rate": 0.00013262169454937936, "loss": 3.2363, "step": 72300 }, { "epoch": 7.800539083557951, "grad_norm": 0.8176953792572021, "learning_rate": 0.00013229789530491093, "loss": 3.2281, "step": 72350 }, { "epoch": 7.8059299191374665, "grad_norm": 0.8766255378723145, "learning_rate": 0.0001319740960604425, "loss": 3.2288, "step": 72400 }, { "epoch": 7.811320754716981, "grad_norm": 0.8095853924751282, "learning_rate": 0.0001316502968159741, "loss": 3.2339, "step": 72450 }, { "epoch": 7.816711590296496, "grad_norm": 0.8431499600410461, "learning_rate": 0.00013132649757150564, "loss": 3.2225, "step": 72500 }, { "epoch": 7.822102425876011, "grad_norm": 0.8290701508522034, "learning_rate": 
0.00013100269832703722, "loss": 3.2066, "step": 72550 }, { "epoch": 7.827493261455525, "grad_norm": 0.780984103679657, "learning_rate": 0.0001306788990825688, "loss": 3.2393, "step": 72600 }, { "epoch": 7.83288409703504, "grad_norm": 0.795058012008667, "learning_rate": 0.00013035509983810037, "loss": 3.2264, "step": 72650 }, { "epoch": 7.8382749326145555, "grad_norm": 0.833223819732666, "learning_rate": 0.00013003130059363192, "loss": 3.2348, "step": 72700 }, { "epoch": 7.84366576819407, "grad_norm": 0.8560569882392883, "learning_rate": 0.0001297075013491635, "loss": 3.229, "step": 72750 }, { "epoch": 7.849056603773585, "grad_norm": 0.8031648397445679, "learning_rate": 0.00012938370210469508, "loss": 3.2249, "step": 72800 }, { "epoch": 7.8544474393531, "grad_norm": 0.8022270202636719, "learning_rate": 0.00012905990286022666, "loss": 3.2204, "step": 72850 }, { "epoch": 7.859838274932614, "grad_norm": 0.7824501991271973, "learning_rate": 0.0001287361036157582, "loss": 3.2205, "step": 72900 }, { "epoch": 7.8652291105121295, "grad_norm": 0.8288241028785706, "learning_rate": 0.00012841230437128979, "loss": 3.2178, "step": 72950 }, { "epoch": 7.870619946091644, "grad_norm": 0.8087592124938965, "learning_rate": 0.00012808850512682136, "loss": 3.2186, "step": 73000 }, { "epoch": 7.870619946091644, "eval_accuracy": 0.3905230343578902, "eval_loss": 3.329652786254883, "eval_runtime": 183.3863, "eval_samples_per_second": 98.213, "eval_steps_per_second": 6.14, "step": 73000 }, { "epoch": 7.876010781671159, "grad_norm": 0.8783208727836609, "learning_rate": 0.00012776470588235294, "loss": 3.2303, "step": 73050 }, { "epoch": 7.881401617250674, "grad_norm": 0.850925862789154, "learning_rate": 0.0001274409066378845, "loss": 3.2095, "step": 73100 }, { "epoch": 7.886792452830189, "grad_norm": 0.7974444627761841, "learning_rate": 0.00012711710739341607, "loss": 3.2193, "step": 73150 }, { "epoch": 7.892183288409703, "grad_norm": 0.8161489367485046, "learning_rate": 
0.00012679330814894765, "loss": 3.2342, "step": 73200 }, { "epoch": 7.8975741239892185, "grad_norm": 0.8146517872810364, "learning_rate": 0.00012646950890447922, "loss": 3.2266, "step": 73250 }, { "epoch": 7.902964959568733, "grad_norm": 0.8209285140037537, "learning_rate": 0.00012614570966001077, "loss": 3.2198, "step": 73300 }, { "epoch": 7.908355795148248, "grad_norm": 0.8260653614997864, "learning_rate": 0.00012582191041554235, "loss": 3.2057, "step": 73350 }, { "epoch": 7.913746630727763, "grad_norm": 0.8421151638031006, "learning_rate": 0.00012549811117107393, "loss": 3.2516, "step": 73400 }, { "epoch": 7.919137466307277, "grad_norm": 0.8532014489173889, "learning_rate": 0.0001251743119266055, "loss": 3.2325, "step": 73450 }, { "epoch": 7.9245283018867925, "grad_norm": 0.7685270309448242, "learning_rate": 0.00012485051268213706, "loss": 3.2406, "step": 73500 }, { "epoch": 7.929919137466308, "grad_norm": 0.859190821647644, "learning_rate": 0.00012452671343766864, "loss": 3.2347, "step": 73550 }, { "epoch": 7.935309973045822, "grad_norm": 0.8019168972969055, "learning_rate": 0.00012420291419320021, "loss": 3.2328, "step": 73600 }, { "epoch": 7.940700808625337, "grad_norm": 0.849294126033783, "learning_rate": 0.0001238791149487318, "loss": 3.2427, "step": 73650 }, { "epoch": 7.946091644204852, "grad_norm": 0.8092149496078491, "learning_rate": 0.00012356179168915272, "loss": 3.2481, "step": 73700 }, { "epoch": 7.951482479784366, "grad_norm": 0.8331597447395325, "learning_rate": 0.0001232379924446843, "loss": 3.2047, "step": 73750 }, { "epoch": 7.9568733153638815, "grad_norm": 0.8321600556373596, "learning_rate": 0.00012291419320021587, "loss": 3.2282, "step": 73800 }, { "epoch": 7.962264150943396, "grad_norm": 0.8063613772392273, "learning_rate": 0.00012259039395574742, "loss": 3.2137, "step": 73850 }, { "epoch": 7.967654986522911, "grad_norm": 0.853704035282135, "learning_rate": 0.000122266594711279, "loss": 3.2196, "step": 73900 }, { "epoch": 7.973045822102426, 
"grad_norm": 0.8520025014877319, "learning_rate": 0.00012194279546681056, "loss": 3.2307, "step": 73950 }, { "epoch": 7.97843665768194, "grad_norm": 0.8468704223632812, "learning_rate": 0.00012161899622234214, "loss": 3.2275, "step": 74000 }, { "epoch": 7.97843665768194, "eval_accuracy": 0.3908582285167979, "eval_loss": 3.326791286468506, "eval_runtime": 183.707, "eval_samples_per_second": 98.042, "eval_steps_per_second": 6.129, "step": 74000 }, { "epoch": 7.9838274932614555, "grad_norm": 0.8065071105957031, "learning_rate": 0.00012129519697787372, "loss": 3.2299, "step": 74050 }, { "epoch": 7.989218328840971, "grad_norm": 0.8211476802825928, "learning_rate": 0.00012097139773340527, "loss": 3.2332, "step": 74100 }, { "epoch": 7.994609164420485, "grad_norm": 0.7979119420051575, "learning_rate": 0.00012064759848893685, "loss": 3.2219, "step": 74150 }, { "epoch": 8.0, "grad_norm": 1.7197800874710083, "learning_rate": 0.00012032379924446843, "loss": 3.2359, "step": 74200 }, { "epoch": 8.005390835579515, "grad_norm": 0.8147539496421814, "learning_rate": 0.00011999999999999999, "loss": 3.1524, "step": 74250 }, { "epoch": 8.01078167115903, "grad_norm": 0.8394148945808411, "learning_rate": 0.00011967620075553155, "loss": 3.1463, "step": 74300 }, { "epoch": 8.016172506738544, "grad_norm": 0.8568943738937378, "learning_rate": 0.00011935240151106313, "loss": 3.1327, "step": 74350 }, { "epoch": 8.021563342318059, "grad_norm": 0.8077348470687866, "learning_rate": 0.00011902860226659471, "loss": 3.1388, "step": 74400 }, { "epoch": 8.026954177897574, "grad_norm": 0.785599410533905, "learning_rate": 0.00011870480302212627, "loss": 3.1547, "step": 74450 }, { "epoch": 8.032345013477089, "grad_norm": 0.8443362712860107, "learning_rate": 0.00011838100377765784, "loss": 3.1424, "step": 74500 }, { "epoch": 8.037735849056604, "grad_norm": 0.8324596881866455, "learning_rate": 0.00011805720453318941, "loss": 3.1601, "step": 74550 }, { "epoch": 8.04312668463612, "grad_norm": 
0.8503394722938538, "learning_rate": 0.00011773340528872098, "loss": 3.1528, "step": 74600 }, { "epoch": 8.048517520215633, "grad_norm": 0.8025884628295898, "learning_rate": 0.00011740960604425256, "loss": 3.1453, "step": 74650 }, { "epoch": 8.053908355795148, "grad_norm": 0.8412855267524719, "learning_rate": 0.00011708580679978412, "loss": 3.1539, "step": 74700 }, { "epoch": 8.059299191374663, "grad_norm": 0.8652845025062561, "learning_rate": 0.00011676200755531568, "loss": 3.1489, "step": 74750 }, { "epoch": 8.064690026954178, "grad_norm": 0.8231407999992371, "learning_rate": 0.00011643820831084726, "loss": 3.156, "step": 74800 }, { "epoch": 8.070080862533693, "grad_norm": 0.8152666687965393, "learning_rate": 0.00011611440906637884, "loss": 3.1646, "step": 74850 }, { "epoch": 8.075471698113208, "grad_norm": 0.8229835033416748, "learning_rate": 0.00011579060982191042, "loss": 3.141, "step": 74900 }, { "epoch": 8.080862533692722, "grad_norm": 0.8419477343559265, "learning_rate": 0.00011546681057744197, "loss": 3.1425, "step": 74950 }, { "epoch": 8.086253369272237, "grad_norm": 0.8185001015663147, "learning_rate": 0.00011514301133297355, "loss": 3.1539, "step": 75000 }, { "epoch": 8.086253369272237, "eval_accuracy": 0.3905573686704396, "eval_loss": 3.333664894104004, "eval_runtime": 183.2216, "eval_samples_per_second": 98.302, "eval_steps_per_second": 6.146, "step": 75000 }, { "epoch": 8.091644204851752, "grad_norm": 0.8968559503555298, "learning_rate": 0.00011481921208850512, "loss": 3.1622, "step": 75050 }, { "epoch": 8.097035040431267, "grad_norm": 0.7851213812828064, "learning_rate": 0.00011449541284403669, "loss": 3.1479, "step": 75100 }, { "epoch": 8.102425876010782, "grad_norm": 0.8505614995956421, "learning_rate": 0.00011417161359956825, "loss": 3.1686, "step": 75150 }, { "epoch": 8.107816711590296, "grad_norm": 0.8062684535980225, "learning_rate": 0.00011384781435509983, "loss": 3.1602, "step": 75200 }, { "epoch": 8.11320754716981, "grad_norm": 
0.9041953682899475, "learning_rate": 0.0001135240151106314, "loss": 3.1506, "step": 75250 }, { "epoch": 8.118598382749326, "grad_norm": 0.8515322208404541, "learning_rate": 0.00011320021586616297, "loss": 3.1608, "step": 75300 }, { "epoch": 8.123989218328841, "grad_norm": 0.8603762984275818, "learning_rate": 0.00011288289260658391, "loss": 3.1645, "step": 75350 }, { "epoch": 8.129380053908356, "grad_norm": 0.8565706014633179, "learning_rate": 0.00011255909336211549, "loss": 3.1632, "step": 75400 }, { "epoch": 8.134770889487871, "grad_norm": 0.932990550994873, "learning_rate": 0.00011223529411764705, "loss": 3.167, "step": 75450 }, { "epoch": 8.140161725067385, "grad_norm": 0.7992711663246155, "learning_rate": 0.00011191149487317862, "loss": 3.141, "step": 75500 }, { "epoch": 8.1455525606469, "grad_norm": 0.835195004940033, "learning_rate": 0.0001115876956287102, "loss": 3.172, "step": 75550 }, { "epoch": 8.150943396226415, "grad_norm": 0.8241792917251587, "learning_rate": 0.00011126389638424176, "loss": 3.1497, "step": 75600 }, { "epoch": 8.15633423180593, "grad_norm": 0.7964764833450317, "learning_rate": 0.00011094009713977334, "loss": 3.1637, "step": 75650 }, { "epoch": 8.161725067385445, "grad_norm": 0.8399732708930969, "learning_rate": 0.0001106162978953049, "loss": 3.1866, "step": 75700 }, { "epoch": 8.167115902964959, "grad_norm": 0.8350764513015747, "learning_rate": 0.00011029249865083646, "loss": 3.1631, "step": 75750 }, { "epoch": 8.172506738544474, "grad_norm": 0.8351109623908997, "learning_rate": 0.00010996869940636804, "loss": 3.159, "step": 75800 }, { "epoch": 8.177897574123989, "grad_norm": 0.8726935386657715, "learning_rate": 0.00010964490016189962, "loss": 3.1482, "step": 75850 }, { "epoch": 8.183288409703504, "grad_norm": 0.8468180894851685, "learning_rate": 0.00010932110091743117, "loss": 3.1839, "step": 75900 }, { "epoch": 8.18867924528302, "grad_norm": 0.8606348037719727, "learning_rate": 0.00010899730167296275, "loss": 3.1466, "step": 75950 }, 
{ "epoch": 8.194070080862534, "grad_norm": 0.9001939296722412, "learning_rate": 0.00010867350242849432, "loss": 3.1617, "step": 76000 }, { "epoch": 8.194070080862534, "eval_accuracy": 0.3908328037410493, "eval_loss": 3.3323988914489746, "eval_runtime": 183.5683, "eval_samples_per_second": 98.116, "eval_steps_per_second": 6.134, "step": 76000 }, { "epoch": 8.199460916442048, "grad_norm": 0.9116581082344055, "learning_rate": 0.0001083497031840259, "loss": 3.171, "step": 76050 }, { "epoch": 8.204851752021563, "grad_norm": 0.8499873876571655, "learning_rate": 0.00010802590393955745, "loss": 3.1795, "step": 76100 }, { "epoch": 8.210242587601078, "grad_norm": 0.8768589496612549, "learning_rate": 0.00010770210469508903, "loss": 3.1671, "step": 76150 }, { "epoch": 8.215633423180593, "grad_norm": 0.8752418756484985, "learning_rate": 0.00010737830545062061, "loss": 3.1726, "step": 76200 }, { "epoch": 8.221024258760108, "grad_norm": 0.8474659323692322, "learning_rate": 0.00010705450620615219, "loss": 3.1591, "step": 76250 }, { "epoch": 8.226415094339623, "grad_norm": 0.8635431528091431, "learning_rate": 0.00010673070696168375, "loss": 3.1685, "step": 76300 }, { "epoch": 8.231805929919137, "grad_norm": 0.8334670662879944, "learning_rate": 0.00010640690771721531, "loss": 3.1738, "step": 76350 }, { "epoch": 8.237196765498652, "grad_norm": 0.8614861369132996, "learning_rate": 0.00010608310847274689, "loss": 3.1823, "step": 76400 }, { "epoch": 8.242587601078167, "grad_norm": 0.8414133191108704, "learning_rate": 0.00010575930922827846, "loss": 3.1497, "step": 76450 }, { "epoch": 8.247978436657682, "grad_norm": 0.8237685561180115, "learning_rate": 0.00010543550998381003, "loss": 3.158, "step": 76500 }, { "epoch": 8.253369272237197, "grad_norm": 0.8597197532653809, "learning_rate": 0.0001051117107393416, "loss": 3.1511, "step": 76550 }, { "epoch": 8.25876010781671, "grad_norm": 0.866402268409729, "learning_rate": 0.00010478791149487316, "loss": 3.1755, "step": 76600 }, { "epoch": 
8.264150943396226, "grad_norm": 0.8706876039505005, "learning_rate": 0.00010446411225040474, "loss": 3.1563, "step": 76650 }, { "epoch": 8.269541778975741, "grad_norm": 0.8851219415664673, "learning_rate": 0.00010414031300593632, "loss": 3.1876, "step": 76700 }, { "epoch": 8.274932614555256, "grad_norm": 0.8189446330070496, "learning_rate": 0.00010381651376146787, "loss": 3.1711, "step": 76750 }, { "epoch": 8.280323450134771, "grad_norm": 0.8582973480224609, "learning_rate": 0.00010349271451699945, "loss": 3.1521, "step": 76800 }, { "epoch": 8.285714285714286, "grad_norm": 0.8564453721046448, "learning_rate": 0.00010316891527253102, "loss": 3.1961, "step": 76850 }, { "epoch": 8.2911051212938, "grad_norm": 0.837863028049469, "learning_rate": 0.0001028451160280626, "loss": 3.1797, "step": 76900 }, { "epoch": 8.296495956873315, "grad_norm": 0.8210705518722534, "learning_rate": 0.00010252779276848353, "loss": 3.1627, "step": 76950 }, { "epoch": 8.30188679245283, "grad_norm": 0.8527716398239136, "learning_rate": 0.0001022039935240151, "loss": 3.1744, "step": 77000 }, { "epoch": 8.30188679245283, "eval_accuracy": 0.3909301567285311, "eval_loss": 3.3277838230133057, "eval_runtime": 183.3119, "eval_samples_per_second": 98.253, "eval_steps_per_second": 6.143, "step": 77000 }, { "epoch": 8.307277628032345, "grad_norm": 0.8103308081626892, "learning_rate": 0.00010188019427954668, "loss": 3.1687, "step": 77050 }, { "epoch": 8.31266846361186, "grad_norm": 0.8414570093154907, "learning_rate": 0.00010155639503507823, "loss": 3.171, "step": 77100 }, { "epoch": 8.318059299191376, "grad_norm": 0.8726758360862732, "learning_rate": 0.00010123259579060981, "loss": 3.1752, "step": 77150 }, { "epoch": 8.323450134770889, "grad_norm": 0.8334712982177734, "learning_rate": 0.00010090879654614139, "loss": 3.1825, "step": 77200 }, { "epoch": 8.328840970350404, "grad_norm": 0.8159593939781189, "learning_rate": 0.00010058499730167296, "loss": 3.185, "step": 77250 }, { "epoch": 8.33423180592992, 
"grad_norm": 0.8698613047599792, "learning_rate": 0.00010026119805720452, "loss": 3.1679, "step": 77300 }, { "epoch": 8.339622641509434, "grad_norm": 0.854110598564148, "learning_rate": 9.993739881273609e-05, "loss": 3.1685, "step": 77350 }, { "epoch": 8.34501347708895, "grad_norm": 0.8709824085235596, "learning_rate": 9.961359956826767e-05, "loss": 3.1878, "step": 77400 }, { "epoch": 8.350404312668463, "grad_norm": 0.8535655736923218, "learning_rate": 9.928980032379923e-05, "loss": 3.1643, "step": 77450 }, { "epoch": 8.355795148247978, "grad_norm": 0.9109821915626526, "learning_rate": 9.896600107933081e-05, "loss": 3.1706, "step": 77500 }, { "epoch": 8.361185983827493, "grad_norm": 0.8124513030052185, "learning_rate": 9.864220183486238e-05, "loss": 3.1827, "step": 77550 }, { "epoch": 8.366576819407008, "grad_norm": 0.8645600080490112, "learning_rate": 9.831840259039394e-05, "loss": 3.1798, "step": 77600 }, { "epoch": 8.371967654986523, "grad_norm": 0.8976259231567383, "learning_rate": 9.799460334592552e-05, "loss": 3.1675, "step": 77650 }, { "epoch": 8.377358490566039, "grad_norm": 0.8319971561431885, "learning_rate": 9.76708041014571e-05, "loss": 3.1847, "step": 77700 }, { "epoch": 8.382749326145552, "grad_norm": 0.8768755793571472, "learning_rate": 9.734700485698865e-05, "loss": 3.1646, "step": 77750 }, { "epoch": 8.388140161725067, "grad_norm": 0.8494071960449219, "learning_rate": 9.702320561252022e-05, "loss": 3.1844, "step": 77800 }, { "epoch": 8.393530997304582, "grad_norm": 0.8520126938819885, "learning_rate": 9.66994063680518e-05, "loss": 3.173, "step": 77850 }, { "epoch": 8.398921832884097, "grad_norm": 0.8483304381370544, "learning_rate": 9.637560712358338e-05, "loss": 3.176, "step": 77900 }, { "epoch": 8.404312668463612, "grad_norm": 0.8193878531455994, "learning_rate": 9.605180787911493e-05, "loss": 3.1709, "step": 77950 }, { "epoch": 8.409703504043126, "grad_norm": 0.8840237259864807, "learning_rate": 9.572800863464651e-05, "loss": 3.1638, "step": 
78000 }, { "epoch": 8.409703504043126, "eval_accuracy": 0.3914478877389671, "eval_loss": 3.3253719806671143, "eval_runtime": 183.4973, "eval_samples_per_second": 98.154, "eval_steps_per_second": 6.136, "step": 78000 }, { "epoch": 8.415094339622641, "grad_norm": 0.881489098072052, "learning_rate": 9.540420939017809e-05, "loss": 3.1724, "step": 78050 }, { "epoch": 8.420485175202156, "grad_norm": 0.86197429895401, "learning_rate": 9.508041014570966e-05, "loss": 3.1732, "step": 78100 }, { "epoch": 8.425876010781671, "grad_norm": 0.9012764692306519, "learning_rate": 9.475661090124121e-05, "loss": 3.1727, "step": 78150 }, { "epoch": 8.431266846361186, "grad_norm": 0.8909586071968079, "learning_rate": 9.443281165677279e-05, "loss": 3.1593, "step": 78200 }, { "epoch": 8.436657681940702, "grad_norm": 0.848753035068512, "learning_rate": 9.410901241230437e-05, "loss": 3.1753, "step": 78250 }, { "epoch": 8.442048517520215, "grad_norm": 0.9008297324180603, "learning_rate": 9.378521316783593e-05, "loss": 3.1694, "step": 78300 }, { "epoch": 8.44743935309973, "grad_norm": 0.8660398125648499, "learning_rate": 9.346141392336751e-05, "loss": 3.1938, "step": 78350 }, { "epoch": 8.452830188679245, "grad_norm": 0.8556224703788757, "learning_rate": 9.313761467889907e-05, "loss": 3.1827, "step": 78400 }, { "epoch": 8.45822102425876, "grad_norm": 0.8545512557029724, "learning_rate": 9.281381543443064e-05, "loss": 3.1655, "step": 78450 }, { "epoch": 8.463611859838275, "grad_norm": 0.8092154264450073, "learning_rate": 9.249001618996222e-05, "loss": 3.1973, "step": 78500 }, { "epoch": 8.46900269541779, "grad_norm": 0.8078595399856567, "learning_rate": 9.21662169454938e-05, "loss": 3.1669, "step": 78550 }, { "epoch": 8.474393530997304, "grad_norm": 0.7993897199630737, "learning_rate": 9.184241770102534e-05, "loss": 3.1705, "step": 78600 }, { "epoch": 8.479784366576819, "grad_norm": 0.8611373901367188, "learning_rate": 9.151861845655692e-05, "loss": 3.1709, "step": 78650 }, { "epoch": 
8.485175202156334, "grad_norm": 0.8634024262428284, "learning_rate": 9.11948192120885e-05, "loss": 3.1975, "step": 78700 }, { "epoch": 8.49056603773585, "grad_norm": 0.8928923010826111, "learning_rate": 9.087101996762008e-05, "loss": 3.1808, "step": 78750 }, { "epoch": 8.495956873315365, "grad_norm": 0.8743278384208679, "learning_rate": 9.054722072315163e-05, "loss": 3.173, "step": 78800 }, { "epoch": 8.501347708894878, "grad_norm": 0.8760882019996643, "learning_rate": 9.02234214786832e-05, "loss": 3.1766, "step": 78850 }, { "epoch": 8.506738544474393, "grad_norm": 0.8380445241928101, "learning_rate": 8.989962223421478e-05, "loss": 3.1711, "step": 78900 }, { "epoch": 8.512129380053908, "grad_norm": 0.8691101670265198, "learning_rate": 8.958229897463571e-05, "loss": 3.1905, "step": 78950 }, { "epoch": 8.517520215633423, "grad_norm": 0.8391762971878052, "learning_rate": 8.925849973016729e-05, "loss": 3.1771, "step": 79000 }, { "epoch": 8.517520215633423, "eval_accuracy": 0.39183414875514755, "eval_loss": 3.3210854530334473, "eval_runtime": 183.4514, "eval_samples_per_second": 98.179, "eval_steps_per_second": 6.138, "step": 79000 }, { "epoch": 8.522911051212938, "grad_norm": 0.8594598174095154, "learning_rate": 8.893470048569886e-05, "loss": 3.1846, "step": 79050 }, { "epoch": 8.528301886792454, "grad_norm": 0.8403089642524719, "learning_rate": 8.861090124123044e-05, "loss": 3.1923, "step": 79100 }, { "epoch": 8.533692722371967, "grad_norm": 0.815895676612854, "learning_rate": 8.828710199676199e-05, "loss": 3.1661, "step": 79150 }, { "epoch": 8.539083557951482, "grad_norm": 0.8398244380950928, "learning_rate": 8.796330275229357e-05, "loss": 3.1722, "step": 79200 }, { "epoch": 8.544474393530997, "grad_norm": 0.8309531807899475, "learning_rate": 8.763950350782515e-05, "loss": 3.163, "step": 79250 }, { "epoch": 8.549865229110512, "grad_norm": 0.8906266093254089, "learning_rate": 8.731570426335671e-05, "loss": 3.1818, "step": 79300 }, { "epoch": 8.555256064690028, 
"grad_norm": 0.8349162340164185, "learning_rate": 8.699190501888828e-05, "loss": 3.1804, "step": 79350 }, { "epoch": 8.560646900269543, "grad_norm": 0.8070085048675537, "learning_rate": 8.666810577441985e-05, "loss": 3.1515, "step": 79400 }, { "epoch": 8.566037735849056, "grad_norm": 0.8301887512207031, "learning_rate": 8.634430652995142e-05, "loss": 3.1848, "step": 79450 }, { "epoch": 8.571428571428571, "grad_norm": 0.8491299748420715, "learning_rate": 8.6020507285483e-05, "loss": 3.1771, "step": 79500 }, { "epoch": 8.576819407008086, "grad_norm": 0.8146814703941345, "learning_rate": 8.569670804101456e-05, "loss": 3.1755, "step": 79550 }, { "epoch": 8.582210242587601, "grad_norm": 0.8444635272026062, "learning_rate": 8.537290879654612e-05, "loss": 3.1763, "step": 79600 }, { "epoch": 8.587601078167117, "grad_norm": 0.8767122030258179, "learning_rate": 8.50491095520777e-05, "loss": 3.1571, "step": 79650 }, { "epoch": 8.59299191374663, "grad_norm": 0.8696253895759583, "learning_rate": 8.472531030760928e-05, "loss": 3.1813, "step": 79700 }, { "epoch": 8.598382749326145, "grad_norm": 0.8822606205940247, "learning_rate": 8.440151106314086e-05, "loss": 3.189, "step": 79750 }, { "epoch": 8.60377358490566, "grad_norm": 0.8302372694015503, "learning_rate": 8.407771181867241e-05, "loss": 3.1693, "step": 79800 }, { "epoch": 8.609164420485175, "grad_norm": 0.8339834213256836, "learning_rate": 8.375391257420398e-05, "loss": 3.1542, "step": 79850 }, { "epoch": 8.61455525606469, "grad_norm": 0.862395703792572, "learning_rate": 8.343011332973556e-05, "loss": 3.1626, "step": 79900 }, { "epoch": 8.619946091644206, "grad_norm": 0.846082329750061, "learning_rate": 8.310631408526714e-05, "loss": 3.1861, "step": 79950 }, { "epoch": 8.625336927223719, "grad_norm": 0.8844338059425354, "learning_rate": 8.278251484079869e-05, "loss": 3.1753, "step": 80000 }, { "epoch": 8.625336927223719, "eval_accuracy": 0.3923148291308388, "eval_loss": 3.3195295333862305, "eval_runtime": 183.5025, 
"eval_samples_per_second": 98.151, "eval_steps_per_second": 6.136, "step": 80000 }, { "epoch": 8.630727762803234, "grad_norm": 0.8043167591094971, "learning_rate": 8.245871559633027e-05, "loss": 3.1842, "step": 80050 }, { "epoch": 8.63611859838275, "grad_norm": 0.8519346714019775, "learning_rate": 8.213491635186185e-05, "loss": 3.1895, "step": 80100 }, { "epoch": 8.641509433962264, "grad_norm": 0.8410979509353638, "learning_rate": 8.181111710739341e-05, "loss": 3.1753, "step": 80150 }, { "epoch": 8.64690026954178, "grad_norm": 0.859257161617279, "learning_rate": 8.148731786292497e-05, "loss": 3.1692, "step": 80200 }, { "epoch": 8.652291105121293, "grad_norm": 0.8353541493415833, "learning_rate": 8.116351861845655e-05, "loss": 3.179, "step": 80250 }, { "epoch": 8.657681940700808, "grad_norm": 1.4765746593475342, "learning_rate": 8.083971937398812e-05, "loss": 3.1822, "step": 80300 }, { "epoch": 8.663072776280323, "grad_norm": 0.8867174386978149, "learning_rate": 8.05159201295197e-05, "loss": 3.1748, "step": 80350 }, { "epoch": 8.668463611859838, "grad_norm": 0.8695386052131653, "learning_rate": 8.019212088505126e-05, "loss": 3.1764, "step": 80400 }, { "epoch": 8.673854447439354, "grad_norm": 0.8609994649887085, "learning_rate": 7.986832164058282e-05, "loss": 3.1638, "step": 80450 }, { "epoch": 8.679245283018869, "grad_norm": 0.8649408221244812, "learning_rate": 7.95445223961144e-05, "loss": 3.1631, "step": 80500 }, { "epoch": 8.684636118598382, "grad_norm": 0.8425276279449463, "learning_rate": 7.922072315164598e-05, "loss": 3.1902, "step": 80550 }, { "epoch": 8.690026954177897, "grad_norm": 0.8469082117080688, "learning_rate": 7.889692390717755e-05, "loss": 3.1604, "step": 80600 }, { "epoch": 8.695417789757412, "grad_norm": 0.9055061936378479, "learning_rate": 7.85731246627091e-05, "loss": 3.1877, "step": 80650 }, { "epoch": 8.700808625336927, "grad_norm": 0.8626482486724854, "learning_rate": 7.824932541824068e-05, "loss": 3.1765, "step": 80700 }, { "epoch": 
8.706199460916443, "grad_norm": 0.913098931312561, "learning_rate": 7.792552617377226e-05, "loss": 3.1799, "step": 80750 }, { "epoch": 8.711590296495956, "grad_norm": 0.8522776961326599, "learning_rate": 7.760172692930382e-05, "loss": 3.1608, "step": 80800 }, { "epoch": 8.716981132075471, "grad_norm": 0.8847063183784485, "learning_rate": 7.727792768483539e-05, "loss": 3.1656, "step": 80850 }, { "epoch": 8.722371967654986, "grad_norm": 0.9037518501281738, "learning_rate": 7.695412844036697e-05, "loss": 3.1761, "step": 80900 }, { "epoch": 8.727762803234501, "grad_norm": 0.8497751951217651, "learning_rate": 7.663032919589854e-05, "loss": 3.1929, "step": 80950 }, { "epoch": 8.733153638814017, "grad_norm": 0.8641258478164673, "learning_rate": 7.630652995143011e-05, "loss": 3.1883, "step": 81000 }, { "epoch": 8.733153638814017, "eval_accuracy": 0.39242739352261463, "eval_loss": 3.3154139518737793, "eval_runtime": 183.473, "eval_samples_per_second": 98.167, "eval_steps_per_second": 6.137, "step": 81000 }, { "epoch": 8.738544474393532, "grad_norm": 0.8507005572319031, "learning_rate": 7.598273070696167e-05, "loss": 3.1759, "step": 81050 }, { "epoch": 8.743935309973045, "grad_norm": 0.8656426072120667, "learning_rate": 7.565893146249325e-05, "loss": 3.1855, "step": 81100 }, { "epoch": 8.74932614555256, "grad_norm": 0.8609006404876709, "learning_rate": 7.533513221802481e-05, "loss": 3.1698, "step": 81150 }, { "epoch": 8.754716981132075, "grad_norm": 0.973294198513031, "learning_rate": 7.501133297355639e-05, "loss": 3.1679, "step": 81200 }, { "epoch": 8.76010781671159, "grad_norm": 0.8869118094444275, "learning_rate": 7.468753372908796e-05, "loss": 3.1771, "step": 81250 }, { "epoch": 8.765498652291106, "grad_norm": 0.8876540064811707, "learning_rate": 7.436373448461953e-05, "loss": 3.1754, "step": 81300 }, { "epoch": 8.77088948787062, "grad_norm": 0.8496720790863037, "learning_rate": 7.40399352401511e-05, "loss": 3.1767, "step": 81350 }, { "epoch": 8.776280323450134, 
"grad_norm": 0.8450832366943359, "learning_rate": 7.371613599568268e-05, "loss": 3.1846, "step": 81400 }, { "epoch": 8.78167115902965, "grad_norm": 0.8679110407829285, "learning_rate": 7.339233675121424e-05, "loss": 3.1839, "step": 81450 }, { "epoch": 8.787061994609164, "grad_norm": 0.9450589418411255, "learning_rate": 7.306853750674582e-05, "loss": 3.2063, "step": 81500 }, { "epoch": 8.79245283018868, "grad_norm": 0.8842014670372009, "learning_rate": 7.274473826227738e-05, "loss": 3.1691, "step": 81550 }, { "epoch": 8.797843665768195, "grad_norm": 0.8416998386383057, "learning_rate": 7.242093901780896e-05, "loss": 3.1774, "step": 81600 }, { "epoch": 8.80323450134771, "grad_norm": 0.9002299904823303, "learning_rate": 7.209713977334052e-05, "loss": 3.1794, "step": 81650 }, { "epoch": 8.808625336927223, "grad_norm": 0.8381384015083313, "learning_rate": 7.17733405288721e-05, "loss": 3.1905, "step": 81700 }, { "epoch": 8.814016172506738, "grad_norm": 0.8705641031265259, "learning_rate": 7.144954128440366e-05, "loss": 3.1742, "step": 81750 }, { "epoch": 8.819407008086253, "grad_norm": 0.8659651279449463, "learning_rate": 7.112574203993523e-05, "loss": 3.1648, "step": 81800 }, { "epoch": 8.824797843665769, "grad_norm": 0.8624113202095032, "learning_rate": 7.08019427954668e-05, "loss": 3.181, "step": 81850 }, { "epoch": 8.830188679245284, "grad_norm": 0.8544336557388306, "learning_rate": 7.047814355099837e-05, "loss": 3.2008, "step": 81900 }, { "epoch": 8.835579514824797, "grad_norm": 0.8490225076675415, "learning_rate": 7.015434430652993e-05, "loss": 3.1846, "step": 81950 }, { "epoch": 8.840970350404312, "grad_norm": 0.8430733680725098, "learning_rate": 6.983054506206151e-05, "loss": 3.173, "step": 82000 }, { "epoch": 8.840970350404312, "eval_accuracy": 0.3929860866717567, "eval_loss": 3.311267137527466, "eval_runtime": 183.6755, "eval_samples_per_second": 98.059, "eval_steps_per_second": 6.13, "step": 82000 }, { "epoch": 8.846361185983827, "grad_norm": 
0.8979526162147522, "learning_rate": 6.950674581759309e-05, "loss": 3.1733, "step": 82050 }, { "epoch": 8.851752021563343, "grad_norm": 0.8584031462669373, "learning_rate": 6.918294657312465e-05, "loss": 3.1726, "step": 82100 }, { "epoch": 8.857142857142858, "grad_norm": 0.8862038850784302, "learning_rate": 6.885914732865623e-05, "loss": 3.169, "step": 82150 }, { "epoch": 8.862533692722373, "grad_norm": 0.8636807799339294, "learning_rate": 6.85353480841878e-05, "loss": 3.1808, "step": 82200 }, { "epoch": 8.867924528301886, "grad_norm": 0.8127702474594116, "learning_rate": 6.821154883971937e-05, "loss": 3.182, "step": 82250 }, { "epoch": 8.873315363881401, "grad_norm": 0.8639582991600037, "learning_rate": 6.788774959525094e-05, "loss": 3.1617, "step": 82300 }, { "epoch": 8.878706199460916, "grad_norm": 0.8684182167053223, "learning_rate": 6.756395035078252e-05, "loss": 3.1501, "step": 82350 }, { "epoch": 8.884097035040432, "grad_norm": 0.8748733401298523, "learning_rate": 6.724015110631408e-05, "loss": 3.1779, "step": 82400 }, { "epoch": 8.889487870619947, "grad_norm": 0.8723418116569519, "learning_rate": 6.691635186184566e-05, "loss": 3.1651, "step": 82450 }, { "epoch": 8.89487870619946, "grad_norm": 0.8358088135719299, "learning_rate": 6.659255261737722e-05, "loss": 3.1762, "step": 82500 }, { "epoch": 8.900269541778975, "grad_norm": 0.8295577168464661, "learning_rate": 6.626875337290879e-05, "loss": 3.1649, "step": 82550 }, { "epoch": 8.90566037735849, "grad_norm": 0.8575540781021118, "learning_rate": 6.594495412844036e-05, "loss": 3.1891, "step": 82600 }, { "epoch": 8.911051212938006, "grad_norm": 0.8286822438240051, "learning_rate": 6.562115488397193e-05, "loss": 3.1915, "step": 82650 }, { "epoch": 8.91644204851752, "grad_norm": 0.8888596296310425, "learning_rate": 6.52973556395035e-05, "loss": 3.1872, "step": 82700 }, { "epoch": 8.921832884097036, "grad_norm": 0.863433301448822, "learning_rate": 6.497355639503507e-05, "loss": 3.1827, "step": 82750 }, { "epoch": 
8.92722371967655, "grad_norm": 0.8844254612922668, "learning_rate": 6.464975715056663e-05, "loss": 3.181, "step": 82800 }, { "epoch": 8.932614555256064, "grad_norm": 0.8509985208511353, "learning_rate": 6.432595790609821e-05, "loss": 3.1813, "step": 82850 }, { "epoch": 8.93800539083558, "grad_norm": 0.896156907081604, "learning_rate": 6.400215866162979e-05, "loss": 3.177, "step": 82900 }, { "epoch": 8.943396226415095, "grad_norm": 0.8738552927970886, "learning_rate": 6.367835941716135e-05, "loss": 3.1741, "step": 82950 }, { "epoch": 8.94878706199461, "grad_norm": 0.8401568531990051, "learning_rate": 6.335456017269293e-05, "loss": 3.1879, "step": 83000 }, { "epoch": 8.94878706199461, "eval_accuracy": 0.3931521082843372, "eval_loss": 3.3100833892822266, "eval_runtime": 183.3659, "eval_samples_per_second": 98.224, "eval_steps_per_second": 6.141, "step": 83000 }, { "epoch": 8.954177897574123, "grad_norm": 0.8782355785369873, "learning_rate": 6.30307609282245e-05, "loss": 3.1927, "step": 83050 }, { "epoch": 8.959568733153638, "grad_norm": 0.8759222030639648, "learning_rate": 6.270696168375607e-05, "loss": 3.1784, "step": 83100 }, { "epoch": 8.964959568733153, "grad_norm": 0.8785428404808044, "learning_rate": 6.238316243928764e-05, "loss": 3.1803, "step": 83150 }, { "epoch": 8.970350404312669, "grad_norm": 0.8433955311775208, "learning_rate": 6.205936319481921e-05, "loss": 3.188, "step": 83200 }, { "epoch": 8.975741239892184, "grad_norm": 0.877407968044281, "learning_rate": 6.173556395035078e-05, "loss": 3.1738, "step": 83250 }, { "epoch": 8.981132075471699, "grad_norm": 0.8445099592208862, "learning_rate": 6.141176470588236e-05, "loss": 3.1865, "step": 83300 }, { "epoch": 8.986522911051212, "grad_norm": 0.8456767797470093, "learning_rate": 6.108796546141392e-05, "loss": 3.1833, "step": 83350 }, { "epoch": 8.991913746630727, "grad_norm": 0.8464460968971252, "learning_rate": 6.076416621694549e-05, "loss": 3.1835, "step": 83400 }, { "epoch": 8.997304582210242, "grad_norm": 
0.8834063410758972, "learning_rate": 6.0440366972477055e-05, "loss": 3.1796, "step": 83450 }, { "epoch": 9.002695417789758, "grad_norm": 0.8756682276725769, "learning_rate": 6.011656772800863e-05, "loss": 3.1574, "step": 83500 }, { "epoch": 9.008086253369273, "grad_norm": 0.8651514053344727, "learning_rate": 5.9792768483540197e-05, "loss": 3.1089, "step": 83550 }, { "epoch": 9.013477088948788, "grad_norm": 0.8796454668045044, "learning_rate": 5.946896923907177e-05, "loss": 3.1068, "step": 83600 }, { "epoch": 9.018867924528301, "grad_norm": 0.9122756123542786, "learning_rate": 5.9145169994603345e-05, "loss": 3.1101, "step": 83650 }, { "epoch": 9.024258760107816, "grad_norm": 0.9356156587600708, "learning_rate": 5.882137075013491e-05, "loss": 3.1069, "step": 83700 }, { "epoch": 9.029649595687331, "grad_norm": 0.8637686371803284, "learning_rate": 5.849757150566649e-05, "loss": 3.1241, "step": 83750 }, { "epoch": 9.035040431266847, "grad_norm": 0.830084502696991, "learning_rate": 5.817377226119805e-05, "loss": 3.1141, "step": 83800 }, { "epoch": 9.040431266846362, "grad_norm": 0.8634269833564758, "learning_rate": 5.784997301672963e-05, "loss": 3.1249, "step": 83850 }, { "epoch": 9.045822102425875, "grad_norm": 0.8700315356254578, "learning_rate": 5.752617377226119e-05, "loss": 3.1224, "step": 83900 }, { "epoch": 9.05121293800539, "grad_norm": 0.8730193376541138, "learning_rate": 5.7202374527792764e-05, "loss": 3.1196, "step": 83950 }, { "epoch": 9.056603773584905, "grad_norm": 0.9231434464454651, "learning_rate": 5.6878575283324335e-05, "loss": 3.1161, "step": 84000 }, { "epoch": 9.056603773584905, "eval_accuracy": 0.39329487817892544, "eval_loss": 3.3126213550567627, "eval_runtime": 183.6807, "eval_samples_per_second": 98.056, "eval_steps_per_second": 6.13, "step": 84000 }, { "epoch": 9.06199460916442, "grad_norm": 0.8769927024841309, "learning_rate": 5.6554776038855905e-05, "loss": 3.1255, "step": 84050 }, { "epoch": 9.067385444743936, "grad_norm": 
0.8235280513763428, "learning_rate": 5.623097679438747e-05, "loss": 3.1235, "step": 84100 }, { "epoch": 9.07277628032345, "grad_norm": 0.8591448664665222, "learning_rate": 5.590717754991905e-05, "loss": 3.1205, "step": 84150 }, { "epoch": 9.078167115902964, "grad_norm": 0.8690507411956787, "learning_rate": 5.558337830545061e-05, "loss": 3.1238, "step": 84200 }, { "epoch": 9.08355795148248, "grad_norm": 0.841168224811554, "learning_rate": 5.525957906098219e-05, "loss": 3.1343, "step": 84250 }, { "epoch": 9.088948787061994, "grad_norm": 0.8726950883865356, "learning_rate": 5.493577981651375e-05, "loss": 3.1138, "step": 84300 }, { "epoch": 9.09433962264151, "grad_norm": 0.898984968662262, "learning_rate": 5.461845655693469e-05, "loss": 3.131, "step": 84350 }, { "epoch": 9.099730458221025, "grad_norm": 0.871024489402771, "learning_rate": 5.429465731246627e-05, "loss": 3.1095, "step": 84400 }, { "epoch": 9.10512129380054, "grad_norm": 0.9210131764411926, "learning_rate": 5.3970858067997833e-05, "loss": 3.1129, "step": 84450 }, { "epoch": 9.110512129380053, "grad_norm": 0.8362647294998169, "learning_rate": 5.364705882352941e-05, "loss": 3.1118, "step": 84500 }, { "epoch": 9.115902964959568, "grad_norm": 0.9286665320396423, "learning_rate": 5.3323259579060975e-05, "loss": 3.1208, "step": 84550 }, { "epoch": 9.121293800539084, "grad_norm": 0.8982177972793579, "learning_rate": 5.299946033459255e-05, "loss": 3.1197, "step": 84600 }, { "epoch": 9.126684636118599, "grad_norm": 0.8783411979675293, "learning_rate": 5.267566109012412e-05, "loss": 3.1145, "step": 84650 }, { "epoch": 9.132075471698114, "grad_norm": 0.8987107276916504, "learning_rate": 5.235186184565569e-05, "loss": 3.1187, "step": 84700 }, { "epoch": 9.137466307277627, "grad_norm": 0.872234582901001, "learning_rate": 5.202806260118726e-05, "loss": 3.128, "step": 84750 }, { "epoch": 9.142857142857142, "grad_norm": 0.8869773745536804, "learning_rate": 5.170426335671883e-05, "loss": 3.132, "step": 84800 }, { "epoch": 
9.148247978436657, "grad_norm": 0.8714069724082947, "learning_rate": 5.1380464112250394e-05, "loss": 3.1384, "step": 84850 }, { "epoch": 9.153638814016173, "grad_norm": 0.8658605217933655, "learning_rate": 5.105666486778197e-05, "loss": 3.1152, "step": 84900 }, { "epoch": 9.159029649595688, "grad_norm": 0.8711130619049072, "learning_rate": 5.0732865623313536e-05, "loss": 3.1266, "step": 84950 }, { "epoch": 9.164420485175203, "grad_norm": 0.8554506301879883, "learning_rate": 5.040906637884511e-05, "loss": 3.1182, "step": 85000 }, { "epoch": 9.164420485175203, "eval_accuracy": 0.3934888235836743, "eval_loss": 3.310671806335449, "eval_runtime": 183.8724, "eval_samples_per_second": 97.954, "eval_steps_per_second": 6.124, "step": 85000 }, { "epoch": 9.169811320754716, "grad_norm": 0.8684996962547302, "learning_rate": 5.0085267134376684e-05, "loss": 3.1199, "step": 85050 }, { "epoch": 9.175202156334231, "grad_norm": 0.8756259679794312, "learning_rate": 4.9761467889908255e-05, "loss": 3.1533, "step": 85100 }, { "epoch": 9.180592991913747, "grad_norm": 0.8757433295249939, "learning_rate": 4.9437668645439826e-05, "loss": 3.1191, "step": 85150 }, { "epoch": 9.185983827493262, "grad_norm": 0.857020378112793, "learning_rate": 4.911386940097139e-05, "loss": 3.1121, "step": 85200 }, { "epoch": 9.191374663072777, "grad_norm": 0.9065777659416199, "learning_rate": 4.879007015650297e-05, "loss": 3.1354, "step": 85250 }, { "epoch": 9.19676549865229, "grad_norm": 0.848362147808075, "learning_rate": 4.846627091203453e-05, "loss": 3.1066, "step": 85300 }, { "epoch": 9.202156334231805, "grad_norm": 0.8962082862854004, "learning_rate": 4.814247166756611e-05, "loss": 3.1158, "step": 85350 }, { "epoch": 9.20754716981132, "grad_norm": 0.8896428942680359, "learning_rate": 4.7818672423097674e-05, "loss": 3.1332, "step": 85400 }, { "epoch": 9.212938005390836, "grad_norm": 0.8223966360092163, "learning_rate": 4.7494873178629244e-05, "loss": 3.097, "step": 85450 }, { "epoch": 9.21832884097035, 
"grad_norm": 0.867064893245697, "learning_rate": 4.7171073934160815e-05, "loss": 3.1308, "step": 85500 }, { "epoch": 9.223719676549866, "grad_norm": 0.9328610301017761, "learning_rate": 4.6847274689692386e-05, "loss": 3.1116, "step": 85550 }, { "epoch": 9.22911051212938, "grad_norm": 0.8840168714523315, "learning_rate": 4.652347544522396e-05, "loss": 3.134, "step": 85600 }, { "epoch": 9.234501347708894, "grad_norm": 0.8884910345077515, "learning_rate": 4.619967620075553e-05, "loss": 3.1195, "step": 85650 }, { "epoch": 9.23989218328841, "grad_norm": 0.9012236595153809, "learning_rate": 4.587587695628709e-05, "loss": 3.1036, "step": 85700 }, { "epoch": 9.245283018867925, "grad_norm": 0.8753321170806885, "learning_rate": 4.555207771181867e-05, "loss": 3.1286, "step": 85750 }, { "epoch": 9.25067385444744, "grad_norm": 0.8457467555999756, "learning_rate": 4.522827846735024e-05, "loss": 3.1347, "step": 85800 }, { "epoch": 9.256064690026955, "grad_norm": 0.8559665083885193, "learning_rate": 4.490447922288181e-05, "loss": 3.134, "step": 85850 }, { "epoch": 9.261455525606468, "grad_norm": 0.8733518123626709, "learning_rate": 4.458067997841338e-05, "loss": 3.0981, "step": 85900 }, { "epoch": 9.266846361185983, "grad_norm": 0.8558553457260132, "learning_rate": 4.4256880733944947e-05, "loss": 3.134, "step": 85950 }, { "epoch": 9.272237196765499, "grad_norm": 0.8802152872085571, "learning_rate": 4.3933081489476524e-05, "loss": 3.1283, "step": 86000 }, { "epoch": 9.272237196765499, "eval_accuracy": 0.39366853546011943, "eval_loss": 3.3087074756622314, "eval_runtime": 183.4292, "eval_samples_per_second": 98.19, "eval_steps_per_second": 6.139, "step": 86000 }, { "epoch": 9.277628032345014, "grad_norm": 0.8693817257881165, "learning_rate": 4.360928224500809e-05, "loss": 3.1253, "step": 86050 }, { "epoch": 9.283018867924529, "grad_norm": 0.929667592048645, "learning_rate": 4.3285483000539666e-05, "loss": 3.124, "step": 86100 }, { "epoch": 9.288409703504042, "grad_norm": 
0.9104236960411072, "learning_rate": 4.296168375607123e-05, "loss": 3.1201, "step": 86150 }, { "epoch": 9.293800539083557, "grad_norm": 0.8924854397773743, "learning_rate": 4.263788451160281e-05, "loss": 3.134, "step": 86200 }, { "epoch": 9.299191374663073, "grad_norm": 0.8825605511665344, "learning_rate": 4.231408526713437e-05, "loss": 3.1373, "step": 86250 }, { "epoch": 9.304582210242588, "grad_norm": 0.8415103554725647, "learning_rate": 4.199028602266594e-05, "loss": 3.1284, "step": 86300 }, { "epoch": 9.309973045822103, "grad_norm": 0.8934675455093384, "learning_rate": 4.1666486778197514e-05, "loss": 3.0965, "step": 86350 }, { "epoch": 9.315363881401618, "grad_norm": 0.8981188535690308, "learning_rate": 4.1342687533729085e-05, "loss": 3.1221, "step": 86400 }, { "epoch": 9.320754716981131, "grad_norm": 0.8392598032951355, "learning_rate": 4.101888828926065e-05, "loss": 3.1186, "step": 86450 }, { "epoch": 9.326145552560646, "grad_norm": 0.8929069638252258, "learning_rate": 4.0695089044792226e-05, "loss": 3.1407, "step": 86500 }, { "epoch": 9.331536388140162, "grad_norm": 0.9197957515716553, "learning_rate": 4.037128980032379e-05, "loss": 3.1202, "step": 86550 }, { "epoch": 9.336927223719677, "grad_norm": 0.8734220266342163, "learning_rate": 4.004749055585537e-05, "loss": 3.1092, "step": 86600 }, { "epoch": 9.342318059299192, "grad_norm": 0.8806682229042053, "learning_rate": 3.972369131138694e-05, "loss": 3.1167, "step": 86650 }, { "epoch": 9.347708894878707, "grad_norm": 0.8593670129776001, "learning_rate": 3.93998920669185e-05, "loss": 3.1351, "step": 86700 }, { "epoch": 9.35309973045822, "grad_norm": 0.8641849160194397, "learning_rate": 3.907609282245008e-05, "loss": 3.1207, "step": 86750 }, { "epoch": 9.358490566037736, "grad_norm": 0.8967059254646301, "learning_rate": 3.8752293577981645e-05, "loss": 3.1447, "step": 86800 }, { "epoch": 9.36388140161725, "grad_norm": 0.851588249206543, "learning_rate": 3.842849433351322e-05, "loss": 3.1141, "step": 86850 }, { 
"epoch": 9.369272237196766, "grad_norm": 0.8949471116065979, "learning_rate": 3.810469508904479e-05, "loss": 3.1332, "step": 86900 }, { "epoch": 9.374663072776281, "grad_norm": 0.8716377019882202, "learning_rate": 3.7780895844576364e-05, "loss": 3.1285, "step": 86950 }, { "epoch": 9.380053908355794, "grad_norm": 0.8617969155311584, "learning_rate": 3.745709660010793e-05, "loss": 3.1145, "step": 87000 }, { "epoch": 9.380053908355794, "eval_accuracy": 0.39399134318981627, "eval_loss": 3.307685613632202, "eval_runtime": 183.5956, "eval_samples_per_second": 98.101, "eval_steps_per_second": 6.133, "step": 87000 }, { "epoch": 9.38544474393531, "grad_norm": 0.8978739976882935, "learning_rate": 3.71332973556395e-05, "loss": 3.1249, "step": 87050 }, { "epoch": 9.390835579514825, "grad_norm": 0.857925295829773, "learning_rate": 3.680949811117107e-05, "loss": 3.1099, "step": 87100 }, { "epoch": 9.39622641509434, "grad_norm": 0.8732088208198547, "learning_rate": 3.648569886670264e-05, "loss": 3.1194, "step": 87150 }, { "epoch": 9.401617250673855, "grad_norm": 0.9072591066360474, "learning_rate": 3.616189962223421e-05, "loss": 3.1287, "step": 87200 }, { "epoch": 9.40700808625337, "grad_norm": 0.8680362105369568, "learning_rate": 3.583810037776578e-05, "loss": 3.1274, "step": 87250 }, { "epoch": 9.412398921832883, "grad_norm": 0.8892818093299866, "learning_rate": 3.5514301133297354e-05, "loss": 3.1368, "step": 87300 }, { "epoch": 9.417789757412399, "grad_norm": 0.9270879626274109, "learning_rate": 3.519697787371829e-05, "loss": 3.1019, "step": 87350 }, { "epoch": 9.423180592991914, "grad_norm": 0.9087534546852112, "learning_rate": 3.487317862924986e-05, "loss": 3.1312, "step": 87400 }, { "epoch": 9.428571428571429, "grad_norm": 0.8477288484573364, "learning_rate": 3.4549379384781434e-05, "loss": 3.1269, "step": 87450 }, { "epoch": 9.433962264150944, "grad_norm": 0.8833394646644592, "learning_rate": 3.4225580140313e-05, "loss": 3.1109, "step": 87500 }, { "epoch": 
9.439353099730457, "grad_norm": 0.834549605846405, "learning_rate": 3.390178089584457e-05, "loss": 3.1338, "step": 87550 }, { "epoch": 9.444743935309972, "grad_norm": 0.8486028909683228, "learning_rate": 3.357798165137615e-05, "loss": 3.1179, "step": 87600 }, { "epoch": 9.450134770889488, "grad_norm": 0.8409470915794373, "learning_rate": 3.325418240690772e-05, "loss": 3.1084, "step": 87650 }, { "epoch": 9.455525606469003, "grad_norm": 0.9353410005569458, "learning_rate": 3.293038316243929e-05, "loss": 3.1225, "step": 87700 }, { "epoch": 9.460916442048518, "grad_norm": 0.886806309223175, "learning_rate": 3.260658391797086e-05, "loss": 3.1205, "step": 87750 }, { "epoch": 9.466307277628033, "grad_norm": 0.8259122371673584, "learning_rate": 3.2282784673502424e-05, "loss": 3.121, "step": 87800 }, { "epoch": 9.471698113207546, "grad_norm": 0.8698675036430359, "learning_rate": 3.1958985429033995e-05, "loss": 3.1229, "step": 87850 }, { "epoch": 9.477088948787062, "grad_norm": 0.8667991757392883, "learning_rate": 3.1635186184565565e-05, "loss": 3.1285, "step": 87900 }, { "epoch": 9.482479784366577, "grad_norm": 0.9097034931182861, "learning_rate": 3.1311386940097136e-05, "loss": 3.1274, "step": 87950 }, { "epoch": 9.487870619946092, "grad_norm": 0.9387816786766052, "learning_rate": 3.098758769562871e-05, "loss": 3.1251, "step": 88000 }, { "epoch": 9.487870619946092, "eval_accuracy": 0.3944075924030335, "eval_loss": 3.3036043643951416, "eval_runtime": 183.5422, "eval_samples_per_second": 98.13, "eval_steps_per_second": 6.135, "step": 88000 }, { "epoch": 9.493261455525607, "grad_norm": 0.8956674933433533, "learning_rate": 3.066378845116028e-05, "loss": 3.1346, "step": 88050 }, { "epoch": 9.498652291105122, "grad_norm": 0.9043859839439392, "learning_rate": 3.033998920669185e-05, "loss": 3.13, "step": 88100 }, { "epoch": 9.504043126684635, "grad_norm": 0.8860067129135132, "learning_rate": 3.0016189962223416e-05, "loss": 3.1347, "step": 88150 }, { "epoch": 9.50943396226415, 
"grad_norm": 0.8624042272567749, "learning_rate": 2.9692390717754987e-05, "loss": 3.1483, "step": 88200 }, { "epoch": 9.514824797843666, "grad_norm": 0.9003132581710815, "learning_rate": 2.9368591473286558e-05, "loss": 3.1226, "step": 88250 }, { "epoch": 9.520215633423181, "grad_norm": 0.884479284286499, "learning_rate": 2.904479222881813e-05, "loss": 3.1117, "step": 88300 }, { "epoch": 9.525606469002696, "grad_norm": 0.9293926954269409, "learning_rate": 2.87209929843497e-05, "loss": 3.1243, "step": 88350 }, { "epoch": 9.530997304582211, "grad_norm": 0.9077318906784058, "learning_rate": 2.8397193739881274e-05, "loss": 3.1164, "step": 88400 }, { "epoch": 9.536388140161725, "grad_norm": 0.8652298450469971, "learning_rate": 2.8073394495412842e-05, "loss": 3.1231, "step": 88450 }, { "epoch": 9.54177897574124, "grad_norm": 0.8628005385398865, "learning_rate": 2.7749595250944413e-05, "loss": 3.1198, "step": 88500 }, { "epoch": 9.547169811320755, "grad_norm": 0.8354774117469788, "learning_rate": 2.7425796006475984e-05, "loss": 3.1267, "step": 88550 }, { "epoch": 9.55256064690027, "grad_norm": 0.8727371692657471, "learning_rate": 2.7101996762007554e-05, "loss": 3.1216, "step": 88600 }, { "epoch": 9.557951482479785, "grad_norm": 0.8466863036155701, "learning_rate": 2.6778197517539125e-05, "loss": 3.1325, "step": 88650 }, { "epoch": 9.563342318059298, "grad_norm": 0.8633363842964172, "learning_rate": 2.6454398273070693e-05, "loss": 3.1185, "step": 88700 }, { "epoch": 9.568733153638814, "grad_norm": 0.8681420087814331, "learning_rate": 2.6130599028602264e-05, "loss": 3.1405, "step": 88750 }, { "epoch": 9.574123989218329, "grad_norm": 0.9068518877029419, "learning_rate": 2.5806799784133835e-05, "loss": 3.1347, "step": 88800 }, { "epoch": 9.579514824797844, "grad_norm": 0.8506572842597961, "learning_rate": 2.5483000539665406e-05, "loss": 3.1212, "step": 88850 }, { "epoch": 9.584905660377359, "grad_norm": 0.8176720142364502, "learning_rate": 2.5159201295196976e-05, "loss": 
3.1273, "step": 88900 }, { "epoch": 9.590296495956874, "grad_norm": 0.9253360629081726, "learning_rate": 2.4835402050728544e-05, "loss": 3.1155, "step": 88950 }, { "epoch": 9.595687331536388, "grad_norm": 0.8876266479492188, "learning_rate": 2.4511602806260115e-05, "loss": 3.1404, "step": 89000 }, { "epoch": 9.595687331536388, "eval_accuracy": 0.39452298176989253, "eval_loss": 3.302065372467041, "eval_runtime": 183.5463, "eval_samples_per_second": 98.128, "eval_steps_per_second": 6.135, "step": 89000 }, { "epoch": 9.601078167115903, "grad_norm": 0.8948820233345032, "learning_rate": 2.4187803561791686e-05, "loss": 3.1014, "step": 89050 }, { "epoch": 9.606469002695418, "grad_norm": 0.8849186897277832, "learning_rate": 2.3864004317323257e-05, "loss": 3.1244, "step": 89100 }, { "epoch": 9.611859838274933, "grad_norm": 0.8476836085319519, "learning_rate": 2.354020507285483e-05, "loss": 3.1156, "step": 89150 }, { "epoch": 9.617250673854448, "grad_norm": 0.8894110321998596, "learning_rate": 2.3216405828386402e-05, "loss": 3.1006, "step": 89200 }, { "epoch": 9.622641509433961, "grad_norm": 0.8986078500747681, "learning_rate": 2.289260658391797e-05, "loss": 3.1276, "step": 89250 }, { "epoch": 9.628032345013477, "grad_norm": 0.9119869470596313, "learning_rate": 2.256880733944954e-05, "loss": 3.1514, "step": 89300 }, { "epoch": 9.633423180592992, "grad_norm": 0.8732916712760925, "learning_rate": 2.224500809498111e-05, "loss": 3.1433, "step": 89350 }, { "epoch": 9.638814016172507, "grad_norm": 0.8498047590255737, "learning_rate": 2.1921208850512682e-05, "loss": 3.1164, "step": 89400 }, { "epoch": 9.644204851752022, "grad_norm": 0.8872281312942505, "learning_rate": 2.1597409606044253e-05, "loss": 3.1294, "step": 89450 }, { "epoch": 9.649595687331537, "grad_norm": 0.8698834776878357, "learning_rate": 2.127361036157582e-05, "loss": 3.1374, "step": 89500 }, { "epoch": 9.65498652291105, "grad_norm": 0.8387486338615417, "learning_rate": 2.094981111710739e-05, "loss": 3.1255, "step": 
89550 }, { "epoch": 9.660377358490566, "grad_norm": 0.8434497714042664, "learning_rate": 2.0626011872638962e-05, "loss": 3.1261, "step": 89600 }, { "epoch": 9.66576819407008, "grad_norm": 0.8968543410301208, "learning_rate": 2.0302212628170533e-05, "loss": 3.1309, "step": 89650 }, { "epoch": 9.671159029649596, "grad_norm": 0.9121111035346985, "learning_rate": 1.99784133837021e-05, "loss": 3.1151, "step": 89700 }, { "epoch": 9.676549865229111, "grad_norm": 0.9186899065971375, "learning_rate": 1.965461413923367e-05, "loss": 3.1392, "step": 89750 }, { "epoch": 9.681940700808624, "grad_norm": 0.8917949199676514, "learning_rate": 1.9330814894765242e-05, "loss": 3.1306, "step": 89800 }, { "epoch": 9.68733153638814, "grad_norm": 0.8518571257591248, "learning_rate": 1.9007015650296813e-05, "loss": 3.1275, "step": 89850 }, { "epoch": 9.692722371967655, "grad_norm": 0.8881104588508606, "learning_rate": 1.8683216405828384e-05, "loss": 3.1199, "step": 89900 }, { "epoch": 9.69811320754717, "grad_norm": 0.8823761343955994, "learning_rate": 1.8359417161359955e-05, "loss": 3.1253, "step": 89950 }, { "epoch": 9.703504043126685, "grad_norm": 0.8819106817245483, "learning_rate": 1.8035617916891526e-05, "loss": 3.1261, "step": 90000 }, { "epoch": 9.703504043126685, "eval_accuracy": 0.3946354375087805, "eval_loss": 3.299715518951416, "eval_runtime": 183.4384, "eval_samples_per_second": 98.186, "eval_steps_per_second": 6.138, "step": 90000 }, { "epoch": 9.7088948787062, "grad_norm": 0.8763957023620605, "learning_rate": 1.7711818672423097e-05, "loss": 3.1182, "step": 90050 }, { "epoch": 9.714285714285714, "grad_norm": 0.8612681031227112, "learning_rate": 1.7388019427954664e-05, "loss": 3.1227, "step": 90100 }, { "epoch": 9.719676549865229, "grad_norm": 0.8781333565711975, "learning_rate": 1.706422018348624e-05, "loss": 3.1294, "step": 90150 }, { "epoch": 9.725067385444744, "grad_norm": 0.865515410900116, "learning_rate": 1.674042093901781e-05, "loss": 3.1297, "step": 90200 }, { "epoch": 
9.730458221024259, "grad_norm": 0.8704222440719604, "learning_rate": 1.6423097679438745e-05, "loss": 3.1347, "step": 90250 }, { "epoch": 9.735849056603774, "grad_norm": 0.8565595746040344, "learning_rate": 1.6099298434970315e-05, "loss": 3.117, "step": 90300 }, { "epoch": 9.74123989218329, "grad_norm": 0.8548470735549927, "learning_rate": 1.577549919050189e-05, "loss": 3.1338, "step": 90350 }, { "epoch": 9.746630727762803, "grad_norm": 0.8468580842018127, "learning_rate": 1.5451699946033457e-05, "loss": 3.1301, "step": 90400 }, { "epoch": 9.752021563342318, "grad_norm": 0.9051481485366821, "learning_rate": 1.5127900701565028e-05, "loss": 3.1297, "step": 90450 }, { "epoch": 9.757412398921833, "grad_norm": 0.8959400057792664, "learning_rate": 1.4804101457096599e-05, "loss": 3.1377, "step": 90500 }, { "epoch": 9.762803234501348, "grad_norm": 0.9014003872871399, "learning_rate": 1.448030221262817e-05, "loss": 3.1243, "step": 90550 }, { "epoch": 9.768194070080863, "grad_norm": 0.8818705677986145, "learning_rate": 1.4156502968159739e-05, "loss": 3.1397, "step": 90600 }, { "epoch": 9.773584905660378, "grad_norm": 0.8922802209854126, "learning_rate": 1.383270372369131e-05, "loss": 3.1233, "step": 90650 }, { "epoch": 9.778975741239892, "grad_norm": 0.9080004096031189, "learning_rate": 1.3508904479222879e-05, "loss": 3.1246, "step": 90700 }, { "epoch": 9.784366576819407, "grad_norm": 0.9191465973854065, "learning_rate": 1.3185105234754452e-05, "loss": 3.1084, "step": 90750 }, { "epoch": 9.789757412398922, "grad_norm": 0.9019896984100342, "learning_rate": 1.2861305990286023e-05, "loss": 3.1193, "step": 90800 }, { "epoch": 9.795148247978437, "grad_norm": 0.8658702373504639, "learning_rate": 1.2537506745817592e-05, "loss": 3.1277, "step": 90850 }, { "epoch": 9.800539083557952, "grad_norm": 0.8594561219215393, "learning_rate": 1.2213707501349163e-05, "loss": 3.1363, "step": 90900 }, { "epoch": 9.805929919137466, "grad_norm": 0.8785389065742493, "learning_rate": 
1.1889908256880732e-05, "loss": 3.1238, "step": 90950 }, { "epoch": 9.81132075471698, "grad_norm": 0.8855112195014954, "learning_rate": 1.1566109012412303e-05, "loss": 3.1272, "step": 91000 }, { "epoch": 9.81132075471698, "eval_accuracy": 0.3948550249950536, "eval_loss": 3.297788381576538, "eval_runtime": 183.5523, "eval_samples_per_second": 98.125, "eval_steps_per_second": 6.134, "step": 91000 }, { "epoch": 9.816711590296496, "grad_norm": 0.868584394454956, "learning_rate": 1.1242309767943874e-05, "loss": 3.1144, "step": 91050 }, { "epoch": 9.822102425876011, "grad_norm": 0.8530396819114685, "learning_rate": 1.0918510523475443e-05, "loss": 3.1235, "step": 91100 }, { "epoch": 9.827493261455526, "grad_norm": 0.8684161901473999, "learning_rate": 1.0594711279007015e-05, "loss": 3.1208, "step": 91150 }, { "epoch": 9.832884097035041, "grad_norm": 0.8899792432785034, "learning_rate": 1.0270912034538586e-05, "loss": 3.1213, "step": 91200 }, { "epoch": 9.838274932614555, "grad_norm": 0.8613511919975281, "learning_rate": 9.947112790070156e-06, "loss": 3.1172, "step": 91250 }, { "epoch": 9.84366576819407, "grad_norm": 0.8709462881088257, "learning_rate": 9.623313545601726e-06, "loss": 3.1474, "step": 91300 }, { "epoch": 9.849056603773585, "grad_norm": 0.896946132183075, "learning_rate": 9.299514301133296e-06, "loss": 3.1258, "step": 91350 }, { "epoch": 9.8544474393531, "grad_norm": 0.8559094071388245, "learning_rate": 8.975715056664867e-06, "loss": 3.1215, "step": 91400 }, { "epoch": 9.859838274932615, "grad_norm": 0.8676149249076843, "learning_rate": 8.651915812196437e-06, "loss": 3.1358, "step": 91450 }, { "epoch": 9.865229110512129, "grad_norm": 0.8665869235992432, "learning_rate": 8.328116567728008e-06, "loss": 3.0967, "step": 91500 }, { "epoch": 9.870619946091644, "grad_norm": 0.8311969637870789, "learning_rate": 8.004317323259577e-06, "loss": 3.0991, "step": 91550 }, { "epoch": 9.876010781671159, "grad_norm": 0.8719154000282288, "learning_rate": 7.680518078791148e-06, 
"loss": 3.12, "step": 91600 }, { "epoch": 9.881401617250674, "grad_norm": 0.8440600633621216, "learning_rate": 7.356718834322719e-06, "loss": 3.1188, "step": 91650 }, { "epoch": 9.88679245283019, "grad_norm": 0.8644077777862549, "learning_rate": 7.03291958985429e-06, "loss": 3.1107, "step": 91700 }, { "epoch": 9.892183288409704, "grad_norm": 0.8675840497016907, "learning_rate": 6.70912034538586e-06, "loss": 3.1316, "step": 91750 }, { "epoch": 9.897574123989218, "grad_norm": 0.873511552810669, "learning_rate": 6.38532110091743e-06, "loss": 3.1241, "step": 91800 }, { "epoch": 9.902964959568733, "grad_norm": 0.9048805236816406, "learning_rate": 6.061521856449001e-06, "loss": 3.1268, "step": 91850 }, { "epoch": 9.908355795148248, "grad_norm": 0.8429412245750427, "learning_rate": 5.737722611980571e-06, "loss": 3.1232, "step": 91900 }, { "epoch": 9.913746630727763, "grad_norm": 0.8978517651557922, "learning_rate": 5.413923367512142e-06, "loss": 3.1066, "step": 91950 }, { "epoch": 9.919137466307278, "grad_norm": 0.8608512282371521, "learning_rate": 5.090124123043712e-06, "loss": 3.0993, "step": 92000 }, { "epoch": 9.919137466307278, "eval_accuracy": 0.39501604857479466, "eval_loss": 3.2966372966766357, "eval_runtime": 183.5216, "eval_samples_per_second": 98.141, "eval_steps_per_second": 6.136, "step": 92000 }, { "epoch": 9.924528301886792, "grad_norm": 0.8535104990005493, "learning_rate": 4.766324878575283e-06, "loss": 3.1249, "step": 92050 }, { "epoch": 9.929919137466307, "grad_norm": 0.9014983177185059, "learning_rate": 4.442525634106853e-06, "loss": 3.1135, "step": 92100 }, { "epoch": 9.935309973045822, "grad_norm": 0.8669118285179138, "learning_rate": 4.118726389638424e-06, "loss": 3.1419, "step": 92150 }, { "epoch": 9.940700808625337, "grad_norm": 0.8581469058990479, "learning_rate": 3.7949271451699944e-06, "loss": 3.1323, "step": 92200 }, { "epoch": 9.946091644204852, "grad_norm": 0.8778931498527527, "learning_rate": 3.4711279007015644e-06, "loss": 3.124, "step": 
92250 }, { "epoch": 9.951482479784367, "grad_norm": 0.9205542206764221, "learning_rate": 3.1473286562331353e-06, "loss": 3.1097, "step": 92300 }, { "epoch": 9.95687331536388, "grad_norm": 0.9244022965431213, "learning_rate": 2.8235294117647054e-06, "loss": 3.1215, "step": 92350 }, { "epoch": 9.962264150943396, "grad_norm": 0.858595073223114, "learning_rate": 2.4997301672962763e-06, "loss": 3.1055, "step": 92400 }, { "epoch": 9.967654986522911, "grad_norm": 0.8582491874694824, "learning_rate": 2.1759309228278467e-06, "loss": 3.1251, "step": 92450 }, { "epoch": 9.973045822102426, "grad_norm": 0.8598997592926025, "learning_rate": 1.852131678359417e-06, "loss": 3.12, "step": 92500 }, { "epoch": 9.978436657681941, "grad_norm": 0.8376339077949524, "learning_rate": 1.5283324338909875e-06, "loss": 3.1245, "step": 92550 }, { "epoch": 9.983827493261456, "grad_norm": 0.8226578831672668, "learning_rate": 1.204533189422558e-06, "loss": 3.1198, "step": 92600 }, { "epoch": 9.98921832884097, "grad_norm": 0.8843859434127808, "learning_rate": 8.807339449541284e-07, "loss": 3.1088, "step": 92650 }, { "epoch": 9.994609164420485, "grad_norm": 0.886563777923584, "learning_rate": 5.569347004856989e-07, "loss": 3.131, "step": 92700 }, { "epoch": 10.0, "grad_norm": 1.917934536933899, "learning_rate": 2.3313545601726927e-07, "loss": 3.1285, "step": 92750 }, { "epoch": 10.0, "step": 92750, "total_flos": 7.75449427968e+17, "train_loss": 3.4498943302123695, "train_runtime": 79544.4067, "train_samples_per_second": 37.309, "train_steps_per_second": 1.166 } ], "logging_steps": 50, "max_steps": 92750, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 10000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 7.75449427968e+17, "train_batch_size": 32, "trial_name": null, "trial_params": null }