{ "best_metric": 3.3030855655670166, "best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M_low_0_6910/checkpoint-90000", "epoch": 10.0, "eval_steps": 1000, "global_step": 92750, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.005390835579514825, "grad_norm": 2.8118252754211426, "learning_rate": 0.000276, "loss": 9.0225, "step": 50 }, { "epoch": 0.01078167115902965, "grad_norm": 1.6856672763824463, "learning_rate": 0.0005759999999999999, "loss": 6.9773, "step": 100 }, { "epoch": 0.016172506738544475, "grad_norm": 0.9568011164665222, "learning_rate": 0.000599702104695089, "loss": 6.5177, "step": 150 }, { "epoch": 0.0215633423180593, "grad_norm": 1.2443560361862183, "learning_rate": 0.0005993783054506205, "loss": 6.2578, "step": 200 }, { "epoch": 0.026954177897574125, "grad_norm": 1.41825270652771, "learning_rate": 0.0005990545062061521, "loss": 6.0969, "step": 250 }, { "epoch": 0.03234501347708895, "grad_norm": 1.9072396755218506, "learning_rate": 0.0005987307069616836, "loss": 5.9635, "step": 300 }, { "epoch": 0.03773584905660377, "grad_norm": 1.5556931495666504, "learning_rate": 0.0005984069077172153, "loss": 5.8839, "step": 350 }, { "epoch": 0.0431266846361186, "grad_norm": 1.5125844478607178, "learning_rate": 0.0005980831084727469, "loss": 5.8238, "step": 400 }, { "epoch": 0.04851752021563342, "grad_norm": 1.5804698467254639, "learning_rate": 0.0005977593092282784, "loss": 5.7627, "step": 450 }, { "epoch": 0.05390835579514825, "grad_norm": 1.1904308795928955, "learning_rate": 0.00059743550998381, "loss": 5.6518, "step": 500 }, { "epoch": 0.05929919137466307, "grad_norm": 1.009339690208435, "learning_rate": 0.0005971117107393416, "loss": 5.6142, "step": 550 }, { "epoch": 0.0646900269541779, "grad_norm": 1.3357371091842651, "learning_rate": 0.0005967879114948732, "loss": 5.5406, "step": 600 }, { "epoch": 0.07008086253369272, "grad_norm": 1.434008240699768, "learning_rate": 
0.0005964641122504047, "loss": 5.4783, "step": 650 }, { "epoch": 0.07547169811320754, "grad_norm": 0.8490063548088074, "learning_rate": 0.0005961403130059363, "loss": 5.3996, "step": 700 }, { "epoch": 0.08086253369272237, "grad_norm": 1.1594209671020508, "learning_rate": 0.0005958165137614678, "loss": 5.331, "step": 750 }, { "epoch": 0.0862533692722372, "grad_norm": 0.9333789348602295, "learning_rate": 0.0005954927145169995, "loss": 5.2596, "step": 800 }, { "epoch": 0.09164420485175202, "grad_norm": 1.0127500295639038, "learning_rate": 0.0005951689152725309, "loss": 5.2418, "step": 850 }, { "epoch": 0.09703504043126684, "grad_norm": 1.113052487373352, "learning_rate": 0.0005948451160280626, "loss": 5.1815, "step": 900 }, { "epoch": 0.10242587601078167, "grad_norm": 0.9113388061523438, "learning_rate": 0.0005945213167835941, "loss": 5.1162, "step": 950 }, { "epoch": 0.1078167115902965, "grad_norm": 0.9989785552024841, "learning_rate": 0.0005941975175391257, "loss": 5.1216, "step": 1000 }, { "epoch": 0.1078167115902965, "eval_accuracy": 0.22690449769715637, "eval_loss": 5.032634735107422, "eval_runtime": 184.9778, "eval_samples_per_second": 97.368, "eval_steps_per_second": 6.087, "step": 1000 }, { "epoch": 0.11320754716981132, "grad_norm": 1.0346425771713257, "learning_rate": 0.0005938737182946572, "loss": 5.0533, "step": 1050 }, { "epoch": 0.11859838274932614, "grad_norm": 1.3372762203216553, "learning_rate": 0.0005935499190501888, "loss": 5.014, "step": 1100 }, { "epoch": 0.12398921832884097, "grad_norm": 1.185377836227417, "learning_rate": 0.0005932261198057204, "loss": 5.0192, "step": 1150 }, { "epoch": 0.1293800539083558, "grad_norm": 1.010504961013794, "learning_rate": 0.000592902320561252, "loss": 4.9761, "step": 1200 }, { "epoch": 0.1347708894878706, "grad_norm": 0.8940131068229675, "learning_rate": 0.0005925785213167835, "loss": 4.9547, "step": 1250 }, { "epoch": 0.14016172506738545, "grad_norm": 1.0484291315078735, "learning_rate": 0.0005922547220723151, 
"loss": 4.9102, "step": 1300 }, { "epoch": 0.14555256064690028, "grad_norm": 0.882337212562561, "learning_rate": 0.0005919309228278468, "loss": 4.8568, "step": 1350 }, { "epoch": 0.1509433962264151, "grad_norm": 1.2623690366744995, "learning_rate": 0.0005916071235833783, "loss": 4.8624, "step": 1400 }, { "epoch": 0.15633423180592992, "grad_norm": 0.7710188627243042, "learning_rate": 0.0005912833243389097, "loss": 4.8192, "step": 1450 }, { "epoch": 0.16172506738544473, "grad_norm": 0.933644711971283, "learning_rate": 0.0005909595250944414, "loss": 4.8205, "step": 1500 }, { "epoch": 0.16711590296495957, "grad_norm": 1.3466095924377441, "learning_rate": 0.000590635725849973, "loss": 4.8013, "step": 1550 }, { "epoch": 0.1725067385444744, "grad_norm": 1.1399619579315186, "learning_rate": 0.0005903119266055045, "loss": 4.7772, "step": 1600 }, { "epoch": 0.1778975741239892, "grad_norm": 0.8201636075973511, "learning_rate": 0.0005899881273610361, "loss": 4.7511, "step": 1650 }, { "epoch": 0.18328840970350405, "grad_norm": 0.902688205242157, "learning_rate": 0.0005896643281165677, "loss": 4.7286, "step": 1700 }, { "epoch": 0.18867924528301888, "grad_norm": 0.8644108176231384, "learning_rate": 0.0005893405288720993, "loss": 4.6775, "step": 1750 }, { "epoch": 0.1940700808625337, "grad_norm": 0.7629256844520569, "learning_rate": 0.0005890167296276308, "loss": 4.704, "step": 1800 }, { "epoch": 0.19946091644204852, "grad_norm": 1.2781596183776855, "learning_rate": 0.0005886929303831624, "loss": 4.6549, "step": 1850 }, { "epoch": 0.20485175202156333, "grad_norm": 0.7812705636024475, "learning_rate": 0.0005883691311386939, "loss": 4.632, "step": 1900 }, { "epoch": 0.21024258760107817, "grad_norm": 0.7588018178939819, "learning_rate": 0.0005880453318942256, "loss": 4.608, "step": 1950 }, { "epoch": 0.215633423180593, "grad_norm": 0.7466468811035156, "learning_rate": 0.0005877215326497571, "loss": 4.5906, "step": 2000 }, { "epoch": 0.215633423180593, "eval_accuracy": 
0.26987845327398857, "eval_loss": 4.5157904624938965, "eval_runtime": 183.7342, "eval_samples_per_second": 98.027, "eval_steps_per_second": 6.128, "step": 2000 }, { "epoch": 0.2210242587601078, "grad_norm": 0.826682448387146, "learning_rate": 0.0005873977334052887, "loss": 4.576, "step": 2050 }, { "epoch": 0.22641509433962265, "grad_norm": 0.9647506475448608, "learning_rate": 0.0005870739341608202, "loss": 4.5599, "step": 2100 }, { "epoch": 0.23180592991913745, "grad_norm": 1.005610466003418, "learning_rate": 0.0005867501349163519, "loss": 4.5335, "step": 2150 }, { "epoch": 0.2371967654986523, "grad_norm": 1.0172079801559448, "learning_rate": 0.0005864263356718833, "loss": 4.5304, "step": 2200 }, { "epoch": 0.24258760107816713, "grad_norm": 0.8978919982910156, "learning_rate": 0.000586102536427415, "loss": 4.5045, "step": 2250 }, { "epoch": 0.24797843665768193, "grad_norm": 0.9270089268684387, "learning_rate": 0.0005857787371829465, "loss": 4.5004, "step": 2300 }, { "epoch": 0.25336927223719674, "grad_norm": 1.124731183052063, "learning_rate": 0.0005854549379384781, "loss": 4.4774, "step": 2350 }, { "epoch": 0.2587601078167116, "grad_norm": 0.7949519157409668, "learning_rate": 0.0005851311386940096, "loss": 4.4615, "step": 2400 }, { "epoch": 0.2641509433962264, "grad_norm": 0.7883573770523071, "learning_rate": 0.0005848073394495412, "loss": 4.4503, "step": 2450 }, { "epoch": 0.2695417789757412, "grad_norm": 0.9283429384231567, "learning_rate": 0.0005844835402050728, "loss": 4.4294, "step": 2500 }, { "epoch": 0.2749326145552561, "grad_norm": 0.703683078289032, "learning_rate": 0.0005841597409606044, "loss": 4.3969, "step": 2550 }, { "epoch": 0.2803234501347709, "grad_norm": 0.812880277633667, "learning_rate": 0.000583835941716136, "loss": 4.4119, "step": 2600 }, { "epoch": 0.2857142857142857, "grad_norm": 0.9256618618965149, "learning_rate": 0.0005835121424716675, "loss": 4.3917, "step": 2650 }, { "epoch": 0.29110512129380056, "grad_norm": 0.7537206411361694, 
"learning_rate": 0.0005831883432271992, "loss": 4.3791, "step": 2700 }, { "epoch": 0.29649595687331537, "grad_norm": 0.9869926571846008, "learning_rate": 0.0005828645439827307, "loss": 4.3759, "step": 2750 }, { "epoch": 0.3018867924528302, "grad_norm": 0.8514196276664734, "learning_rate": 0.0005825407447382622, "loss": 4.3314, "step": 2800 }, { "epoch": 0.30727762803234504, "grad_norm": 0.7071971893310547, "learning_rate": 0.0005822169454937938, "loss": 4.3577, "step": 2850 }, { "epoch": 0.31266846361185985, "grad_norm": 0.8087393045425415, "learning_rate": 0.0005818931462493254, "loss": 4.326, "step": 2900 }, { "epoch": 0.31805929919137466, "grad_norm": 0.8092687129974365, "learning_rate": 0.0005815693470048569, "loss": 4.3304, "step": 2950 }, { "epoch": 0.32345013477088946, "grad_norm": 0.7119380235671997, "learning_rate": 0.0005812455477603885, "loss": 4.3198, "step": 3000 }, { "epoch": 0.32345013477088946, "eval_accuracy": 0.29812342337868974, "eval_loss": 4.240254878997803, "eval_runtime": 183.7893, "eval_samples_per_second": 97.998, "eval_steps_per_second": 6.127, "step": 3000 }, { "epoch": 0.3288409703504043, "grad_norm": 0.7455107569694519, "learning_rate": 0.0005809217485159201, "loss": 4.3188, "step": 3050 }, { "epoch": 0.33423180592991913, "grad_norm": 0.8890029191970825, "learning_rate": 0.0005805979492714517, "loss": 4.2983, "step": 3100 }, { "epoch": 0.33962264150943394, "grad_norm": 0.8936235308647156, "learning_rate": 0.0005802741500269832, "loss": 4.2817, "step": 3150 }, { "epoch": 0.3450134770889488, "grad_norm": 0.6431916356086731, "learning_rate": 0.0005799503507825148, "loss": 4.2802, "step": 3200 }, { "epoch": 0.3504043126684636, "grad_norm": 0.7159081697463989, "learning_rate": 0.0005796265515380463, "loss": 4.2707, "step": 3250 }, { "epoch": 0.3557951482479784, "grad_norm": 0.9267504215240479, "learning_rate": 0.000579302752293578, "loss": 4.277, "step": 3300 }, { "epoch": 0.3611859838274933, "grad_norm": 0.8296390771865845, "learning_rate": 
0.0005789789530491095, "loss": 4.2509, "step": 3350 }, { "epoch": 0.3665768194070081, "grad_norm": 0.7630621790885925, "learning_rate": 0.0005786551538046411, "loss": 4.2406, "step": 3400 }, { "epoch": 0.3719676549865229, "grad_norm": 0.6547260284423828, "learning_rate": 0.0005783313545601726, "loss": 4.2344, "step": 3450 }, { "epoch": 0.37735849056603776, "grad_norm": 0.8587298393249512, "learning_rate": 0.0005780075553157043, "loss": 4.2341, "step": 3500 }, { "epoch": 0.38274932614555257, "grad_norm": 0.7555488348007202, "learning_rate": 0.0005776837560712357, "loss": 4.2299, "step": 3550 }, { "epoch": 0.3881401617250674, "grad_norm": 0.8421213626861572, "learning_rate": 0.0005773599568267673, "loss": 4.2202, "step": 3600 }, { "epoch": 0.3935309973045822, "grad_norm": 0.7566924095153809, "learning_rate": 0.0005770361575822989, "loss": 4.2103, "step": 3650 }, { "epoch": 0.39892183288409705, "grad_norm": 0.7638437747955322, "learning_rate": 0.0005767123583378305, "loss": 4.1973, "step": 3700 }, { "epoch": 0.40431266846361186, "grad_norm": 0.6439513564109802, "learning_rate": 0.000576388559093362, "loss": 4.199, "step": 3750 }, { "epoch": 0.40970350404312667, "grad_norm": 0.7719266414642334, "learning_rate": 0.0005760647598488936, "loss": 4.1635, "step": 3800 }, { "epoch": 0.41509433962264153, "grad_norm": 0.6647982597351074, "learning_rate": 0.0005757409606044253, "loss": 4.1739, "step": 3850 }, { "epoch": 0.42048517520215634, "grad_norm": 0.7858614325523376, "learning_rate": 0.0005754171613599568, "loss": 4.1932, "step": 3900 }, { "epoch": 0.42587601078167114, "grad_norm": 1.070395588874817, "learning_rate": 0.0005750933621154884, "loss": 4.1588, "step": 3950 }, { "epoch": 0.431266846361186, "grad_norm": 0.6882054805755615, "learning_rate": 0.0005747695628710199, "loss": 4.1634, "step": 4000 }, { "epoch": 0.431266846361186, "eval_accuracy": 0.3118610599024015, "eval_loss": 4.099079608917236, "eval_runtime": 183.6446, "eval_samples_per_second": 98.075, 
"eval_steps_per_second": 6.131, "step": 4000 }, { "epoch": 0.4366576819407008, "grad_norm": 0.8163891434669495, "learning_rate": 0.0005744457636265515, "loss": 4.1703, "step": 4050 }, { "epoch": 0.4420485175202156, "grad_norm": 0.7172017097473145, "learning_rate": 0.0005741219643820831, "loss": 4.1633, "step": 4100 }, { "epoch": 0.4474393530997305, "grad_norm": 0.7089101672172546, "learning_rate": 0.0005737981651376146, "loss": 4.1486, "step": 4150 }, { "epoch": 0.4528301886792453, "grad_norm": 0.6500125527381897, "learning_rate": 0.0005734743658931462, "loss": 4.1541, "step": 4200 }, { "epoch": 0.4582210242587601, "grad_norm": 0.6067988276481628, "learning_rate": 0.0005731505666486778, "loss": 4.1386, "step": 4250 }, { "epoch": 0.4636118598382749, "grad_norm": 0.8405300974845886, "learning_rate": 0.0005728267674042093, "loss": 4.1407, "step": 4300 }, { "epoch": 0.46900269541778977, "grad_norm": 0.65191650390625, "learning_rate": 0.0005725029681597409, "loss": 4.1283, "step": 4350 }, { "epoch": 0.4743935309973046, "grad_norm": 0.674238920211792, "learning_rate": 0.0005721791689152725, "loss": 4.1114, "step": 4400 }, { "epoch": 0.4797843665768194, "grad_norm": 0.660973072052002, "learning_rate": 0.0005718553696708041, "loss": 4.1208, "step": 4450 }, { "epoch": 0.48517520215633425, "grad_norm": 0.6465425491333008, "learning_rate": 0.0005715315704263356, "loss": 4.1158, "step": 4500 }, { "epoch": 0.49056603773584906, "grad_norm": 0.7483091950416565, "learning_rate": 0.0005712077711818672, "loss": 4.1276, "step": 4550 }, { "epoch": 0.49595687331536387, "grad_norm": 0.845150351524353, "learning_rate": 0.0005708839719373987, "loss": 4.1192, "step": 4600 }, { "epoch": 0.5013477088948787, "grad_norm": 0.634871244430542, "learning_rate": 0.0005705601726929304, "loss": 4.0755, "step": 4650 }, { "epoch": 0.5067385444743935, "grad_norm": 0.6169816851615906, "learning_rate": 0.0005702363734484619, "loss": 4.078, "step": 4700 }, { "epoch": 0.5121293800539084, "grad_norm": 
0.8197508454322815, "learning_rate": 0.0005699125742039935, "loss": 4.084, "step": 4750 }, { "epoch": 0.5175202156334232, "grad_norm": 0.733070969581604, "learning_rate": 0.000569588774959525, "loss": 4.0813, "step": 4800 }, { "epoch": 0.522911051212938, "grad_norm": 0.6208024024963379, "learning_rate": 0.0005692649757150567, "loss": 4.0748, "step": 4850 }, { "epoch": 0.5283018867924528, "grad_norm": 0.7824249863624573, "learning_rate": 0.0005689411764705881, "loss": 4.0771, "step": 4900 }, { "epoch": 0.5336927223719676, "grad_norm": 0.6890459656715393, "learning_rate": 0.0005686173772261197, "loss": 4.0671, "step": 4950 }, { "epoch": 0.5390835579514824, "grad_norm": 0.7617940902709961, "learning_rate": 0.0005682935779816514, "loss": 4.0659, "step": 5000 }, { "epoch": 0.5390835579514824, "eval_accuracy": 0.32073376337421977, "eval_loss": 3.998711347579956, "eval_runtime": 183.5304, "eval_samples_per_second": 98.136, "eval_steps_per_second": 6.135, "step": 5000 }, { "epoch": 0.5444743935309974, "grad_norm": 0.6309065222740173, "learning_rate": 0.0005679697787371829, "loss": 4.0746, "step": 5050 }, { "epoch": 0.5498652291105122, "grad_norm": 0.5925028920173645, "learning_rate": 0.0005676459794927145, "loss": 4.0574, "step": 5100 }, { "epoch": 0.555256064690027, "grad_norm": 0.6035439968109131, "learning_rate": 0.000567322180248246, "loss": 4.0516, "step": 5150 }, { "epoch": 0.5606469002695418, "grad_norm": 0.7275799512863159, "learning_rate": 0.0005669983810037777, "loss": 4.0651, "step": 5200 }, { "epoch": 0.5660377358490566, "grad_norm": 0.6090968251228333, "learning_rate": 0.0005666745817593092, "loss": 4.0379, "step": 5250 }, { "epoch": 0.5714285714285714, "grad_norm": 0.632185161113739, "learning_rate": 0.0005663507825148408, "loss": 4.0381, "step": 5300 }, { "epoch": 0.5768194070080862, "grad_norm": 0.6599447131156921, "learning_rate": 0.0005660269832703723, "loss": 4.0278, "step": 5350 }, { "epoch": 0.5822102425876011, "grad_norm": 0.648209810256958, 
"learning_rate": 0.0005657031840259039, "loss": 4.0327, "step": 5400 }, { "epoch": 0.5876010781671159, "grad_norm": 0.6686100363731384, "learning_rate": 0.0005653793847814355, "loss": 4.0357, "step": 5450 }, { "epoch": 0.5929919137466307, "grad_norm": 0.7332231998443604, "learning_rate": 0.000565055585536967, "loss": 4.0131, "step": 5500 }, { "epoch": 0.5983827493261455, "grad_norm": 0.6814959645271301, "learning_rate": 0.0005647317862924986, "loss": 4.033, "step": 5550 }, { "epoch": 0.6037735849056604, "grad_norm": 0.6917067766189575, "learning_rate": 0.0005644079870480302, "loss": 3.9815, "step": 5600 }, { "epoch": 0.6091644204851752, "grad_norm": 0.6626110672950745, "learning_rate": 0.0005640841878035617, "loss": 4.0186, "step": 5650 }, { "epoch": 0.6145552560646901, "grad_norm": 0.7377511262893677, "learning_rate": 0.0005637603885590933, "loss": 4.0184, "step": 5700 }, { "epoch": 0.6199460916442049, "grad_norm": 0.6328345537185669, "learning_rate": 0.0005634365893146248, "loss": 4.019, "step": 5750 }, { "epoch": 0.6253369272237197, "grad_norm": 0.6522849798202515, "learning_rate": 0.0005631127900701565, "loss": 4.01, "step": 5800 }, { "epoch": 0.6307277628032345, "grad_norm": 0.6383638978004456, "learning_rate": 0.000562788990825688, "loss": 3.9816, "step": 5850 }, { "epoch": 0.6361185983827493, "grad_norm": 0.593140721321106, "learning_rate": 0.0005624651915812196, "loss": 3.9949, "step": 5900 }, { "epoch": 0.6415094339622641, "grad_norm": 0.7360444068908691, "learning_rate": 0.0005621413923367511, "loss": 4.0016, "step": 5950 }, { "epoch": 0.6469002695417789, "grad_norm": 0.6608056426048279, "learning_rate": 0.0005618175930922828, "loss": 4.0076, "step": 6000 }, { "epoch": 0.6469002695417789, "eval_accuracy": 0.32758476256247404, "eval_loss": 3.921957492828369, "eval_runtime": 183.5766, "eval_samples_per_second": 98.112, "eval_steps_per_second": 6.134, "step": 6000 }, { "epoch": 0.6522911051212938, "grad_norm": 0.6179393529891968, "learning_rate": 
0.0005614937938478143, "loss": 3.9939, "step": 6050 }, { "epoch": 0.6576819407008087, "grad_norm": 0.7146060466766357, "learning_rate": 0.0005611699946033459, "loss": 3.9929, "step": 6100 }, { "epoch": 0.6630727762803235, "grad_norm": 0.601253867149353, "learning_rate": 0.0005608461953588774, "loss": 3.9838, "step": 6150 }, { "epoch": 0.6684636118598383, "grad_norm": 0.6216392517089844, "learning_rate": 0.000560522396114409, "loss": 3.9788, "step": 6200 }, { "epoch": 0.6738544474393531, "grad_norm": 0.6294983625411987, "learning_rate": 0.0005601985968699405, "loss": 3.9608, "step": 6250 }, { "epoch": 0.6792452830188679, "grad_norm": 0.7225786447525024, "learning_rate": 0.0005598747976254721, "loss": 3.9794, "step": 6300 }, { "epoch": 0.6846361185983828, "grad_norm": 0.6607632637023926, "learning_rate": 0.0005595509983810038, "loss": 3.9496, "step": 6350 }, { "epoch": 0.6900269541778976, "grad_norm": 0.5790310502052307, "learning_rate": 0.0005592271991365353, "loss": 3.9592, "step": 6400 }, { "epoch": 0.6954177897574124, "grad_norm": 0.6292189955711365, "learning_rate": 0.0005589033998920669, "loss": 3.9773, "step": 6450 }, { "epoch": 0.7008086253369272, "grad_norm": 0.6256137490272522, "learning_rate": 0.0005585796006475984, "loss": 3.9487, "step": 6500 }, { "epoch": 0.706199460916442, "grad_norm": 0.6231578588485718, "learning_rate": 0.0005582558014031301, "loss": 3.9727, "step": 6550 }, { "epoch": 0.7115902964959568, "grad_norm": 0.6470305323600769, "learning_rate": 0.0005579320021586616, "loss": 3.9563, "step": 6600 }, { "epoch": 0.7169811320754716, "grad_norm": 0.5552076697349548, "learning_rate": 0.0005576082029141932, "loss": 3.951, "step": 6650 }, { "epoch": 0.7223719676549866, "grad_norm": 0.5381990671157837, "learning_rate": 0.0005572844036697247, "loss": 3.9356, "step": 6700 }, { "epoch": 0.7277628032345014, "grad_norm": 0.6558448076248169, "learning_rate": 0.0005569606044252563, "loss": 3.9426, "step": 6750 }, { "epoch": 0.7331536388140162, "grad_norm": 
0.8135426640510559, "learning_rate": 0.0005566368051807879, "loss": 3.9613, "step": 6800 }, { "epoch": 0.738544474393531, "grad_norm": 0.6013303995132446, "learning_rate": 0.0005563130059363194, "loss": 3.9451, "step": 6850 }, { "epoch": 0.7439353099730458, "grad_norm": 0.5324015617370605, "learning_rate": 0.000555989206691851, "loss": 3.9444, "step": 6900 }, { "epoch": 0.7493261455525606, "grad_norm": 0.6945801377296448, "learning_rate": 0.0005556654074473826, "loss": 3.9473, "step": 6950 }, { "epoch": 0.7547169811320755, "grad_norm": 0.7069705128669739, "learning_rate": 0.0005553416082029141, "loss": 3.9328, "step": 7000 }, { "epoch": 0.7547169811320755, "eval_accuracy": 0.3323933047655917, "eval_loss": 3.8696444034576416, "eval_runtime": 183.451, "eval_samples_per_second": 98.179, "eval_steps_per_second": 6.138, "step": 7000 }, { "epoch": 0.7601078167115903, "grad_norm": 0.6576606631278992, "learning_rate": 0.0005550178089584457, "loss": 3.9266, "step": 7050 }, { "epoch": 0.7654986522911051, "grad_norm": 0.5154832005500793, "learning_rate": 0.0005546940097139772, "loss": 3.9252, "step": 7100 }, { "epoch": 0.77088948787062, "grad_norm": 0.6892321109771729, "learning_rate": 0.0005543702104695089, "loss": 3.9271, "step": 7150 }, { "epoch": 0.7762803234501348, "grad_norm": 0.6380577087402344, "learning_rate": 0.0005540464112250404, "loss": 3.9261, "step": 7200 }, { "epoch": 0.7816711590296496, "grad_norm": 0.652199923992157, "learning_rate": 0.000553722611980572, "loss": 3.9387, "step": 7250 }, { "epoch": 0.7870619946091644, "grad_norm": 0.5706573724746704, "learning_rate": 0.0005533988127361035, "loss": 3.9201, "step": 7300 }, { "epoch": 0.7924528301886793, "grad_norm": 0.5596190690994263, "learning_rate": 0.0005530750134916352, "loss": 3.9361, "step": 7350 }, { "epoch": 0.7978436657681941, "grad_norm": 0.6239616274833679, "learning_rate": 0.0005527512142471668, "loss": 3.9104, "step": 7400 }, { "epoch": 0.8032345013477089, "grad_norm": 0.5858375430107117, 
"learning_rate": 0.0005524274150026982, "loss": 3.9105, "step": 7450 }, { "epoch": 0.8086253369272237, "grad_norm": 0.5788413286209106, "learning_rate": 0.0005521036157582299, "loss": 3.904, "step": 7500 }, { "epoch": 0.8140161725067385, "grad_norm": 0.6172971725463867, "learning_rate": 0.0005517798165137614, "loss": 3.9068, "step": 7550 }, { "epoch": 0.8194070080862533, "grad_norm": 0.6352159976959229, "learning_rate": 0.000551456017269293, "loss": 3.8812, "step": 7600 }, { "epoch": 0.8247978436657682, "grad_norm": 0.6148518323898315, "learning_rate": 0.0005511322180248245, "loss": 3.8997, "step": 7650 }, { "epoch": 0.8301886792452831, "grad_norm": 0.6033445000648499, "learning_rate": 0.0005508084187803562, "loss": 3.9111, "step": 7700 }, { "epoch": 0.8355795148247979, "grad_norm": 0.5412169694900513, "learning_rate": 0.0005504846195358877, "loss": 3.9042, "step": 7750 }, { "epoch": 0.8409703504043127, "grad_norm": 0.5904088616371155, "learning_rate": 0.0005501608202914193, "loss": 3.892, "step": 7800 }, { "epoch": 0.8463611859838275, "grad_norm": 0.6405267715454102, "learning_rate": 0.0005498370210469508, "loss": 3.8977, "step": 7850 }, { "epoch": 0.8517520215633423, "grad_norm": 0.6236185431480408, "learning_rate": 0.0005495132218024824, "loss": 3.8806, "step": 7900 }, { "epoch": 0.8571428571428571, "grad_norm": 0.6019570231437683, "learning_rate": 0.000549189422558014, "loss": 3.8888, "step": 7950 }, { "epoch": 0.862533692722372, "grad_norm": 0.5633127093315125, "learning_rate": 0.0005488656233135456, "loss": 3.8875, "step": 8000 }, { "epoch": 0.862533692722372, "eval_accuracy": 0.33722933614932643, "eval_loss": 3.821709632873535, "eval_runtime": 183.4138, "eval_samples_per_second": 98.199, "eval_steps_per_second": 6.139, "step": 8000 }, { "epoch": 0.8679245283018868, "grad_norm": 0.5992864966392517, "learning_rate": 0.0005485418240690771, "loss": 3.8707, "step": 8050 }, { "epoch": 0.8733153638814016, "grad_norm": 0.6274523735046387, "learning_rate": 
0.0005482180248246087, "loss": 3.8864, "step": 8100 }, { "epoch": 0.8787061994609164, "grad_norm": 0.6176576614379883, "learning_rate": 0.0005478942255801403, "loss": 3.8807, "step": 8150 }, { "epoch": 0.8840970350404312, "grad_norm": 0.5266938805580139, "learning_rate": 0.0005475704263356718, "loss": 3.8706, "step": 8200 }, { "epoch": 0.889487870619946, "grad_norm": 0.5737940073013306, "learning_rate": 0.0005472466270912034, "loss": 3.8805, "step": 8250 }, { "epoch": 0.894878706199461, "grad_norm": 0.6148428320884705, "learning_rate": 0.000546922827846735, "loss": 3.8693, "step": 8300 }, { "epoch": 0.9002695417789758, "grad_norm": 0.5487964749336243, "learning_rate": 0.0005465990286022665, "loss": 3.869, "step": 8350 }, { "epoch": 0.9056603773584906, "grad_norm": 0.6526573896408081, "learning_rate": 0.0005462752293577981, "loss": 3.8841, "step": 8400 }, { "epoch": 0.9110512129380054, "grad_norm": 0.601149320602417, "learning_rate": 0.0005459514301133296, "loss": 3.8793, "step": 8450 }, { "epoch": 0.9164420485175202, "grad_norm": 0.5263657569885254, "learning_rate": 0.0005456276308688613, "loss": 3.8761, "step": 8500 }, { "epoch": 0.921832884097035, "grad_norm": 0.5656020045280457, "learning_rate": 0.0005453038316243929, "loss": 3.8672, "step": 8550 }, { "epoch": 0.9272237196765498, "grad_norm": 0.565776526927948, "learning_rate": 0.0005449800323799244, "loss": 3.8777, "step": 8600 }, { "epoch": 0.9326145552560647, "grad_norm": 0.5656868815422058, "learning_rate": 0.000544656233135456, "loss": 3.8577, "step": 8650 }, { "epoch": 0.9380053908355795, "grad_norm": 0.5700314044952393, "learning_rate": 0.0005443324338909875, "loss": 3.8624, "step": 8700 }, { "epoch": 0.9433962264150944, "grad_norm": 0.5940127968788147, "learning_rate": 0.0005440086346465192, "loss": 3.8726, "step": 8750 }, { "epoch": 0.9487870619946092, "grad_norm": 0.5483199954032898, "learning_rate": 0.0005436848354020506, "loss": 3.8541, "step": 8800 }, { "epoch": 0.954177897574124, "grad_norm": 
0.6202383041381836, "learning_rate": 0.0005433610361575823, "loss": 3.8599, "step": 8850 }, { "epoch": 0.9595687331536388, "grad_norm": 0.5427079200744629, "learning_rate": 0.0005430372369131138, "loss": 3.8437, "step": 8900 }, { "epoch": 0.9649595687331537, "grad_norm": 0.5505421757698059, "learning_rate": 0.0005427134376686454, "loss": 3.858, "step": 8950 }, { "epoch": 0.9703504043126685, "grad_norm": 0.6305214166641235, "learning_rate": 0.0005423896384241769, "loss": 3.8437, "step": 9000 }, { "epoch": 0.9703504043126685, "eval_accuracy": 0.3406345176534323, "eval_loss": 3.7832400798797607, "eval_runtime": 183.7219, "eval_samples_per_second": 98.034, "eval_steps_per_second": 6.129, "step": 9000 }, { "epoch": 0.9757412398921833, "grad_norm": 0.7392898797988892, "learning_rate": 0.0005420658391797086, "loss": 3.8372, "step": 9050 }, { "epoch": 0.9811320754716981, "grad_norm": 0.587247908115387, "learning_rate": 0.0005417420399352401, "loss": 3.8504, "step": 9100 }, { "epoch": 0.9865229110512129, "grad_norm": 0.5904769897460938, "learning_rate": 0.0005414182406907717, "loss": 3.844, "step": 9150 }, { "epoch": 0.9919137466307277, "grad_norm": 0.632688581943512, "learning_rate": 0.0005410944414463032, "loss": 3.8564, "step": 9200 }, { "epoch": 0.9973045822102425, "grad_norm": 0.5667609572410583, "learning_rate": 0.0005407706422018348, "loss": 3.8552, "step": 9250 }, { "epoch": 1.0026954177897573, "grad_norm": 0.6239280700683594, "learning_rate": 0.0005404468429573664, "loss": 3.8025, "step": 9300 }, { "epoch": 1.0080862533692723, "grad_norm": 0.6430540680885315, "learning_rate": 0.000540123043712898, "loss": 3.7743, "step": 9350 }, { "epoch": 1.013477088948787, "grad_norm": 0.5992752909660339, "learning_rate": 0.0005397992444684295, "loss": 3.7575, "step": 9400 }, { "epoch": 1.0188679245283019, "grad_norm": 0.6434339284896851, "learning_rate": 0.0005394754452239611, "loss": 3.7703, "step": 9450 }, { "epoch": 1.0242587601078168, "grad_norm": 0.5548680424690247, 
"learning_rate": 0.0005391516459794927, "loss": 3.7811, "step": 9500 }, { "epoch": 1.0296495956873315, "grad_norm": 0.5591529011726379, "learning_rate": 0.0005388278467350242, "loss": 3.7885, "step": 9550 }, { "epoch": 1.0350404312668464, "grad_norm": 0.5492196083068848, "learning_rate": 0.0005385040474905557, "loss": 3.7816, "step": 9600 }, { "epoch": 1.0404312668463611, "grad_norm": 0.5632776618003845, "learning_rate": 0.0005381802482460874, "loss": 3.7914, "step": 9650 }, { "epoch": 1.045822102425876, "grad_norm": 0.5463435053825378, "learning_rate": 0.000537856449001619, "loss": 3.7925, "step": 9700 }, { "epoch": 1.0512129380053907, "grad_norm": 0.5662521719932556, "learning_rate": 0.0005375326497571505, "loss": 3.7623, "step": 9750 }, { "epoch": 1.0566037735849056, "grad_norm": 0.6173110008239746, "learning_rate": 0.000537208850512682, "loss": 3.7692, "step": 9800 }, { "epoch": 1.0619946091644206, "grad_norm": 0.5675989389419556, "learning_rate": 0.0005368850512682137, "loss": 3.7665, "step": 9850 }, { "epoch": 1.0673854447439353, "grad_norm": 0.5368490815162659, "learning_rate": 0.0005365612520237453, "loss": 3.7797, "step": 9900 }, { "epoch": 1.0727762803234502, "grad_norm": 0.5896486639976501, "learning_rate": 0.0005362374527792768, "loss": 3.7825, "step": 9950 }, { "epoch": 1.0781671159029649, "grad_norm": 0.5884218215942383, "learning_rate": 0.0005359136535348084, "loss": 3.7795, "step": 10000 }, { "epoch": 1.0781671159029649, "eval_accuracy": 0.3442962286256681, "eval_loss": 3.7569897174835205, "eval_runtime": 183.6339, "eval_samples_per_second": 98.081, "eval_steps_per_second": 6.132, "step": 10000 }, { "epoch": 1.0835579514824798, "grad_norm": 0.5574485063552856, "learning_rate": 0.0005355898542903399, "loss": 3.7575, "step": 10050 }, { "epoch": 1.0889487870619945, "grad_norm": 0.5631133317947388, "learning_rate": 0.0005352660550458716, "loss": 3.7605, "step": 10100 }, { "epoch": 1.0943396226415094, "grad_norm": 0.6444439888000488, "learning_rate": 
0.000534942255801403, "loss": 3.7508, "step": 10150 }, { "epoch": 1.0997304582210243, "grad_norm": 0.6213650107383728, "learning_rate": 0.0005346184565569347, "loss": 3.7672, "step": 10200 }, { "epoch": 1.105121293800539, "grad_norm": 0.5763867497444153, "learning_rate": 0.0005342946573124662, "loss": 3.7855, "step": 10250 }, { "epoch": 1.110512129380054, "grad_norm": 0.6422098278999329, "learning_rate": 0.0005339708580679978, "loss": 3.7876, "step": 10300 }, { "epoch": 1.1159029649595686, "grad_norm": 0.564333975315094, "learning_rate": 0.0005336470588235293, "loss": 3.7619, "step": 10350 }, { "epoch": 1.1212938005390836, "grad_norm": 0.573645830154419, "learning_rate": 0.000533323259579061, "loss": 3.7662, "step": 10400 }, { "epoch": 1.1266846361185983, "grad_norm": 0.5722724199295044, "learning_rate": 0.0005329994603345925, "loss": 3.7615, "step": 10450 }, { "epoch": 1.1320754716981132, "grad_norm": 0.5559325814247131, "learning_rate": 0.0005326756610901241, "loss": 3.7594, "step": 10500 }, { "epoch": 1.137466307277628, "grad_norm": 0.553900957107544, "learning_rate": 0.0005323518618456556, "loss": 3.76, "step": 10550 }, { "epoch": 1.1428571428571428, "grad_norm": 0.6239808201789856, "learning_rate": 0.0005320280626011872, "loss": 3.7736, "step": 10600 }, { "epoch": 1.1482479784366577, "grad_norm": 0.6128347516059875, "learning_rate": 0.0005317042633567188, "loss": 3.7596, "step": 10650 }, { "epoch": 1.1536388140161726, "grad_norm": 0.6108589172363281, "learning_rate": 0.0005313804641122504, "loss": 3.7629, "step": 10700 }, { "epoch": 1.1590296495956873, "grad_norm": 0.5402905941009521, "learning_rate": 0.0005310566648677819, "loss": 3.7593, "step": 10750 }, { "epoch": 1.1644204851752022, "grad_norm": 0.6511978507041931, "learning_rate": 0.0005307328656233135, "loss": 3.7501, "step": 10800 }, { "epoch": 1.169811320754717, "grad_norm": 0.5607616901397705, "learning_rate": 0.000530409066378845, "loss": 3.7458, "step": 10850 }, { "epoch": 1.1752021563342319, 
"grad_norm": 1.1150946617126465, "learning_rate": 0.0005300852671343766, "loss": 3.7631, "step": 10900 }, { "epoch": 1.1805929919137466, "grad_norm": 0.6214979290962219, "learning_rate": 0.0005297614678899081, "loss": 3.751, "step": 10950 }, { "epoch": 1.1859838274932615, "grad_norm": 0.542398989200592, "learning_rate": 0.0005294376686454398, "loss": 3.7415, "step": 11000 }, { "epoch": 1.1859838274932615, "eval_accuracy": 0.3474025060353963, "eval_loss": 3.724672555923462, "eval_runtime": 183.3754, "eval_samples_per_second": 98.219, "eval_steps_per_second": 6.14, "step": 11000 }, { "epoch": 1.1913746630727764, "grad_norm": 0.5348007082939148, "learning_rate": 0.0005291138694009714, "loss": 3.7517, "step": 11050 }, { "epoch": 1.196765498652291, "grad_norm": 0.6700088977813721, "learning_rate": 0.0005287900701565029, "loss": 3.7463, "step": 11100 }, { "epoch": 1.202156334231806, "grad_norm": 0.5958192348480225, "learning_rate": 0.0005284662709120345, "loss": 3.7478, "step": 11150 }, { "epoch": 1.2075471698113207, "grad_norm": 0.6137239336967468, "learning_rate": 0.0005281424716675661, "loss": 3.7436, "step": 11200 }, { "epoch": 1.2129380053908356, "grad_norm": 0.5924616456031799, "learning_rate": 0.0005278186724230977, "loss": 3.7357, "step": 11250 }, { "epoch": 1.2183288409703503, "grad_norm": 0.5333642959594727, "learning_rate": 0.0005274948731786292, "loss": 3.7353, "step": 11300 }, { "epoch": 1.2237196765498652, "grad_norm": 0.6624649167060852, "learning_rate": 0.0005271710739341608, "loss": 3.7523, "step": 11350 }, { "epoch": 1.2291105121293802, "grad_norm": 0.5522056221961975, "learning_rate": 0.0005268472746896923, "loss": 3.7463, "step": 11400 }, { "epoch": 1.2345013477088949, "grad_norm": 0.6152771711349487, "learning_rate": 0.000526523475445224, "loss": 3.7354, "step": 11450 }, { "epoch": 1.2398921832884098, "grad_norm": 0.5271309614181519, "learning_rate": 0.0005261996762007554, "loss": 3.7252, "step": 11500 }, { "epoch": 1.2452830188679245, "grad_norm": 
0.5856218338012695, "learning_rate": 0.0005258758769562871, "loss": 3.7444, "step": 11550 }, { "epoch": 1.2506738544474394, "grad_norm": 0.6060943007469177, "learning_rate": 0.0005255520777118186, "loss": 3.7456, "step": 11600 }, { "epoch": 1.256064690026954, "grad_norm": 0.5646070837974548, "learning_rate": 0.0005252282784673502, "loss": 3.7347, "step": 11650 }, { "epoch": 1.261455525606469, "grad_norm": 0.5944088697433472, "learning_rate": 0.0005249044792228817, "loss": 3.7372, "step": 11700 }, { "epoch": 1.266846361185984, "grad_norm": 0.5187632441520691, "learning_rate": 0.0005245806799784133, "loss": 3.7333, "step": 11750 }, { "epoch": 1.2722371967654986, "grad_norm": 0.6018880009651184, "learning_rate": 0.0005242568807339449, "loss": 3.7307, "step": 11800 }, { "epoch": 1.2776280323450135, "grad_norm": 0.584701418876648, "learning_rate": 0.0005239330814894765, "loss": 3.7259, "step": 11850 }, { "epoch": 1.2830188679245282, "grad_norm": 0.5245819687843323, "learning_rate": 0.000523609282245008, "loss": 3.7477, "step": 11900 }, { "epoch": 1.2884097035040432, "grad_norm": 0.5737572908401489, "learning_rate": 0.0005232854830005396, "loss": 3.741, "step": 11950 }, { "epoch": 1.2938005390835579, "grad_norm": 0.5504003763198853, "learning_rate": 0.0005229616837560712, "loss": 3.7386, "step": 12000 }, { "epoch": 1.2938005390835579, "eval_accuracy": 0.34930121524995433, "eval_loss": 3.7041478157043457, "eval_runtime": 183.6844, "eval_samples_per_second": 98.054, "eval_steps_per_second": 6.13, "step": 12000 }, { "epoch": 1.2991913746630728, "grad_norm": 0.5650700926780701, "learning_rate": 0.0005226378845116028, "loss": 3.7186, "step": 12050 }, { "epoch": 1.3045822102425877, "grad_norm": 0.5875822305679321, "learning_rate": 0.0005223140852671344, "loss": 3.732, "step": 12100 }, { "epoch": 1.3099730458221024, "grad_norm": 0.6313338279724121, "learning_rate": 0.0005219902860226659, "loss": 3.7404, "step": 12150 }, { "epoch": 1.3153638814016173, "grad_norm": 
0.553345799446106, "learning_rate": 0.0005216664867781975, "loss": 3.7401, "step": 12200 }, { "epoch": 1.320754716981132, "grad_norm": 0.6565985679626465, "learning_rate": 0.000521342687533729, "loss": 3.7227, "step": 12250 }, { "epoch": 1.326145552560647, "grad_norm": 0.5814613103866577, "learning_rate": 0.0005210188882892606, "loss": 3.7334, "step": 12300 }, { "epoch": 1.3315363881401616, "grad_norm": 0.5827574729919434, "learning_rate": 0.0005206950890447922, "loss": 3.7207, "step": 12350 }, { "epoch": 1.3369272237196765, "grad_norm": 0.5803568363189697, "learning_rate": 0.0005203712898003238, "loss": 3.7345, "step": 12400 }, { "epoch": 1.3423180592991915, "grad_norm": 0.6482006311416626, "learning_rate": 0.0005200474905558553, "loss": 3.7252, "step": 12450 }, { "epoch": 1.3477088948787062, "grad_norm": 0.699213445186615, "learning_rate": 0.0005197236913113869, "loss": 3.7266, "step": 12500 }, { "epoch": 1.353099730458221, "grad_norm": 0.5527870059013367, "learning_rate": 0.0005193998920669184, "loss": 3.7349, "step": 12550 }, { "epoch": 1.3584905660377358, "grad_norm": 0.5830569267272949, "learning_rate": 0.0005190760928224501, "loss": 3.7173, "step": 12600 }, { "epoch": 1.3638814016172507, "grad_norm": 0.5488144159317017, "learning_rate": 0.0005187522935779816, "loss": 3.7258, "step": 12650 }, { "epoch": 1.3692722371967654, "grad_norm": 0.5564178824424744, "learning_rate": 0.0005184284943335132, "loss": 3.7399, "step": 12700 }, { "epoch": 1.3746630727762803, "grad_norm": 0.5656182169914246, "learning_rate": 0.0005181046950890447, "loss": 3.7232, "step": 12750 }, { "epoch": 1.3800539083557952, "grad_norm": 0.5447649359703064, "learning_rate": 0.0005177808958445764, "loss": 3.7155, "step": 12800 }, { "epoch": 1.38544474393531, "grad_norm": 0.5839430689811707, "learning_rate": 0.0005174570966001078, "loss": 3.711, "step": 12850 }, { "epoch": 1.3908355795148248, "grad_norm": 0.5413840413093567, "learning_rate": 0.0005171332973556395, "loss": 3.7186, "step": 12900 
}, { "epoch": 1.3962264150943398, "grad_norm": 0.5594467520713806, "learning_rate": 0.000516809498111171, "loss": 3.7149, "step": 12950 }, { "epoch": 1.4016172506738545, "grad_norm": 0.5479294657707214, "learning_rate": 0.0005164856988667026, "loss": 3.7126, "step": 13000 }, { "epoch": 1.4016172506738545, "eval_accuracy": 0.3515265350452827, "eval_loss": 3.68241024017334, "eval_runtime": 183.4286, "eval_samples_per_second": 98.191, "eval_steps_per_second": 6.139, "step": 13000 }, { "epoch": 1.4070080862533692, "grad_norm": 0.6145225167274475, "learning_rate": 0.0005161618996222341, "loss": 3.7236, "step": 13050 }, { "epoch": 1.412398921832884, "grad_norm": 0.5675894021987915, "learning_rate": 0.0005158381003777657, "loss": 3.7337, "step": 13100 }, { "epoch": 1.417789757412399, "grad_norm": 0.5551406741142273, "learning_rate": 0.0005155143011332973, "loss": 3.7143, "step": 13150 }, { "epoch": 1.4231805929919137, "grad_norm": 0.5070226192474365, "learning_rate": 0.0005151905018888289, "loss": 3.7167, "step": 13200 }, { "epoch": 1.4285714285714286, "grad_norm": 0.5413742661476135, "learning_rate": 0.0005148731786292498, "loss": 3.7111, "step": 13250 }, { "epoch": 1.4339622641509435, "grad_norm": 0.5501816272735596, "learning_rate": 0.0005145493793847814, "loss": 3.7103, "step": 13300 }, { "epoch": 1.4393530997304582, "grad_norm": 0.593174934387207, "learning_rate": 0.000514225580140313, "loss": 3.6969, "step": 13350 }, { "epoch": 1.444743935309973, "grad_norm": 0.577828586101532, "learning_rate": 0.0005139017808958445, "loss": 3.7057, "step": 13400 }, { "epoch": 1.4501347708894878, "grad_norm": 0.6046187877655029, "learning_rate": 0.0005135779816513762, "loss": 3.7161, "step": 13450 }, { "epoch": 1.4555256064690028, "grad_norm": 0.5544584393501282, "learning_rate": 0.0005132541824069076, "loss": 3.7009, "step": 13500 }, { "epoch": 1.4609164420485174, "grad_norm": 0.5399063229560852, "learning_rate": 0.0005129303831624393, "loss": 3.7132, "step": 13550 }, { "epoch": 
1.4663072776280324, "grad_norm": 0.5806102156639099, "learning_rate": 0.0005126065839179708, "loss": 3.7158, "step": 13600 }, { "epoch": 1.4716981132075473, "grad_norm": 0.5742630362510681, "learning_rate": 0.0005122827846735024, "loss": 3.6862, "step": 13650 }, { "epoch": 1.477088948787062, "grad_norm": 0.5323203206062317, "learning_rate": 0.0005119589854290339, "loss": 3.6859, "step": 13700 }, { "epoch": 1.482479784366577, "grad_norm": 0.5530096888542175, "learning_rate": 0.0005116351861845655, "loss": 3.7044, "step": 13750 }, { "epoch": 1.4878706199460916, "grad_norm": 0.6719633340835571, "learning_rate": 0.0005113113869400971, "loss": 3.7217, "step": 13800 }, { "epoch": 1.4932614555256065, "grad_norm": 0.6222640872001648, "learning_rate": 0.0005109875876956287, "loss": 3.7135, "step": 13850 }, { "epoch": 1.4986522911051212, "grad_norm": 0.631252110004425, "learning_rate": 0.0005106637884511602, "loss": 3.7266, "step": 13900 }, { "epoch": 1.5040431266846361, "grad_norm": 0.5431990027427673, "learning_rate": 0.0005103399892066918, "loss": 3.7016, "step": 13950 }, { "epoch": 1.509433962264151, "grad_norm": 0.5950245261192322, "learning_rate": 0.0005100161899622234, "loss": 3.703, "step": 14000 }, { "epoch": 1.509433962264151, "eval_accuracy": 0.35352118475978095, "eval_loss": 3.6599960327148438, "eval_runtime": 183.3906, "eval_samples_per_second": 98.211, "eval_steps_per_second": 6.14, "step": 14000 }, { "epoch": 1.5148247978436657, "grad_norm": 0.5107572674751282, "learning_rate": 0.000509692390717755, "loss": 3.6925, "step": 14050 }, { "epoch": 1.5202156334231804, "grad_norm": 0.6692053079605103, "learning_rate": 0.0005093685914732865, "loss": 3.6872, "step": 14100 }, { "epoch": 1.5256064690026954, "grad_norm": 0.5478536486625671, "learning_rate": 0.0005090447922288181, "loss": 3.6865, "step": 14150 }, { "epoch": 1.5309973045822103, "grad_norm": 0.5649746060371399, "learning_rate": 0.0005087209929843496, "loss": 3.7203, "step": 14200 }, { "epoch": 
1.536388140161725, "grad_norm": 0.6324547529220581, "learning_rate": 0.0005083971937398812, "loss": 3.695, "step": 14250 }, { "epoch": 1.54177897574124, "grad_norm": 0.6280053853988647, "learning_rate": 0.0005080733944954127, "loss": 3.6973, "step": 14300 }, { "epoch": 1.5471698113207548, "grad_norm": 0.5381866693496704, "learning_rate": 0.0005077495952509444, "loss": 3.6772, "step": 14350 }, { "epoch": 1.5525606469002695, "grad_norm": 0.5399860739707947, "learning_rate": 0.0005074257960064759, "loss": 3.6922, "step": 14400 }, { "epoch": 1.5579514824797842, "grad_norm": 0.5678397417068481, "learning_rate": 0.0005071019967620075, "loss": 3.6985, "step": 14450 }, { "epoch": 1.5633423180592994, "grad_norm": 0.5648748874664307, "learning_rate": 0.000506778197517539, "loss": 3.69, "step": 14500 }, { "epoch": 1.568733153638814, "grad_norm": 0.5564712285995483, "learning_rate": 0.0005064543982730707, "loss": 3.6994, "step": 14550 }, { "epoch": 1.5741239892183287, "grad_norm": 0.6412194967269897, "learning_rate": 0.0005061305990286023, "loss": 3.6939, "step": 14600 }, { "epoch": 1.5795148247978437, "grad_norm": 0.6159881353378296, "learning_rate": 0.0005058067997841338, "loss": 3.6991, "step": 14650 }, { "epoch": 1.5849056603773586, "grad_norm": 0.583437979221344, "learning_rate": 0.0005054830005396654, "loss": 3.6886, "step": 14700 }, { "epoch": 1.5902964959568733, "grad_norm": 0.5995129942893982, "learning_rate": 0.0005051592012951969, "loss": 3.6783, "step": 14750 }, { "epoch": 1.595687331536388, "grad_norm": 0.5731451511383057, "learning_rate": 0.0005048354020507286, "loss": 3.701, "step": 14800 }, { "epoch": 1.6010781671159031, "grad_norm": 0.6119737029075623, "learning_rate": 0.00050451160280626, "loss": 3.6983, "step": 14850 }, { "epoch": 1.6064690026954178, "grad_norm": 0.5756235718727112, "learning_rate": 0.0005041878035617917, "loss": 3.6896, "step": 14900 }, { "epoch": 1.6118598382749325, "grad_norm": 0.5833989977836609, "learning_rate": 0.0005038640043173232, 
"loss": 3.6857, "step": 14950 }, { "epoch": 1.6172506738544474, "grad_norm": 0.6236714124679565, "learning_rate": 0.0005035402050728548, "loss": 3.6784, "step": 15000 }, { "epoch": 1.6172506738544474, "eval_accuracy": 0.3555121402760935, "eval_loss": 3.6398637294769287, "eval_runtime": 183.4963, "eval_samples_per_second": 98.155, "eval_steps_per_second": 6.136, "step": 15000 }, { "epoch": 1.6226415094339623, "grad_norm": 0.5502594709396362, "learning_rate": 0.0005032164058283863, "loss": 3.6833, "step": 15050 }, { "epoch": 1.628032345013477, "grad_norm": 0.5723167061805725, "learning_rate": 0.0005028926065839179, "loss": 3.6792, "step": 15100 }, { "epoch": 1.633423180592992, "grad_norm": 0.5737243294715881, "learning_rate": 0.0005025688073394495, "loss": 3.6794, "step": 15150 }, { "epoch": 1.6388140161725069, "grad_norm": 0.548410952091217, "learning_rate": 0.0005022450080949811, "loss": 3.6866, "step": 15200 }, { "epoch": 1.6442048517520216, "grad_norm": 0.654476523399353, "learning_rate": 0.0005019212088505126, "loss": 3.6874, "step": 15250 }, { "epoch": 1.6495956873315363, "grad_norm": 0.511240541934967, "learning_rate": 0.0005015974096060442, "loss": 3.6893, "step": 15300 }, { "epoch": 1.6549865229110512, "grad_norm": 0.5759029984474182, "learning_rate": 0.0005012736103615758, "loss": 3.6728, "step": 15350 }, { "epoch": 1.6603773584905661, "grad_norm": 0.5841777920722961, "learning_rate": 0.0005009498111171074, "loss": 3.6776, "step": 15400 }, { "epoch": 1.6657681940700808, "grad_norm": 0.5825933814048767, "learning_rate": 0.0005006260118726389, "loss": 3.6777, "step": 15450 }, { "epoch": 1.6711590296495957, "grad_norm": 0.5646806359291077, "learning_rate": 0.0005003022126281705, "loss": 3.6786, "step": 15500 }, { "epoch": 1.6765498652291106, "grad_norm": 0.627292275428772, "learning_rate": 0.000499978413383702, "loss": 3.6701, "step": 15550 }, { "epoch": 1.6819407008086253, "grad_norm": 0.5698474645614624, "learning_rate": 0.0004996546141392336, "loss": 
3.6674, "step": 15600 }, { "epoch": 1.68733153638814, "grad_norm": 0.5848374366760254, "learning_rate": 0.0004993308148947651, "loss": 3.677, "step": 15650 }, { "epoch": 1.692722371967655, "grad_norm": 0.5110187530517578, "learning_rate": 0.0004990070156502968, "loss": 3.6661, "step": 15700 }, { "epoch": 1.6981132075471699, "grad_norm": 0.5932977199554443, "learning_rate": 0.0004986832164058284, "loss": 3.6621, "step": 15750 }, { "epoch": 1.7035040431266846, "grad_norm": 0.5683602094650269, "learning_rate": 0.0004983594171613599, "loss": 3.6748, "step": 15800 }, { "epoch": 1.7088948787061995, "grad_norm": 0.6081506013870239, "learning_rate": 0.0004980356179168915, "loss": 3.6814, "step": 15850 }, { "epoch": 1.7142857142857144, "grad_norm": 0.5934674143791199, "learning_rate": 0.000497711818672423, "loss": 3.704, "step": 15900 }, { "epoch": 1.719676549865229, "grad_norm": 0.5152272582054138, "learning_rate": 0.0004973880194279547, "loss": 3.6704, "step": 15950 }, { "epoch": 1.7250673854447438, "grad_norm": 0.5506434440612793, "learning_rate": 0.0004970642201834862, "loss": 3.6787, "step": 16000 }, { "epoch": 1.7250673854447438, "eval_accuracy": 0.3569788456087012, "eval_loss": 3.6205105781555176, "eval_runtime": 183.4216, "eval_samples_per_second": 98.195, "eval_steps_per_second": 6.139, "step": 16000 }, { "epoch": 1.7304582210242587, "grad_norm": 0.5687834024429321, "learning_rate": 0.0004967404209390178, "loss": 3.6465, "step": 16050 }, { "epoch": 1.7358490566037736, "grad_norm": 0.5210959911346436, "learning_rate": 0.0004964166216945493, "loss": 3.6549, "step": 16100 }, { "epoch": 1.7412398921832883, "grad_norm": 0.567616879940033, "learning_rate": 0.000496092822450081, "loss": 3.6694, "step": 16150 }, { "epoch": 1.7466307277628033, "grad_norm": 0.5949267745018005, "learning_rate": 0.0004957690232056125, "loss": 3.6572, "step": 16200 }, { "epoch": 1.7520215633423182, "grad_norm": 0.5664389729499817, "learning_rate": 0.0004954452239611441, "loss": 3.6673, "step": 
16250 }, { "epoch": 1.7574123989218329, "grad_norm": 0.5412474870681763, "learning_rate": 0.0004951214247166756, "loss": 3.658, "step": 16300 }, { "epoch": 1.7628032345013476, "grad_norm": 0.5369322896003723, "learning_rate": 0.0004947976254722072, "loss": 3.6511, "step": 16350 }, { "epoch": 1.7681940700808625, "grad_norm": 0.5792037844657898, "learning_rate": 0.0004944738262277387, "loss": 3.667, "step": 16400 }, { "epoch": 1.7735849056603774, "grad_norm": 0.6002668142318726, "learning_rate": 0.0004941500269832703, "loss": 3.6646, "step": 16450 }, { "epoch": 1.778975741239892, "grad_norm": 0.5983853340148926, "learning_rate": 0.0004938262277388019, "loss": 3.6523, "step": 16500 }, { "epoch": 1.784366576819407, "grad_norm": 0.5764635801315308, "learning_rate": 0.0004935024284943335, "loss": 3.6473, "step": 16550 }, { "epoch": 1.789757412398922, "grad_norm": 0.5904407501220703, "learning_rate": 0.000493178629249865, "loss": 3.6545, "step": 16600 }, { "epoch": 1.7951482479784366, "grad_norm": 0.5274083018302917, "learning_rate": 0.0004928548300053966, "loss": 3.6631, "step": 16650 }, { "epoch": 1.8005390835579513, "grad_norm": 0.5829530954360962, "learning_rate": 0.0004925310307609282, "loss": 3.654, "step": 16700 }, { "epoch": 1.8059299191374663, "grad_norm": 0.522125780582428, "learning_rate": 0.0004922072315164598, "loss": 3.6488, "step": 16750 }, { "epoch": 1.8113207547169812, "grad_norm": 0.5361061096191406, "learning_rate": 0.0004918834322719913, "loss": 3.6535, "step": 16800 }, { "epoch": 1.8167115902964959, "grad_norm": 0.5397604703903198, "learning_rate": 0.0004915596330275229, "loss": 3.6442, "step": 16850 }, { "epoch": 1.8221024258760108, "grad_norm": 0.5718424320220947, "learning_rate": 0.0004912358337830544, "loss": 3.6489, "step": 16900 }, { "epoch": 1.8274932614555257, "grad_norm": 0.5481303930282593, "learning_rate": 0.000490912034538586, "loss": 3.655, "step": 16950 }, { "epoch": 1.8328840970350404, "grad_norm": 0.5255277156829834, "learning_rate": 
0.0004905882352941175, "loss": 3.6622, "step": 17000 }, { "epoch": 1.8328840970350404, "eval_accuracy": 0.35840252439773435, "eval_loss": 3.60727596282959, "eval_runtime": 183.9803, "eval_samples_per_second": 97.896, "eval_steps_per_second": 6.12, "step": 17000 }, { "epoch": 1.838274932614555, "grad_norm": 0.5432764887809753, "learning_rate": 0.0004902644360496492, "loss": 3.6562, "step": 17050 }, { "epoch": 1.8436657681940702, "grad_norm": 0.5756969451904297, "learning_rate": 0.0004899406368051808, "loss": 3.6436, "step": 17100 }, { "epoch": 1.849056603773585, "grad_norm": 0.5970697402954102, "learning_rate": 0.0004896168375607123, "loss": 3.6439, "step": 17150 }, { "epoch": 1.8544474393530996, "grad_norm": 0.5418063402175903, "learning_rate": 0.0004892930383162439, "loss": 3.6429, "step": 17200 }, { "epoch": 1.8598382749326146, "grad_norm": 0.5667785406112671, "learning_rate": 0.0004889757150566648, "loss": 3.6382, "step": 17250 }, { "epoch": 1.8652291105121295, "grad_norm": 0.5172768235206604, "learning_rate": 0.0004886519158121964, "loss": 3.6628, "step": 17300 }, { "epoch": 1.8706199460916442, "grad_norm": 0.5945035219192505, "learning_rate": 0.000488328116567728, "loss": 3.6666, "step": 17350 }, { "epoch": 1.8760107816711589, "grad_norm": 0.5649627447128296, "learning_rate": 0.0004880043173232595, "loss": 3.6471, "step": 17400 }, { "epoch": 1.881401617250674, "grad_norm": 0.538306474685669, "learning_rate": 0.0004876805180787911, "loss": 3.6564, "step": 17450 }, { "epoch": 1.8867924528301887, "grad_norm": 0.5613325834274292, "learning_rate": 0.000487363194819212, "loss": 3.6392, "step": 17500 }, { "epoch": 1.8921832884097034, "grad_norm": 0.5915970206260681, "learning_rate": 0.0004870393955747436, "loss": 3.6539, "step": 17550 }, { "epoch": 1.8975741239892183, "grad_norm": 0.5147404670715332, "learning_rate": 0.0004867155963302752, "loss": 3.6422, "step": 17600 }, { "epoch": 1.9029649595687332, "grad_norm": 0.5603023767471313, "learning_rate": 
0.0004863917970858068, "loss": 3.6386, "step": 17650 }, { "epoch": 1.908355795148248, "grad_norm": 0.5458604693412781, "learning_rate": 0.00048606799784133833, "loss": 3.6524, "step": 17700 }, { "epoch": 1.9137466307277629, "grad_norm": 0.606473445892334, "learning_rate": 0.00048574419859686994, "loss": 3.6401, "step": 17750 }, { "epoch": 1.9191374663072778, "grad_norm": 0.5725317001342773, "learning_rate": 0.0004854203993524015, "loss": 3.6557, "step": 17800 }, { "epoch": 1.9245283018867925, "grad_norm": 0.6147105097770691, "learning_rate": 0.000485096600107933, "loss": 3.6629, "step": 17850 }, { "epoch": 1.9299191374663072, "grad_norm": 0.5641502141952515, "learning_rate": 0.00048477280086346464, "loss": 3.6383, "step": 17900 }, { "epoch": 1.935309973045822, "grad_norm": 0.5484232902526855, "learning_rate": 0.00048444900161899614, "loss": 3.6478, "step": 17950 }, { "epoch": 1.940700808625337, "grad_norm": 0.5765843391418457, "learning_rate": 0.00048412520237452774, "loss": 3.6447, "step": 18000 }, { "epoch": 1.940700808625337, "eval_accuracy": 0.3603276362640313, "eval_loss": 3.5914807319641113, "eval_runtime": 183.476, "eval_samples_per_second": 98.165, "eval_steps_per_second": 6.137, "step": 18000 }, { "epoch": 1.9460916442048517, "grad_norm": 0.5953685641288757, "learning_rate": 0.0004838014031300593, "loss": 3.6288, "step": 18050 }, { "epoch": 1.9514824797843666, "grad_norm": 0.5497512817382812, "learning_rate": 0.0004834776038855909, "loss": 3.6586, "step": 18100 }, { "epoch": 1.9568733153638815, "grad_norm": 0.5568259954452515, "learning_rate": 0.00048315380464112245, "loss": 3.6575, "step": 18150 }, { "epoch": 1.9622641509433962, "grad_norm": 0.6135269999504089, "learning_rate": 0.00048283000539665405, "loss": 3.6467, "step": 18200 }, { "epoch": 1.967654986522911, "grad_norm": 0.5931421518325806, "learning_rate": 0.0004825062061521856, "loss": 3.648, "step": 18250 }, { "epoch": 1.9730458221024259, "grad_norm": 0.558696448802948, "learning_rate": 
0.00048218240690771716, "loss": 3.6273, "step": 18300 }, { "epoch": 1.9784366576819408, "grad_norm": 0.6715732216835022, "learning_rate": 0.00048185860766324876, "loss": 3.6528, "step": 18350 }, { "epoch": 1.9838274932614555, "grad_norm": 0.5224708914756775, "learning_rate": 0.0004815348084187803, "loss": 3.6293, "step": 18400 }, { "epoch": 1.9892183288409704, "grad_norm": 0.5655230283737183, "learning_rate": 0.0004812110091743119, "loss": 3.6291, "step": 18450 }, { "epoch": 1.9946091644204853, "grad_norm": 0.5798264741897583, "learning_rate": 0.00048088720992984347, "loss": 3.6214, "step": 18500 }, { "epoch": 2.0, "grad_norm": 1.0706647634506226, "learning_rate": 0.00048056341068537507, "loss": 3.6367, "step": 18550 }, { "epoch": 2.0053908355795147, "grad_norm": 0.6311784982681274, "learning_rate": 0.00048023961144090657, "loss": 3.5528, "step": 18600 }, { "epoch": 2.01078167115903, "grad_norm": 0.5586987137794495, "learning_rate": 0.00047991581219643817, "loss": 3.5434, "step": 18650 }, { "epoch": 2.0161725067385445, "grad_norm": 0.6267822980880737, "learning_rate": 0.0004795920129519697, "loss": 3.552, "step": 18700 }, { "epoch": 2.0215633423180592, "grad_norm": 0.5614279508590698, "learning_rate": 0.0004792682137075013, "loss": 3.5445, "step": 18750 }, { "epoch": 2.026954177897574, "grad_norm": 0.5612114071846008, "learning_rate": 0.0004789444144630329, "loss": 3.5517, "step": 18800 }, { "epoch": 2.032345013477089, "grad_norm": 0.5366635322570801, "learning_rate": 0.00047862061521856443, "loss": 3.5508, "step": 18850 }, { "epoch": 2.0377358490566038, "grad_norm": 0.5916035175323486, "learning_rate": 0.00047829681597409603, "loss": 3.5641, "step": 18900 }, { "epoch": 2.0431266846361185, "grad_norm": 0.5509299635887146, "learning_rate": 0.0004779730167296276, "loss": 3.5591, "step": 18950 }, { "epoch": 2.0485175202156336, "grad_norm": 0.6209540963172913, "learning_rate": 0.0004776492174851592, "loss": 3.5598, "step": 19000 }, { "epoch": 2.0485175202156336, 
"eval_accuracy": 0.3615285766330448, "eval_loss": 3.5827741622924805, "eval_runtime": 183.6104, "eval_samples_per_second": 98.094, "eval_steps_per_second": 6.133, "step": 19000 }, { "epoch": 2.0539083557951483, "grad_norm": 0.5647927522659302, "learning_rate": 0.00047732541824069074, "loss": 3.5683, "step": 19050 }, { "epoch": 2.059299191374663, "grad_norm": 0.5451118350028992, "learning_rate": 0.0004770016189962223, "loss": 3.5618, "step": 19100 }, { "epoch": 2.0646900269541777, "grad_norm": 0.561872661113739, "learning_rate": 0.0004766778197517539, "loss": 3.5564, "step": 19150 }, { "epoch": 2.070080862533693, "grad_norm": 0.5675294995307922, "learning_rate": 0.0004763540205072854, "loss": 3.5752, "step": 19200 }, { "epoch": 2.0754716981132075, "grad_norm": 0.5630126595497131, "learning_rate": 0.00047603022126281705, "loss": 3.5646, "step": 19250 }, { "epoch": 2.0808625336927222, "grad_norm": 0.603242814540863, "learning_rate": 0.00047570642201834855, "loss": 3.5555, "step": 19300 }, { "epoch": 2.0862533692722374, "grad_norm": 0.5813462138175964, "learning_rate": 0.00047538262277388015, "loss": 3.5722, "step": 19350 }, { "epoch": 2.091644204851752, "grad_norm": 0.5469586253166199, "learning_rate": 0.0004750588235294117, "loss": 3.5634, "step": 19400 }, { "epoch": 2.0970350404312668, "grad_norm": 0.6438509225845337, "learning_rate": 0.0004747350242849433, "loss": 3.5481, "step": 19450 }, { "epoch": 2.1024258760107815, "grad_norm": 0.5822634696960449, "learning_rate": 0.00047441122504047486, "loss": 3.5723, "step": 19500 }, { "epoch": 2.1078167115902966, "grad_norm": 0.5717223882675171, "learning_rate": 0.0004740874257960064, "loss": 3.5625, "step": 19550 }, { "epoch": 2.1132075471698113, "grad_norm": 0.5862929821014404, "learning_rate": 0.000473763626551538, "loss": 3.5588, "step": 19600 }, { "epoch": 2.118598382749326, "grad_norm": 0.5764575600624084, "learning_rate": 0.00047343982730706956, "loss": 3.5623, "step": 19650 }, { "epoch": 2.123989218328841, 
"grad_norm": 0.5479846596717834, "learning_rate": 0.00047311602806260117, "loss": 3.5536, "step": 19700 }, { "epoch": 2.129380053908356, "grad_norm": 0.6235895752906799, "learning_rate": 0.0004727922288181327, "loss": 3.5448, "step": 19750 }, { "epoch": 2.1347708894878705, "grad_norm": 0.5840886831283569, "learning_rate": 0.0004724684295736643, "loss": 3.5682, "step": 19800 }, { "epoch": 2.1401617250673857, "grad_norm": 0.5456245541572571, "learning_rate": 0.0004721446303291959, "loss": 3.5595, "step": 19850 }, { "epoch": 2.1455525606469004, "grad_norm": 0.5832547545433044, "learning_rate": 0.0004718208310847275, "loss": 3.5621, "step": 19900 }, { "epoch": 2.150943396226415, "grad_norm": 0.5474064946174622, "learning_rate": 0.000471497031840259, "loss": 3.5543, "step": 19950 }, { "epoch": 2.1563342318059298, "grad_norm": 0.5883390307426453, "learning_rate": 0.0004711732325957905, "loss": 3.5586, "step": 20000 }, { "epoch": 2.1563342318059298, "eval_accuracy": 0.36281231050257284, "eval_loss": 3.5723373889923096, "eval_runtime": 183.7122, "eval_samples_per_second": 98.039, "eval_steps_per_second": 6.129, "step": 20000 }, { "epoch": 2.161725067385445, "grad_norm": 0.5845204591751099, "learning_rate": 0.00047084943335132213, "loss": 3.5599, "step": 20050 }, { "epoch": 2.1671159029649596, "grad_norm": 0.5878739953041077, "learning_rate": 0.0004705256341068537, "loss": 3.5556, "step": 20100 }, { "epoch": 2.1725067385444743, "grad_norm": 0.5857297778129578, "learning_rate": 0.0004702018348623853, "loss": 3.5674, "step": 20150 }, { "epoch": 2.177897574123989, "grad_norm": 0.5607808828353882, "learning_rate": 0.00046987803561791684, "loss": 3.5675, "step": 20200 }, { "epoch": 2.183288409703504, "grad_norm": 0.5514035224914551, "learning_rate": 0.00046955423637344844, "loss": 3.5755, "step": 20250 }, { "epoch": 2.188679245283019, "grad_norm": 0.607312023639679, "learning_rate": 0.00046923043712898, "loss": 3.5548, "step": 20300 }, { "epoch": 2.1940700808625335, "grad_norm": 
0.5242741703987122, "learning_rate": 0.0004689066378845116, "loss": 3.5481, "step": 20350 }, { "epoch": 2.1994609164420487, "grad_norm": 0.6142726540565491, "learning_rate": 0.00046858283864004315, "loss": 3.5619, "step": 20400 }, { "epoch": 2.2048517520215634, "grad_norm": 0.7217954397201538, "learning_rate": 0.0004682590393955747, "loss": 3.5574, "step": 20450 }, { "epoch": 2.210242587601078, "grad_norm": 0.5498636364936829, "learning_rate": 0.0004679352401511063, "loss": 3.572, "step": 20500 }, { "epoch": 2.215633423180593, "grad_norm": 0.5720429420471191, "learning_rate": 0.0004676114409066378, "loss": 3.5675, "step": 20550 }, { "epoch": 2.221024258760108, "grad_norm": 0.6373676061630249, "learning_rate": 0.00046728764166216946, "loss": 3.5666, "step": 20600 }, { "epoch": 2.2264150943396226, "grad_norm": 0.5550791025161743, "learning_rate": 0.00046696384241770095, "loss": 3.5731, "step": 20650 }, { "epoch": 2.2318059299191373, "grad_norm": 0.6740472912788391, "learning_rate": 0.00046664004317323256, "loss": 3.5562, "step": 20700 }, { "epoch": 2.2371967654986524, "grad_norm": 0.5599523186683655, "learning_rate": 0.0004663162439287641, "loss": 3.5742, "step": 20750 }, { "epoch": 2.242587601078167, "grad_norm": 0.5968847870826721, "learning_rate": 0.00046599244468429566, "loss": 3.5682, "step": 20800 }, { "epoch": 2.247978436657682, "grad_norm": 0.577918291091919, "learning_rate": 0.00046566864543982726, "loss": 3.5715, "step": 20850 }, { "epoch": 2.2533692722371965, "grad_norm": 0.544826865196228, "learning_rate": 0.0004653448461953588, "loss": 3.5616, "step": 20900 }, { "epoch": 2.2587601078167117, "grad_norm": 0.5795401930809021, "learning_rate": 0.0004650210469508904, "loss": 3.5613, "step": 20950 }, { "epoch": 2.2641509433962264, "grad_norm": 0.5422238707542419, "learning_rate": 0.00046469724770642197, "loss": 3.5585, "step": 21000 }, { "epoch": 2.2641509433962264, "eval_accuracy": 0.3636982661498121, "eval_loss": 3.5617871284484863, "eval_runtime": 183.3857, 
"eval_samples_per_second": 98.214, "eval_steps_per_second": 6.14, "step": 21000 }, { "epoch": 2.269541778975741, "grad_norm": 0.6669674515724182, "learning_rate": 0.0004643734484619536, "loss": 3.556, "step": 21050 }, { "epoch": 2.274932614555256, "grad_norm": 0.6207566261291504, "learning_rate": 0.0004640496492174851, "loss": 3.546, "step": 21100 }, { "epoch": 2.280323450134771, "grad_norm": 0.595628559589386, "learning_rate": 0.00046372584997301673, "loss": 3.5727, "step": 21150 }, { "epoch": 2.2857142857142856, "grad_norm": 0.5632020235061646, "learning_rate": 0.0004634020507285483, "loss": 3.5522, "step": 21200 }, { "epoch": 2.2911051212938007, "grad_norm": 0.5731921195983887, "learning_rate": 0.0004630782514840798, "loss": 3.5504, "step": 21250 }, { "epoch": 2.2964959568733154, "grad_norm": 0.5752100348472595, "learning_rate": 0.0004627544522396114, "loss": 3.5611, "step": 21300 }, { "epoch": 2.30188679245283, "grad_norm": 0.5865209102630615, "learning_rate": 0.00046243065299514293, "loss": 3.5384, "step": 21350 }, { "epoch": 2.3072776280323453, "grad_norm": 0.5781216621398926, "learning_rate": 0.00046210685375067454, "loss": 3.5746, "step": 21400 }, { "epoch": 2.31266846361186, "grad_norm": 0.5815436244010925, "learning_rate": 0.0004617830545062061, "loss": 3.5338, "step": 21450 }, { "epoch": 2.3180592991913747, "grad_norm": 0.5783815979957581, "learning_rate": 0.0004614592552617377, "loss": 3.5657, "step": 21500 }, { "epoch": 2.3234501347708894, "grad_norm": 0.6043853163719177, "learning_rate": 0.00046114193200215864, "loss": 3.5665, "step": 21550 }, { "epoch": 2.3288409703504045, "grad_norm": 0.5591253042221069, "learning_rate": 0.0004608181327576902, "loss": 3.5399, "step": 21600 }, { "epoch": 2.334231805929919, "grad_norm": 0.6128972172737122, "learning_rate": 0.00046049433351322175, "loss": 3.5586, "step": 21650 }, { "epoch": 2.339622641509434, "grad_norm": 0.5784398317337036, "learning_rate": 0.00046017053426875335, "loss": 3.5736, "step": 21700 }, { 
"epoch": 2.3450134770889486, "grad_norm": 0.5711215734481812, "learning_rate": 0.0004598467350242849, "loss": 3.5537, "step": 21750 }, { "epoch": 2.3504043126684637, "grad_norm": 0.5763333439826965, "learning_rate": 0.0004595229357798165, "loss": 3.5654, "step": 21800 }, { "epoch": 2.3557951482479784, "grad_norm": 0.6388970613479614, "learning_rate": 0.00045919913653534806, "loss": 3.5649, "step": 21850 }, { "epoch": 2.361185983827493, "grad_norm": 0.5283222794532776, "learning_rate": 0.00045887533729087966, "loss": 3.558, "step": 21900 }, { "epoch": 2.3665768194070083, "grad_norm": 0.5484132170677185, "learning_rate": 0.00045855153804641116, "loss": 3.5504, "step": 21950 }, { "epoch": 2.371967654986523, "grad_norm": 0.5693395733833313, "learning_rate": 0.0004582277388019427, "loss": 3.546, "step": 22000 }, { "epoch": 2.371967654986523, "eval_accuracy": 0.36476686730146757, "eval_loss": 3.5517172813415527, "eval_runtime": 183.5403, "eval_samples_per_second": 98.131, "eval_steps_per_second": 6.135, "step": 22000 }, { "epoch": 2.3773584905660377, "grad_norm": 0.5790075659751892, "learning_rate": 0.0004579039395574743, "loss": 3.5475, "step": 22050 }, { "epoch": 2.382749326145553, "grad_norm": 0.5600558519363403, "learning_rate": 0.00045758014031300586, "loss": 3.5632, "step": 22100 }, { "epoch": 2.3881401617250675, "grad_norm": 0.5839028358459473, "learning_rate": 0.00045725634106853747, "loss": 3.557, "step": 22150 }, { "epoch": 2.393530997304582, "grad_norm": 0.582438051700592, "learning_rate": 0.000456932541824069, "loss": 3.5437, "step": 22200 }, { "epoch": 2.398921832884097, "grad_norm": 0.8435466885566711, "learning_rate": 0.0004566087425796006, "loss": 3.5393, "step": 22250 }, { "epoch": 2.404312668463612, "grad_norm": 0.6102995872497559, "learning_rate": 0.0004562849433351322, "loss": 3.5668, "step": 22300 }, { "epoch": 2.4097035040431267, "grad_norm": 0.5426021814346313, "learning_rate": 0.0004559611440906638, "loss": 3.5759, "step": 22350 }, { "epoch": 
2.4150943396226414, "grad_norm": 0.6063995957374573, "learning_rate": 0.00045563734484619533, "loss": 3.5585, "step": 22400 }, { "epoch": 2.420485175202156, "grad_norm": 0.5520073771476746, "learning_rate": 0.0004553135456017269, "loss": 3.564, "step": 22450 }, { "epoch": 2.4258760107816713, "grad_norm": 0.6225586533546448, "learning_rate": 0.0004549897463572585, "loss": 3.5633, "step": 22500 }, { "epoch": 2.431266846361186, "grad_norm": 0.5612086653709412, "learning_rate": 0.00045466594711279, "loss": 3.543, "step": 22550 }, { "epoch": 2.4366576819407006, "grad_norm": 0.563895046710968, "learning_rate": 0.00045434214786832164, "loss": 3.5606, "step": 22600 }, { "epoch": 2.442048517520216, "grad_norm": 0.5550323128700256, "learning_rate": 0.00045401834862385314, "loss": 3.5558, "step": 22650 }, { "epoch": 2.4474393530997305, "grad_norm": 0.610876202583313, "learning_rate": 0.00045369454937938474, "loss": 3.563, "step": 22700 }, { "epoch": 2.452830188679245, "grad_norm": 0.5941126346588135, "learning_rate": 0.0004533707501349163, "loss": 3.5505, "step": 22750 }, { "epoch": 2.4582210242587603, "grad_norm": 0.5647153258323669, "learning_rate": 0.0004530469508904479, "loss": 3.5483, "step": 22800 }, { "epoch": 2.463611859838275, "grad_norm": 0.6184137463569641, "learning_rate": 0.00045272315164597945, "loss": 3.5388, "step": 22850 }, { "epoch": 2.4690026954177897, "grad_norm": 0.5737881064414978, "learning_rate": 0.000452399352401511, "loss": 3.5513, "step": 22900 }, { "epoch": 2.4743935309973044, "grad_norm": 0.5692729353904724, "learning_rate": 0.0004520755531570426, "loss": 3.5391, "step": 22950 }, { "epoch": 2.4797843665768196, "grad_norm": 0.5915802121162415, "learning_rate": 0.00045175175391257415, "loss": 3.5531, "step": 23000 }, { "epoch": 2.4797843665768196, "eval_accuracy": 0.3655561218785519, "eval_loss": 3.5417704582214355, "eval_runtime": 183.6152, "eval_samples_per_second": 98.091, "eval_steps_per_second": 6.132, "step": 23000 }, { "epoch": 
2.4851752021563343, "grad_norm": 0.59952312707901, "learning_rate": 0.00045142795466810576, "loss": 3.5615, "step": 23050 }, { "epoch": 2.490566037735849, "grad_norm": 0.6111100316047668, "learning_rate": 0.0004511041554236373, "loss": 3.5572, "step": 23100 }, { "epoch": 2.4959568733153636, "grad_norm": 0.6241294145584106, "learning_rate": 0.0004507803561791689, "loss": 3.5702, "step": 23150 }, { "epoch": 2.501347708894879, "grad_norm": 0.5904486179351807, "learning_rate": 0.00045045655693470046, "loss": 3.5615, "step": 23200 }, { "epoch": 2.5067385444743935, "grad_norm": 0.5772064328193665, "learning_rate": 0.00045013275769023207, "loss": 3.5602, "step": 23250 }, { "epoch": 2.512129380053908, "grad_norm": 0.5506559014320374, "learning_rate": 0.00044980895844576356, "loss": 3.5598, "step": 23300 }, { "epoch": 2.5175202156334233, "grad_norm": 0.583018958568573, "learning_rate": 0.0004494851592012951, "loss": 3.5572, "step": 23350 }, { "epoch": 2.522911051212938, "grad_norm": 0.573809802532196, "learning_rate": 0.0004491613599568267, "loss": 3.535, "step": 23400 }, { "epoch": 2.5283018867924527, "grad_norm": 0.6045467257499695, "learning_rate": 0.00044883756071235827, "loss": 3.5499, "step": 23450 }, { "epoch": 2.533692722371968, "grad_norm": 0.5793042182922363, "learning_rate": 0.0004485137614678899, "loss": 3.5448, "step": 23500 }, { "epoch": 2.5390835579514826, "grad_norm": 0.5313010215759277, "learning_rate": 0.00044819643820831083, "loss": 3.5483, "step": 23550 }, { "epoch": 2.5444743935309972, "grad_norm": 0.6156229376792908, "learning_rate": 0.0004478726389638424, "loss": 3.5537, "step": 23600 }, { "epoch": 2.5498652291105124, "grad_norm": 0.5773780345916748, "learning_rate": 0.00044754883971937393, "loss": 3.5403, "step": 23650 }, { "epoch": 2.555256064690027, "grad_norm": 0.5939944386482239, "learning_rate": 0.00044722504047490553, "loss": 3.5289, "step": 23700 }, { "epoch": 2.560646900269542, "grad_norm": 0.6025189161300659, "learning_rate": 
0.0004469012412304371, "loss": 3.5384, "step": 23750 }, { "epoch": 2.5660377358490565, "grad_norm": 0.5510246157646179, "learning_rate": 0.0004465774419859687, "loss": 3.5567, "step": 23800 }, { "epoch": 2.571428571428571, "grad_norm": 0.5493279695510864, "learning_rate": 0.00044625364274150024, "loss": 3.5405, "step": 23850 }, { "epoch": 2.5768194070080863, "grad_norm": 0.5634421706199646, "learning_rate": 0.00044592984349703184, "loss": 3.5443, "step": 23900 }, { "epoch": 2.582210242587601, "grad_norm": 0.6100355982780457, "learning_rate": 0.00044560604425256334, "loss": 3.5354, "step": 23950 }, { "epoch": 2.5876010781671157, "grad_norm": 0.5959258079528809, "learning_rate": 0.000445282245008095, "loss": 3.5406, "step": 24000 }, { "epoch": 2.5876010781671157, "eval_accuracy": 0.3667099068942539, "eval_loss": 3.5328526496887207, "eval_runtime": 183.3949, "eval_samples_per_second": 98.209, "eval_steps_per_second": 6.14, "step": 24000 }, { "epoch": 2.592991913746631, "grad_norm": 0.5708218812942505, "learning_rate": 0.0004449584457636265, "loss": 3.5664, "step": 24050 }, { "epoch": 2.5983827493261455, "grad_norm": 0.5921903252601624, "learning_rate": 0.00044463464651915805, "loss": 3.5394, "step": 24100 }, { "epoch": 2.6037735849056602, "grad_norm": 0.5770680904388428, "learning_rate": 0.00044431084727468965, "loss": 3.5536, "step": 24150 }, { "epoch": 2.6091644204851754, "grad_norm": 0.5639709234237671, "learning_rate": 0.0004439870480302212, "loss": 3.5388, "step": 24200 }, { "epoch": 2.61455525606469, "grad_norm": 0.5982383489608765, "learning_rate": 0.0004436632487857528, "loss": 3.5558, "step": 24250 }, { "epoch": 2.6199460916442048, "grad_norm": 0.6448655724525452, "learning_rate": 0.00044333944954128436, "loss": 3.5496, "step": 24300 }, { "epoch": 2.62533692722372, "grad_norm": 0.5641359686851501, "learning_rate": 0.00044301565029681596, "loss": 3.543, "step": 24350 }, { "epoch": 2.6307277628032346, "grad_norm": 0.5653684139251709, "learning_rate": 
0.0004426918510523475, "loss": 3.545, "step": 24400 }, { "epoch": 2.6361185983827493, "grad_norm": 0.5824022889137268, "learning_rate": 0.00044237452779276846, "loss": 3.5449, "step": 24450 }, { "epoch": 2.641509433962264, "grad_norm": 0.560650646686554, "learning_rate": 0.0004420507285483, "loss": 3.5525, "step": 24500 }, { "epoch": 2.6469002695417787, "grad_norm": 0.5790745615959167, "learning_rate": 0.0004417269293038316, "loss": 3.537, "step": 24550 }, { "epoch": 2.652291105121294, "grad_norm": 0.6426572799682617, "learning_rate": 0.0004414031300593631, "loss": 3.5435, "step": 24600 }, { "epoch": 2.6576819407008085, "grad_norm": 0.5720652341842651, "learning_rate": 0.0004410793308148948, "loss": 3.5407, "step": 24650 }, { "epoch": 2.6630727762803232, "grad_norm": 0.5447050929069519, "learning_rate": 0.00044075553157042627, "loss": 3.5486, "step": 24700 }, { "epoch": 2.6684636118598384, "grad_norm": 0.602033793926239, "learning_rate": 0.0004404317323259579, "loss": 3.5622, "step": 24750 }, { "epoch": 2.673854447439353, "grad_norm": 0.5450906157493591, "learning_rate": 0.0004401079330814894, "loss": 3.5564, "step": 24800 }, { "epoch": 2.6792452830188678, "grad_norm": 0.5530940890312195, "learning_rate": 0.000439784133837021, "loss": 3.5576, "step": 24850 }, { "epoch": 2.684636118598383, "grad_norm": 0.5873911380767822, "learning_rate": 0.0004394603345925526, "loss": 3.5276, "step": 24900 }, { "epoch": 2.6900269541778976, "grad_norm": 0.5609381198883057, "learning_rate": 0.00043913653534808413, "loss": 3.5476, "step": 24950 }, { "epoch": 2.6954177897574123, "grad_norm": 0.5607409477233887, "learning_rate": 0.00043881273610361574, "loss": 3.5264, "step": 25000 }, { "epoch": 2.6954177897574123, "eval_accuracy": 0.36771798838739667, "eval_loss": 3.5224156379699707, "eval_runtime": 183.5829, "eval_samples_per_second": 98.108, "eval_steps_per_second": 6.133, "step": 25000 }, { "epoch": 2.7008086253369274, "grad_norm": 0.6033768057823181, "learning_rate": 
0.0004384889368591473, "loss": 3.5403, "step": 25050 }, { "epoch": 2.706199460916442, "grad_norm": 0.5833696722984314, "learning_rate": 0.0004381651376146789, "loss": 3.5425, "step": 25100 }, { "epoch": 2.711590296495957, "grad_norm": 0.5619771480560303, "learning_rate": 0.00043784133837021044, "loss": 3.5439, "step": 25150 }, { "epoch": 2.7169811320754715, "grad_norm": 0.6554853320121765, "learning_rate": 0.00043751753912574205, "loss": 3.5431, "step": 25200 }, { "epoch": 2.7223719676549867, "grad_norm": 0.627469003200531, "learning_rate": 0.0004371937398812736, "loss": 3.5358, "step": 25250 }, { "epoch": 2.7277628032345014, "grad_norm": 0.6013321280479431, "learning_rate": 0.0004368699406368051, "loss": 3.5432, "step": 25300 }, { "epoch": 2.733153638814016, "grad_norm": 0.6281169056892395, "learning_rate": 0.0004365461413923367, "loss": 3.5394, "step": 25350 }, { "epoch": 2.7385444743935308, "grad_norm": 0.562379002571106, "learning_rate": 0.00043622234214786825, "loss": 3.5336, "step": 25400 }, { "epoch": 2.743935309973046, "grad_norm": 0.5520660281181335, "learning_rate": 0.00043589854290339985, "loss": 3.5501, "step": 25450 }, { "epoch": 2.7493261455525606, "grad_norm": 0.5592831969261169, "learning_rate": 0.0004355747436589314, "loss": 3.5281, "step": 25500 }, { "epoch": 2.7547169811320753, "grad_norm": 0.6207462549209595, "learning_rate": 0.000435250944414463, "loss": 3.519, "step": 25550 }, { "epoch": 2.7601078167115904, "grad_norm": 0.5609697103500366, "learning_rate": 0.00043492714516999456, "loss": 3.5249, "step": 25600 }, { "epoch": 2.765498652291105, "grad_norm": 0.5504409074783325, "learning_rate": 0.0004346033459255261, "loss": 3.5315, "step": 25650 }, { "epoch": 2.77088948787062, "grad_norm": 0.5725831985473633, "learning_rate": 0.0004342795466810577, "loss": 3.5187, "step": 25700 }, { "epoch": 2.776280323450135, "grad_norm": 0.5668317675590515, "learning_rate": 0.00043395574743658927, "loss": 3.5219, "step": 25750 }, { "epoch": 2.7816711590296497, 
"grad_norm": 0.583371639251709, "learning_rate": 0.00043363194819212087, "loss": 3.5371, "step": 25800 }, { "epoch": 2.7870619946091644, "grad_norm": 0.5703887343406677, "learning_rate": 0.0004333081489476524, "loss": 3.522, "step": 25850 }, { "epoch": 2.7924528301886795, "grad_norm": 0.5865967869758606, "learning_rate": 0.000432984349703184, "loss": 3.5239, "step": 25900 }, { "epoch": 2.797843665768194, "grad_norm": 0.5793086886405945, "learning_rate": 0.0004326605504587155, "loss": 3.5336, "step": 25950 }, { "epoch": 2.803234501347709, "grad_norm": 0.6235855221748352, "learning_rate": 0.0004323367512142472, "loss": 3.5396, "step": 26000 }, { "epoch": 2.803234501347709, "eval_accuracy": 0.3685781933002239, "eval_loss": 3.5141384601593018, "eval_runtime": 183.8236, "eval_samples_per_second": 97.98, "eval_steps_per_second": 6.125, "step": 26000 }, { "epoch": 2.8086253369272236, "grad_norm": 0.6220427751541138, "learning_rate": 0.0004320129519697787, "loss": 3.5379, "step": 26050 }, { "epoch": 2.8140161725067383, "grad_norm": 0.5779548287391663, "learning_rate": 0.00043168915272531023, "loss": 3.5207, "step": 26100 }, { "epoch": 2.8194070080862534, "grad_norm": 0.573950469493866, "learning_rate": 0.00043136535348084183, "loss": 3.5309, "step": 26150 }, { "epoch": 2.824797843665768, "grad_norm": 0.6422613859176636, "learning_rate": 0.0004310415542363734, "loss": 3.5288, "step": 26200 }, { "epoch": 2.830188679245283, "grad_norm": 0.6020054817199707, "learning_rate": 0.000430717754991905, "loss": 3.5225, "step": 26250 }, { "epoch": 2.835579514824798, "grad_norm": 0.5860601663589478, "learning_rate": 0.00043039395574743654, "loss": 3.5358, "step": 26300 }, { "epoch": 2.8409703504043127, "grad_norm": 0.5909083485603333, "learning_rate": 0.00043007015650296814, "loss": 3.5416, "step": 26350 }, { "epoch": 2.8463611859838274, "grad_norm": 0.5843289494514465, "learning_rate": 0.0004297463572584997, "loss": 3.5282, "step": 26400 }, { "epoch": 2.8517520215633425, "grad_norm": 
0.5413671731948853, "learning_rate": 0.0004294225580140313, "loss": 3.5301, "step": 26450 }, { "epoch": 2.857142857142857, "grad_norm": 0.6106083393096924, "learning_rate": 0.00042909875876956285, "loss": 3.5269, "step": 26500 }, { "epoch": 2.862533692722372, "grad_norm": 0.5917198061943054, "learning_rate": 0.0004287749595250944, "loss": 3.5182, "step": 26550 }, { "epoch": 2.867924528301887, "grad_norm": 0.5803675651550293, "learning_rate": 0.000428451160280626, "loss": 3.528, "step": 26600 }, { "epoch": 2.8733153638814017, "grad_norm": 0.6025941371917725, "learning_rate": 0.0004281273610361575, "loss": 3.524, "step": 26650 }, { "epoch": 2.8787061994609164, "grad_norm": 0.5806294679641724, "learning_rate": 0.0004278035617916891, "loss": 3.5223, "step": 26700 }, { "epoch": 2.884097035040431, "grad_norm": 0.5862604975700378, "learning_rate": 0.00042747976254722066, "loss": 3.5418, "step": 26750 }, { "epoch": 2.889487870619946, "grad_norm": 0.6699230670928955, "learning_rate": 0.00042715596330275226, "loss": 3.5272, "step": 26800 }, { "epoch": 2.894878706199461, "grad_norm": 0.5533138513565063, "learning_rate": 0.0004268321640582838, "loss": 3.5215, "step": 26850 }, { "epoch": 2.9002695417789757, "grad_norm": 0.6020789742469788, "learning_rate": 0.0004265083648138154, "loss": 3.5449, "step": 26900 }, { "epoch": 2.9056603773584904, "grad_norm": 0.5816729664802551, "learning_rate": 0.00042618456556934697, "loss": 3.5387, "step": 26950 }, { "epoch": 2.9110512129380055, "grad_norm": 0.5571572184562683, "learning_rate": 0.0004258607663248785, "loss": 3.5222, "step": 27000 }, { "epoch": 2.9110512129380055, "eval_accuracy": 0.36945111060092545, "eval_loss": 3.506294012069702, "eval_runtime": 183.5095, "eval_samples_per_second": 98.148, "eval_steps_per_second": 6.136, "step": 27000 }, { "epoch": 2.91644204851752, "grad_norm": 0.6122097969055176, "learning_rate": 0.0004255369670804101, "loss": 3.5398, "step": 27050 }, { "epoch": 2.921832884097035, "grad_norm": 
0.5753620862960815, "learning_rate": 0.0004252131678359417, "loss": 3.5455, "step": 27100 }, { "epoch": 2.92722371967655, "grad_norm": 0.5587215423583984, "learning_rate": 0.0004248893685914733, "loss": 3.529, "step": 27150 }, { "epoch": 2.9326145552560647, "grad_norm": 0.5809001326560974, "learning_rate": 0.00042456556934700483, "loss": 3.5189, "step": 27200 }, { "epoch": 2.9380053908355794, "grad_norm": 0.5886865258216858, "learning_rate": 0.00042424177010253643, "loss": 3.5316, "step": 27250 }, { "epoch": 2.9433962264150946, "grad_norm": 0.6303690075874329, "learning_rate": 0.00042391797085806793, "loss": 3.5273, "step": 27300 }, { "epoch": 2.9487870619946093, "grad_norm": 0.6268852353096008, "learning_rate": 0.0004235941716135995, "loss": 3.521, "step": 27350 }, { "epoch": 2.954177897574124, "grad_norm": 0.5647582411766052, "learning_rate": 0.0004232703723691311, "loss": 3.5257, "step": 27400 }, { "epoch": 2.9595687331536387, "grad_norm": 0.5563523173332214, "learning_rate": 0.00042295304910955204, "loss": 3.5211, "step": 27450 }, { "epoch": 2.964959568733154, "grad_norm": 0.5889139771461487, "learning_rate": 0.0004226292498650836, "loss": 3.5278, "step": 27500 }, { "epoch": 2.9703504043126685, "grad_norm": 0.5559007525444031, "learning_rate": 0.0004223054506206152, "loss": 3.5315, "step": 27550 }, { "epoch": 2.975741239892183, "grad_norm": 0.5638795495033264, "learning_rate": 0.00042198165137614674, "loss": 3.5319, "step": 27600 }, { "epoch": 2.981132075471698, "grad_norm": 0.5552601218223572, "learning_rate": 0.00042165785213167835, "loss": 3.5331, "step": 27650 }, { "epoch": 2.986522911051213, "grad_norm": 0.5989307165145874, "learning_rate": 0.0004213340528872099, "loss": 3.5241, "step": 27700 }, { "epoch": 2.9919137466307277, "grad_norm": 0.5570740103721619, "learning_rate": 0.00042101025364274145, "loss": 3.5344, "step": 27750 }, { "epoch": 2.9973045822102424, "grad_norm": 0.6432395577430725, "learning_rate": 0.00042068645439827305, "loss": 3.531, "step": 
27800 }, { "epoch": 3.0026954177897576, "grad_norm": 0.5959632396697998, "learning_rate": 0.0004203626551538046, "loss": 3.4774, "step": 27850 }, { "epoch": 3.0080862533692723, "grad_norm": 0.5988036394119263, "learning_rate": 0.0004200388559093362, "loss": 3.4268, "step": 27900 }, { "epoch": 3.013477088948787, "grad_norm": 0.6331761479377747, "learning_rate": 0.0004197150566648677, "loss": 3.4278, "step": 27950 }, { "epoch": 3.018867924528302, "grad_norm": 0.5820286273956299, "learning_rate": 0.00041939125742039936, "loss": 3.4471, "step": 28000 }, { "epoch": 3.018867924528302, "eval_accuracy": 0.37059087939409935, "eval_loss": 3.5022330284118652, "eval_runtime": 183.4286, "eval_samples_per_second": 98.191, "eval_steps_per_second": 6.139, "step": 28000 }, { "epoch": 3.024258760107817, "grad_norm": 0.5634085536003113, "learning_rate": 0.00041906745817593086, "loss": 3.441, "step": 28050 }, { "epoch": 3.0296495956873315, "grad_norm": 0.5626387000083923, "learning_rate": 0.00041874365893146247, "loss": 3.4477, "step": 28100 }, { "epoch": 3.035040431266846, "grad_norm": 0.6222658157348633, "learning_rate": 0.000418419859686994, "loss": 3.4389, "step": 28150 }, { "epoch": 3.0404312668463613, "grad_norm": 0.6200679540634155, "learning_rate": 0.00041809606044252557, "loss": 3.4408, "step": 28200 }, { "epoch": 3.045822102425876, "grad_norm": 0.6300202012062073, "learning_rate": 0.00041777226119805717, "loss": 3.4246, "step": 28250 }, { "epoch": 3.0512129380053907, "grad_norm": 0.5943900942802429, "learning_rate": 0.0004174484619535887, "loss": 3.4441, "step": 28300 }, { "epoch": 3.056603773584906, "grad_norm": 0.5677528977394104, "learning_rate": 0.00041712466270912033, "loss": 3.4205, "step": 28350 }, { "epoch": 3.0619946091644206, "grad_norm": 0.6124181151390076, "learning_rate": 0.0004168008634646519, "loss": 3.449, "step": 28400 }, { "epoch": 3.0673854447439353, "grad_norm": 0.6310901641845703, "learning_rate": 0.0004164770642201835, "loss": 3.4178, "step": 28450 }, { 
"epoch": 3.07277628032345, "grad_norm": 0.6272411942481995, "learning_rate": 0.00041615326497571503, "loss": 3.4442, "step": 28500 }, { "epoch": 3.078167115902965, "grad_norm": 0.6128734350204468, "learning_rate": 0.0004158294657312466, "loss": 3.4544, "step": 28550 }, { "epoch": 3.08355795148248, "grad_norm": 0.6625658273696899, "learning_rate": 0.0004155056664867782, "loss": 3.439, "step": 28600 }, { "epoch": 3.0889487870619945, "grad_norm": 0.6313899755477905, "learning_rate": 0.0004151818672423097, "loss": 3.4497, "step": 28650 }, { "epoch": 3.0943396226415096, "grad_norm": 0.5638771653175354, "learning_rate": 0.0004148580679978413, "loss": 3.4477, "step": 28700 }, { "epoch": 3.0997304582210243, "grad_norm": 0.595225989818573, "learning_rate": 0.00041453426875337284, "loss": 3.4742, "step": 28750 }, { "epoch": 3.105121293800539, "grad_norm": 0.6290755867958069, "learning_rate": 0.00041421046950890445, "loss": 3.4346, "step": 28800 }, { "epoch": 3.1105121293800537, "grad_norm": 0.6257621645927429, "learning_rate": 0.000413886670264436, "loss": 3.4416, "step": 28850 }, { "epoch": 3.115902964959569, "grad_norm": 0.6242688894271851, "learning_rate": 0.0004135628710199676, "loss": 3.4387, "step": 28900 }, { "epoch": 3.1212938005390836, "grad_norm": 0.6004605293273926, "learning_rate": 0.00041323907177549915, "loss": 3.4676, "step": 28950 }, { "epoch": 3.1266846361185983, "grad_norm": 0.6270654797554016, "learning_rate": 0.0004129152725310307, "loss": 3.4396, "step": 29000 }, { "epoch": 3.1266846361185983, "eval_accuracy": 0.37067845362167784, "eval_loss": 3.495734453201294, "eval_runtime": 183.7423, "eval_samples_per_second": 98.023, "eval_steps_per_second": 6.128, "step": 29000 }, { "epoch": 3.1320754716981134, "grad_norm": 0.6041761040687561, "learning_rate": 0.0004125914732865623, "loss": 3.4579, "step": 29050 }, { "epoch": 3.137466307277628, "grad_norm": 0.5562618970870972, "learning_rate": 0.00041226767404209386, "loss": 3.4598, "step": 29100 }, { "epoch": 
3.142857142857143, "grad_norm": 0.6546818017959595, "learning_rate": 0.00041194387479762546, "loss": 3.4509, "step": 29150 }, { "epoch": 3.1482479784366575, "grad_norm": 0.6424098610877991, "learning_rate": 0.000411620075553157, "loss": 3.4595, "step": 29200 }, { "epoch": 3.1536388140161726, "grad_norm": 0.593454897403717, "learning_rate": 0.0004112962763086886, "loss": 3.4614, "step": 29250 }, { "epoch": 3.1590296495956873, "grad_norm": 0.5967779159545898, "learning_rate": 0.0004109724770642201, "loss": 3.4656, "step": 29300 }, { "epoch": 3.164420485175202, "grad_norm": 0.6650448441505432, "learning_rate": 0.00041064867781975177, "loss": 3.4551, "step": 29350 }, { "epoch": 3.169811320754717, "grad_norm": 0.5960310697555542, "learning_rate": 0.00041032487857528327, "loss": 3.4487, "step": 29400 }, { "epoch": 3.175202156334232, "grad_norm": 0.6126105189323425, "learning_rate": 0.0004100010793308148, "loss": 3.4341, "step": 29450 }, { "epoch": 3.1805929919137466, "grad_norm": 0.5907238125801086, "learning_rate": 0.0004096772800863464, "loss": 3.4591, "step": 29500 }, { "epoch": 3.1859838274932613, "grad_norm": 0.5988099575042725, "learning_rate": 0.000409353480841878, "loss": 3.4454, "step": 29550 }, { "epoch": 3.1913746630727764, "grad_norm": 0.587455689907074, "learning_rate": 0.0004090296815974096, "loss": 3.4524, "step": 29600 }, { "epoch": 3.196765498652291, "grad_norm": 0.5905243754386902, "learning_rate": 0.00040870588235294113, "loss": 3.467, "step": 29650 }, { "epoch": 3.202156334231806, "grad_norm": 0.6206773519515991, "learning_rate": 0.00040838208310847273, "loss": 3.4518, "step": 29700 }, { "epoch": 3.207547169811321, "grad_norm": 0.6231102347373962, "learning_rate": 0.0004080582838640043, "loss": 3.4605, "step": 29750 }, { "epoch": 3.2129380053908356, "grad_norm": 0.641909658908844, "learning_rate": 0.0004077344846195359, "loss": 3.4698, "step": 29800 }, { "epoch": 3.2183288409703503, "grad_norm": 0.6328471302986145, "learning_rate": 
0.00040741068537506744, "loss": 3.4754, "step": 29850 }, { "epoch": 3.223719676549865, "grad_norm": 0.5950383543968201, "learning_rate": 0.000407086886130599, "loss": 3.4404, "step": 29900 }, { "epoch": 3.22911051212938, "grad_norm": 0.5742519497871399, "learning_rate": 0.0004067630868861306, "loss": 3.4444, "step": 29950 }, { "epoch": 3.234501347708895, "grad_norm": 0.5945967435836792, "learning_rate": 0.0004064392876416621, "loss": 3.4687, "step": 30000 }, { "epoch": 3.234501347708895, "eval_accuracy": 0.3716783861482345, "eval_loss": 3.490046501159668, "eval_runtime": 183.4166, "eval_samples_per_second": 98.197, "eval_steps_per_second": 6.139, "step": 30000 }, { "epoch": 3.2398921832884096, "grad_norm": 0.5931757092475891, "learning_rate": 0.0004061154883971937, "loss": 3.456, "step": 30050 }, { "epoch": 3.2452830188679247, "grad_norm": 0.5897646546363831, "learning_rate": 0.00040579168915272525, "loss": 3.4629, "step": 30100 }, { "epoch": 3.2506738544474394, "grad_norm": 0.6198738217353821, "learning_rate": 0.00040546788990825685, "loss": 3.4665, "step": 30150 }, { "epoch": 3.256064690026954, "grad_norm": 0.5888984799385071, "learning_rate": 0.0004051440906637884, "loss": 3.4521, "step": 30200 }, { "epoch": 3.2614555256064692, "grad_norm": 0.6324925422668457, "learning_rate": 0.00040482029141931995, "loss": 3.4564, "step": 30250 }, { "epoch": 3.266846361185984, "grad_norm": 0.6119275093078613, "learning_rate": 0.00040449649217485156, "loss": 3.4414, "step": 30300 }, { "epoch": 3.2722371967654986, "grad_norm": 0.5899844169616699, "learning_rate": 0.0004041726929303831, "loss": 3.4562, "step": 30350 }, { "epoch": 3.2776280323450133, "grad_norm": 0.6019811630249023, "learning_rate": 0.00040385536967080406, "loss": 3.4674, "step": 30400 }, { "epoch": 3.2830188679245285, "grad_norm": 0.6066054701805115, "learning_rate": 0.00040353157042633567, "loss": 3.4593, "step": 30450 }, { "epoch": 3.288409703504043, "grad_norm": 0.6244698762893677, "learning_rate": 
0.0004032077711818672, "loss": 3.4782, "step": 30500 }, { "epoch": 3.293800539083558, "grad_norm": 0.6058152318000793, "learning_rate": 0.0004028839719373988, "loss": 3.4692, "step": 30550 }, { "epoch": 3.2991913746630726, "grad_norm": 0.5960109233856201, "learning_rate": 0.00040256017269293037, "loss": 3.4614, "step": 30600 }, { "epoch": 3.3045822102425877, "grad_norm": 0.6146271824836731, "learning_rate": 0.00040223637344846187, "loss": 3.462, "step": 30650 }, { "epoch": 3.3099730458221024, "grad_norm": 0.6514822244644165, "learning_rate": 0.00040191257420399347, "loss": 3.4523, "step": 30700 }, { "epoch": 3.315363881401617, "grad_norm": 0.6561552882194519, "learning_rate": 0.000401588774959525, "loss": 3.4581, "step": 30750 }, { "epoch": 3.3207547169811322, "grad_norm": 0.6325382590293884, "learning_rate": 0.00040126497571505663, "loss": 3.4593, "step": 30800 }, { "epoch": 3.326145552560647, "grad_norm": 0.5675181746482849, "learning_rate": 0.0004009411764705882, "loss": 3.4515, "step": 30850 }, { "epoch": 3.3315363881401616, "grad_norm": 0.5973735451698303, "learning_rate": 0.0004006173772261198, "loss": 3.4729, "step": 30900 }, { "epoch": 3.3369272237196768, "grad_norm": 0.5759937763214111, "learning_rate": 0.00040029357798165133, "loss": 3.4586, "step": 30950 }, { "epoch": 3.3423180592991915, "grad_norm": 0.6003343462944031, "learning_rate": 0.00039996977873718294, "loss": 3.4757, "step": 31000 }, { "epoch": 3.3423180592991915, "eval_accuracy": 0.37271232702867707, "eval_loss": 3.483132839202881, "eval_runtime": 183.5871, "eval_samples_per_second": 98.106, "eval_steps_per_second": 6.133, "step": 31000 }, { "epoch": 3.347708894878706, "grad_norm": 0.6348472833633423, "learning_rate": 0.0003996459794927145, "loss": 3.4504, "step": 31050 }, { "epoch": 3.353099730458221, "grad_norm": 0.6023390889167786, "learning_rate": 0.00039932218024824604, "loss": 3.4698, "step": 31100 }, { "epoch": 3.358490566037736, "grad_norm": 0.6116836071014404, "learning_rate": 
0.00039899838100377764, "loss": 3.4493, "step": 31150 }, { "epoch": 3.3638814016172507, "grad_norm": 0.5540566444396973, "learning_rate": 0.0003986745817593092, "loss": 3.4486, "step": 31200 }, { "epoch": 3.3692722371967654, "grad_norm": 0.6478095054626465, "learning_rate": 0.0003983507825148408, "loss": 3.4682, "step": 31250 }, { "epoch": 3.37466307277628, "grad_norm": 0.5724659562110901, "learning_rate": 0.00039802698327037235, "loss": 3.4532, "step": 31300 }, { "epoch": 3.3800539083557952, "grad_norm": 0.5920699238777161, "learning_rate": 0.00039770318402590396, "loss": 3.4404, "step": 31350 }, { "epoch": 3.38544474393531, "grad_norm": 0.6260170936584473, "learning_rate": 0.00039737938478143545, "loss": 3.449, "step": 31400 }, { "epoch": 3.3908355795148246, "grad_norm": 0.5793566703796387, "learning_rate": 0.000397055585536967, "loss": 3.4676, "step": 31450 }, { "epoch": 3.3962264150943398, "grad_norm": 0.5834464430809021, "learning_rate": 0.0003967317862924986, "loss": 3.4691, "step": 31500 }, { "epoch": 3.4016172506738545, "grad_norm": 0.6603100895881653, "learning_rate": 0.00039640798704803016, "loss": 3.4556, "step": 31550 }, { "epoch": 3.407008086253369, "grad_norm": 0.5993121266365051, "learning_rate": 0.00039608418780356176, "loss": 3.4406, "step": 31600 }, { "epoch": 3.4123989218328843, "grad_norm": 0.661611020565033, "learning_rate": 0.0003957603885590933, "loss": 3.4628, "step": 31650 }, { "epoch": 3.417789757412399, "grad_norm": 0.6264667510986328, "learning_rate": 0.0003954365893146249, "loss": 3.4674, "step": 31700 }, { "epoch": 3.4231805929919137, "grad_norm": 0.5887061357498169, "learning_rate": 0.00039511279007015647, "loss": 3.4641, "step": 31750 }, { "epoch": 3.4285714285714284, "grad_norm": 0.6657390594482422, "learning_rate": 0.00039478899082568807, "loss": 3.453, "step": 31800 }, { "epoch": 3.4339622641509435, "grad_norm": 0.5978161692619324, "learning_rate": 0.0003944651915812196, "loss": 3.4537, "step": 31850 }, { "epoch": 
3.439353099730458, "grad_norm": 0.5931206345558167, "learning_rate": 0.0003941413923367512, "loss": 3.4645, "step": 31900 }, { "epoch": 3.444743935309973, "grad_norm": 0.5882127285003662, "learning_rate": 0.0003938175930922828, "loss": 3.4478, "step": 31950 }, { "epoch": 3.450134770889488, "grad_norm": 0.5751291513442993, "learning_rate": 0.0003934937938478143, "loss": 3.4616, "step": 32000 }, { "epoch": 3.450134770889488, "eval_accuracy": 0.3731474818443741, "eval_loss": 3.476580858230591, "eval_runtime": 183.6874, "eval_samples_per_second": 98.052, "eval_steps_per_second": 6.13, "step": 32000 }, { "epoch": 3.4555256064690028, "grad_norm": 0.5731863975524902, "learning_rate": 0.0003931699946033459, "loss": 3.4844, "step": 32050 }, { "epoch": 3.4609164420485174, "grad_norm": 0.6018608212471008, "learning_rate": 0.00039284619535887743, "loss": 3.4735, "step": 32100 }, { "epoch": 3.466307277628032, "grad_norm": 0.5918667316436768, "learning_rate": 0.00039252239611440904, "loss": 3.4458, "step": 32150 }, { "epoch": 3.4716981132075473, "grad_norm": 0.5810291171073914, "learning_rate": 0.0003921985968699406, "loss": 3.4621, "step": 32200 }, { "epoch": 3.477088948787062, "grad_norm": 0.5926442742347717, "learning_rate": 0.0003918747976254722, "loss": 3.4497, "step": 32250 }, { "epoch": 3.4824797843665767, "grad_norm": 0.6344408392906189, "learning_rate": 0.00039155099838100374, "loss": 3.4549, "step": 32300 }, { "epoch": 3.487870619946092, "grad_norm": 0.5833978652954102, "learning_rate": 0.0003912271991365353, "loss": 3.4461, "step": 32350 }, { "epoch": 3.4932614555256065, "grad_norm": 0.6158148646354675, "learning_rate": 0.0003909033998920669, "loss": 3.4513, "step": 32400 }, { "epoch": 3.498652291105121, "grad_norm": 0.6152658462524414, "learning_rate": 0.00039057960064759845, "loss": 3.4622, "step": 32450 }, { "epoch": 3.5040431266846364, "grad_norm": 0.618352472782135, "learning_rate": 0.00039025580140313005, "loss": 3.4679, "step": 32500 }, { "epoch": 
3.509433962264151, "grad_norm": 0.6015332341194153, "learning_rate": 0.0003899320021586616, "loss": 3.4678, "step": 32550 }, { "epoch": 3.5148247978436657, "grad_norm": 0.6236855387687683, "learning_rate": 0.0003896082029141932, "loss": 3.4557, "step": 32600 }, { "epoch": 3.5202156334231804, "grad_norm": 0.6326050758361816, "learning_rate": 0.00038928440366972476, "loss": 3.4591, "step": 32650 }, { "epoch": 3.525606469002695, "grad_norm": 0.5875770449638367, "learning_rate": 0.00038896060442525636, "loss": 3.4755, "step": 32700 }, { "epoch": 3.5309973045822103, "grad_norm": 0.625442624092102, "learning_rate": 0.00038863680518078786, "loss": 3.4483, "step": 32750 }, { "epoch": 3.536388140161725, "grad_norm": 0.6161897778511047, "learning_rate": 0.0003883130059363194, "loss": 3.4769, "step": 32800 }, { "epoch": 3.5417789757412397, "grad_norm": 0.543728768825531, "learning_rate": 0.000387989206691851, "loss": 3.4769, "step": 32850 }, { "epoch": 3.547169811320755, "grad_norm": 0.5994245409965515, "learning_rate": 0.00038766540744738256, "loss": 3.4708, "step": 32900 }, { "epoch": 3.5525606469002695, "grad_norm": 0.6172114014625549, "learning_rate": 0.00038734160820291417, "loss": 3.4589, "step": 32950 }, { "epoch": 3.557951482479784, "grad_norm": 0.5996062755584717, "learning_rate": 0.0003870178089584457, "loss": 3.4542, "step": 33000 }, { "epoch": 3.557951482479784, "eval_accuracy": 0.3733641357026762, "eval_loss": 3.4711246490478516, "eval_runtime": 183.9565, "eval_samples_per_second": 97.909, "eval_steps_per_second": 6.121, "step": 33000 }, { "epoch": 3.5633423180592994, "grad_norm": 0.6201596260070801, "learning_rate": 0.0003866940097139773, "loss": 3.4653, "step": 33050 }, { "epoch": 3.568733153638814, "grad_norm": 0.6010172963142395, "learning_rate": 0.0003863702104695089, "loss": 3.4675, "step": 33100 }, { "epoch": 3.5741239892183287, "grad_norm": 0.6184124946594238, "learning_rate": 0.0003860464112250404, "loss": 3.47, "step": 33150 }, { "epoch": 
3.579514824797844, "grad_norm": 0.6181142926216125, "learning_rate": 0.00038572261198057203, "loss": 3.4644, "step": 33200 }, { "epoch": 3.5849056603773586, "grad_norm": 0.596049964427948, "learning_rate": 0.0003853988127361036, "loss": 3.4499, "step": 33250 }, { "epoch": 3.5902964959568733, "grad_norm": 0.6390442848205566, "learning_rate": 0.0003850750134916352, "loss": 3.4757, "step": 33300 }, { "epoch": 3.595687331536388, "grad_norm": 0.6242977976799011, "learning_rate": 0.0003847512142471667, "loss": 3.4634, "step": 33350 }, { "epoch": 3.601078167115903, "grad_norm": 0.5911014080047607, "learning_rate": 0.0003844274150026983, "loss": 3.4587, "step": 33400 }, { "epoch": 3.606469002695418, "grad_norm": 0.6290136575698853, "learning_rate": 0.00038410361575822984, "loss": 3.4752, "step": 33450 }, { "epoch": 3.6118598382749325, "grad_norm": 0.5740649700164795, "learning_rate": 0.00038377981651376144, "loss": 3.453, "step": 33500 }, { "epoch": 3.617250673854447, "grad_norm": 0.6023151874542236, "learning_rate": 0.000383456017269293, "loss": 3.4519, "step": 33550 }, { "epoch": 3.6226415094339623, "grad_norm": 0.6273173093795776, "learning_rate": 0.00038313221802482454, "loss": 3.4473, "step": 33600 }, { "epoch": 3.628032345013477, "grad_norm": 0.6236704587936401, "learning_rate": 0.00038280841878035615, "loss": 3.4628, "step": 33650 }, { "epoch": 3.6334231805929917, "grad_norm": 0.6301255226135254, "learning_rate": 0.0003824846195358877, "loss": 3.4684, "step": 33700 }, { "epoch": 3.638814016172507, "grad_norm": 0.6301966905593872, "learning_rate": 0.0003821608202914193, "loss": 3.4473, "step": 33750 }, { "epoch": 3.6442048517520216, "grad_norm": 0.6245051026344299, "learning_rate": 0.00038183702104695085, "loss": 3.4498, "step": 33800 }, { "epoch": 3.6495956873315363, "grad_norm": 0.6175593733787537, "learning_rate": 0.00038151322180248246, "loss": 3.4379, "step": 33850 }, { "epoch": 3.6549865229110514, "grad_norm": 0.6254450082778931, "learning_rate": 
0.000381189422558014, "loss": 3.4292, "step": 33900 }, { "epoch": 3.660377358490566, "grad_norm": 0.6150121092796326, "learning_rate": 0.0003808656233135456, "loss": 3.4558, "step": 33950 }, { "epoch": 3.665768194070081, "grad_norm": 0.6018447875976562, "learning_rate": 0.00038054182406907716, "loss": 3.4533, "step": 34000 }, { "epoch": 3.665768194070081, "eval_accuracy": 0.374140243280335, "eval_loss": 3.465543270111084, "eval_runtime": 183.4349, "eval_samples_per_second": 98.187, "eval_steps_per_second": 6.138, "step": 34000 }, { "epoch": 3.671159029649596, "grad_norm": 0.6516909599304199, "learning_rate": 0.00038021802482460866, "loss": 3.4546, "step": 34050 }, { "epoch": 3.6765498652291106, "grad_norm": 0.6079384088516235, "learning_rate": 0.00037989422558014027, "loss": 3.4524, "step": 34100 }, { "epoch": 3.6819407008086253, "grad_norm": 0.6471598148345947, "learning_rate": 0.0003795704263356718, "loss": 3.448, "step": 34150 }, { "epoch": 3.68733153638814, "grad_norm": 0.6753168702125549, "learning_rate": 0.0003792466270912034, "loss": 3.4608, "step": 34200 }, { "epoch": 3.6927223719676547, "grad_norm": 0.6510220766067505, "learning_rate": 0.00037892282784673497, "loss": 3.4502, "step": 34250 }, { "epoch": 3.69811320754717, "grad_norm": 0.6369727253913879, "learning_rate": 0.0003785990286022666, "loss": 3.454, "step": 34300 }, { "epoch": 3.7035040431266846, "grad_norm": 0.6588965654373169, "learning_rate": 0.00037827522935779813, "loss": 3.4524, "step": 34350 }, { "epoch": 3.7088948787061993, "grad_norm": 0.6100491881370544, "learning_rate": 0.0003779579060982191, "loss": 3.4605, "step": 34400 }, { "epoch": 3.7142857142857144, "grad_norm": 0.6216384768486023, "learning_rate": 0.00037763410685375063, "loss": 3.4318, "step": 34450 }, { "epoch": 3.719676549865229, "grad_norm": 0.645187258720398, "learning_rate": 0.00037731030760928223, "loss": 3.4734, "step": 34500 }, { "epoch": 3.725067385444744, "grad_norm": 0.6211811304092407, "learning_rate": 
0.0003769865083648138, "loss": 3.4501, "step": 34550 }, { "epoch": 3.730458221024259, "grad_norm": 0.5801553726196289, "learning_rate": 0.0003766627091203454, "loss": 3.4566, "step": 34600 }, { "epoch": 3.7358490566037736, "grad_norm": 0.6076372265815735, "learning_rate": 0.00037633890987587694, "loss": 3.4587, "step": 34650 }, { "epoch": 3.7412398921832883, "grad_norm": 0.5914555788040161, "learning_rate": 0.00037601511063140855, "loss": 3.4643, "step": 34700 }, { "epoch": 3.7466307277628035, "grad_norm": 0.6262001991271973, "learning_rate": 0.00037569131138694004, "loss": 3.4698, "step": 34750 }, { "epoch": 3.752021563342318, "grad_norm": 0.5983145236968994, "learning_rate": 0.0003753675121424716, "loss": 3.4542, "step": 34800 }, { "epoch": 3.757412398921833, "grad_norm": 0.6043577790260315, "learning_rate": 0.0003750437128980032, "loss": 3.4627, "step": 34850 }, { "epoch": 3.7628032345013476, "grad_norm": 0.6562215089797974, "learning_rate": 0.00037471991365353475, "loss": 3.455, "step": 34900 }, { "epoch": 3.7681940700808623, "grad_norm": 0.6480857133865356, "learning_rate": 0.00037439611440906635, "loss": 3.446, "step": 34950 }, { "epoch": 3.7735849056603774, "grad_norm": 0.6734146475791931, "learning_rate": 0.0003740723151645979, "loss": 3.4704, "step": 35000 }, { "epoch": 3.7735849056603774, "eval_accuracy": 0.37506574857873876, "eval_loss": 3.4583606719970703, "eval_runtime": 183.5302, "eval_samples_per_second": 98.136, "eval_steps_per_second": 6.135, "step": 35000 }, { "epoch": 3.778975741239892, "grad_norm": 0.5972731113433838, "learning_rate": 0.0003737485159201295, "loss": 3.4544, "step": 35050 }, { "epoch": 3.784366576819407, "grad_norm": 0.6210933923721313, "learning_rate": 0.00037342471667566106, "loss": 3.4579, "step": 35100 }, { "epoch": 3.789757412398922, "grad_norm": 0.6391624212265015, "learning_rate": 0.00037310091743119266, "loss": 3.4427, "step": 35150 }, { "epoch": 3.7951482479784366, "grad_norm": 0.5807982683181763, "learning_rate": 
0.0003727771181867242, "loss": 3.4476, "step": 35200 }, { "epoch": 3.8005390835579513, "grad_norm": 0.6649582982063293, "learning_rate": 0.00037245331894225576, "loss": 3.4544, "step": 35250 }, { "epoch": 3.8059299191374665, "grad_norm": 0.6210810542106628, "learning_rate": 0.00037212951969778737, "loss": 3.4477, "step": 35300 }, { "epoch": 3.811320754716981, "grad_norm": 0.6078605651855469, "learning_rate": 0.00037180572045331887, "loss": 3.4687, "step": 35350 }, { "epoch": 3.816711590296496, "grad_norm": 0.5738853216171265, "learning_rate": 0.00037148192120885047, "loss": 3.4499, "step": 35400 }, { "epoch": 3.822102425876011, "grad_norm": 0.6362777352333069, "learning_rate": 0.000371158121964382, "loss": 3.4559, "step": 35450 }, { "epoch": 3.8274932614555257, "grad_norm": 0.6148720979690552, "learning_rate": 0.0003708343227199136, "loss": 3.4472, "step": 35500 }, { "epoch": 3.8328840970350404, "grad_norm": 0.5940957069396973, "learning_rate": 0.0003705105234754452, "loss": 3.4512, "step": 35550 }, { "epoch": 3.838274932614555, "grad_norm": 0.6278628706932068, "learning_rate": 0.0003701867242309768, "loss": 3.4608, "step": 35600 }, { "epoch": 3.8436657681940702, "grad_norm": 0.6533029079437256, "learning_rate": 0.00036986292498650833, "loss": 3.4467, "step": 35650 }, { "epoch": 3.849056603773585, "grad_norm": 0.6200972199440002, "learning_rate": 0.0003695391257420399, "loss": 3.4462, "step": 35700 }, { "epoch": 3.8544474393530996, "grad_norm": 0.6362287998199463, "learning_rate": 0.0003692153264975715, "loss": 3.4568, "step": 35750 }, { "epoch": 3.8598382749326143, "grad_norm": 0.6161172986030579, "learning_rate": 0.00036889152725310304, "loss": 3.4427, "step": 35800 }, { "epoch": 3.8652291105121295, "grad_norm": 0.6286973357200623, "learning_rate": 0.00036856772800863464, "loss": 3.4532, "step": 35850 }, { "epoch": 3.870619946091644, "grad_norm": 0.6777767539024353, "learning_rate": 0.0003682439287641662, "loss": 3.4637, "step": 35900 }, { "epoch": 
3.876010781671159, "grad_norm": 0.6047759652137756, "learning_rate": 0.0003679201295196978, "loss": 3.4663, "step": 35950 }, { "epoch": 3.881401617250674, "grad_norm": 0.5936576724052429, "learning_rate": 0.00036759633027522935, "loss": 3.4368, "step": 36000 }, { "epoch": 3.881401617250674, "eval_accuracy": 0.37563161281847657, "eval_loss": 3.452744960784912, "eval_runtime": 183.8136, "eval_samples_per_second": 97.985, "eval_steps_per_second": 6.126, "step": 36000 }, { "epoch": 3.8867924528301887, "grad_norm": 0.6451914310455322, "learning_rate": 0.00036727253103076084, "loss": 3.4734, "step": 36050 }, { "epoch": 3.8921832884097034, "grad_norm": 0.6473202109336853, "learning_rate": 0.00036694873178629245, "loss": 3.4377, "step": 36100 }, { "epoch": 3.8975741239892185, "grad_norm": 0.6400440335273743, "learning_rate": 0.000366624932541824, "loss": 3.4438, "step": 36150 }, { "epoch": 3.9029649595687332, "grad_norm": 0.6485514044761658, "learning_rate": 0.0003663011332973556, "loss": 3.4499, "step": 36200 }, { "epoch": 3.908355795148248, "grad_norm": 0.5860769748687744, "learning_rate": 0.00036597733405288715, "loss": 3.4564, "step": 36250 }, { "epoch": 3.913746630727763, "grad_norm": 0.5832682847976685, "learning_rate": 0.00036565353480841876, "loss": 3.4587, "step": 36300 }, { "epoch": 3.9191374663072778, "grad_norm": 0.5989060401916504, "learning_rate": 0.0003653297355639503, "loss": 3.4729, "step": 36350 }, { "epoch": 3.9245283018867925, "grad_norm": 0.6010338664054871, "learning_rate": 0.00036501241230437126, "loss": 3.4403, "step": 36400 }, { "epoch": 3.929919137466307, "grad_norm": 0.6393582224845886, "learning_rate": 0.0003646886130599028, "loss": 3.4452, "step": 36450 }, { "epoch": 3.935309973045822, "grad_norm": 0.6078340411186218, "learning_rate": 0.0003643648138154344, "loss": 3.4593, "step": 36500 }, { "epoch": 3.940700808625337, "grad_norm": 0.5605495572090149, "learning_rate": 0.00036404101457096597, "loss": 3.4739, "step": 36550 }, { "epoch": 
3.9460916442048517, "grad_norm": 0.6415327787399292, "learning_rate": 0.00036371721532649757, "loss": 3.4257, "step": 36600 }, { "epoch": 3.9514824797843664, "grad_norm": 0.6295135617256165, "learning_rate": 0.0003633934160820291, "loss": 3.4429, "step": 36650 }, { "epoch": 3.9568733153638815, "grad_norm": 0.6907050013542175, "learning_rate": 0.00036306961683756073, "loss": 3.4498, "step": 36700 }, { "epoch": 3.9622641509433962, "grad_norm": 0.6789411902427673, "learning_rate": 0.0003627458175930922, "loss": 3.4295, "step": 36750 }, { "epoch": 3.967654986522911, "grad_norm": 0.5623884201049805, "learning_rate": 0.0003624220183486238, "loss": 3.4439, "step": 36800 }, { "epoch": 3.973045822102426, "grad_norm": 0.6243301630020142, "learning_rate": 0.0003620982191041554, "loss": 3.4516, "step": 36850 }, { "epoch": 3.9784366576819408, "grad_norm": 0.6289325952529907, "learning_rate": 0.00036177441985968693, "loss": 3.4506, "step": 36900 }, { "epoch": 3.9838274932614555, "grad_norm": 0.6316044926643372, "learning_rate": 0.00036145062061521854, "loss": 3.4304, "step": 36950 }, { "epoch": 3.9892183288409706, "grad_norm": 0.6350688934326172, "learning_rate": 0.0003611268213707501, "loss": 3.4169, "step": 37000 }, { "epoch": 3.9892183288409706, "eval_accuracy": 0.3760328679331754, "eval_loss": 3.445186138153076, "eval_runtime": 183.4662, "eval_samples_per_second": 98.171, "eval_steps_per_second": 6.137, "step": 37000 }, { "epoch": 3.9946091644204853, "grad_norm": 0.633863627910614, "learning_rate": 0.0003608030221262817, "loss": 3.47, "step": 37050 }, { "epoch": 4.0, "grad_norm": 1.3224987983703613, "learning_rate": 0.00036047922288181324, "loss": 3.4343, "step": 37100 }, { "epoch": 4.005390835579515, "grad_norm": 0.6018860340118408, "learning_rate": 0.00036015542363734485, "loss": 3.3632, "step": 37150 }, { "epoch": 4.010781671159029, "grad_norm": 0.6127363443374634, "learning_rate": 0.0003598316243928764, "loss": 3.3584, "step": 37200 }, { "epoch": 4.0161725067385445, 
"grad_norm": 0.6423741579055786, "learning_rate": 0.00035950782514840795, "loss": 3.3555, "step": 37250 }, { "epoch": 4.02156334231806, "grad_norm": 0.5862413644790649, "learning_rate": 0.00035918402590393955, "loss": 3.3588, "step": 37300 }, { "epoch": 4.026954177897574, "grad_norm": 0.6085898876190186, "learning_rate": 0.00035886022665947105, "loss": 3.3414, "step": 37350 }, { "epoch": 4.032345013477089, "grad_norm": 0.5962231755256653, "learning_rate": 0.0003585364274150027, "loss": 3.3457, "step": 37400 }, { "epoch": 4.037735849056604, "grad_norm": 0.6367636919021606, "learning_rate": 0.0003582126281705342, "loss": 3.3553, "step": 37450 }, { "epoch": 4.0431266846361185, "grad_norm": 0.6896623373031616, "learning_rate": 0.0003578888289260658, "loss": 3.3537, "step": 37500 }, { "epoch": 4.048517520215634, "grad_norm": 0.6605024933815002, "learning_rate": 0.00035756502968159736, "loss": 3.3657, "step": 37550 }, { "epoch": 4.053908355795148, "grad_norm": 0.6501374244689941, "learning_rate": 0.00035724123043712896, "loss": 3.3591, "step": 37600 }, { "epoch": 4.059299191374663, "grad_norm": 0.6687803864479065, "learning_rate": 0.0003569174311926605, "loss": 3.3656, "step": 37650 }, { "epoch": 4.064690026954178, "grad_norm": 0.6082162261009216, "learning_rate": 0.00035659363194819206, "loss": 3.3675, "step": 37700 }, { "epoch": 4.070080862533692, "grad_norm": 0.5966090559959412, "learning_rate": 0.00035626983270372367, "loss": 3.3737, "step": 37750 }, { "epoch": 4.0754716981132075, "grad_norm": 0.6330704689025879, "learning_rate": 0.0003559460334592552, "loss": 3.3645, "step": 37800 }, { "epoch": 4.080862533692723, "grad_norm": 0.6244950294494629, "learning_rate": 0.0003556222342147868, "loss": 3.3694, "step": 37850 }, { "epoch": 4.086253369272237, "grad_norm": 0.6000611186027527, "learning_rate": 0.0003552984349703184, "loss": 3.3807, "step": 37900 }, { "epoch": 4.091644204851752, "grad_norm": 0.6247694492340088, "learning_rate": 0.00035497463572585, "loss": 3.3803, 
"step": 37950 }, { "epoch": 4.097035040431267, "grad_norm": 0.6111884713172913, "learning_rate": 0.00035465083648138153, "loss": 3.36, "step": 38000 }, { "epoch": 4.097035040431267, "eval_accuracy": 0.37655614024088996, "eval_loss": 3.4490854740142822, "eval_runtime": 183.4823, "eval_samples_per_second": 98.162, "eval_steps_per_second": 6.137, "step": 38000 }, { "epoch": 4.1024258760107815, "grad_norm": 0.6444174647331238, "learning_rate": 0.00035432703723691314, "loss": 3.3765, "step": 38050 }, { "epoch": 4.107816711590297, "grad_norm": 0.6280059814453125, "learning_rate": 0.00035400323799244463, "loss": 3.3616, "step": 38100 }, { "epoch": 4.113207547169812, "grad_norm": 0.6255881786346436, "learning_rate": 0.0003536794387479762, "loss": 3.377, "step": 38150 }, { "epoch": 4.118598382749326, "grad_norm": 0.65887850522995, "learning_rate": 0.0003533556395035078, "loss": 3.3807, "step": 38200 }, { "epoch": 4.123989218328841, "grad_norm": 0.6200643181800842, "learning_rate": 0.00035303184025903934, "loss": 3.3683, "step": 38250 }, { "epoch": 4.129380053908355, "grad_norm": 0.6016085147857666, "learning_rate": 0.00035270804101457094, "loss": 3.3729, "step": 38300 }, { "epoch": 4.1347708894878705, "grad_norm": 0.6084494590759277, "learning_rate": 0.0003523842417701025, "loss": 3.3562, "step": 38350 }, { "epoch": 4.140161725067386, "grad_norm": 0.6303635835647583, "learning_rate": 0.0003520604425256341, "loss": 3.378, "step": 38400 }, { "epoch": 4.1455525606469, "grad_norm": 0.6024056077003479, "learning_rate": 0.000351743119266055, "loss": 3.3748, "step": 38450 }, { "epoch": 4.150943396226415, "grad_norm": 0.6303093433380127, "learning_rate": 0.0003514193200215866, "loss": 3.3691, "step": 38500 }, { "epoch": 4.15633423180593, "grad_norm": 0.6160764694213867, "learning_rate": 0.00035109552077711815, "loss": 3.3674, "step": 38550 }, { "epoch": 4.1617250673854445, "grad_norm": 0.6600135564804077, "learning_rate": 0.00035077172153264976, "loss": 3.3676, "step": 38600 }, { 
"epoch": 4.16711590296496, "grad_norm": 0.6453423500061035, "learning_rate": 0.0003504479222881813, "loss": 3.3716, "step": 38650 }, { "epoch": 4.172506738544475, "grad_norm": 0.6940420866012573, "learning_rate": 0.0003501241230437129, "loss": 3.3884, "step": 38700 }, { "epoch": 4.177897574123989, "grad_norm": 0.615378737449646, "learning_rate": 0.0003498003237992444, "loss": 3.3635, "step": 38750 }, { "epoch": 4.183288409703504, "grad_norm": 0.6390400528907776, "learning_rate": 0.000349476524554776, "loss": 3.3801, "step": 38800 }, { "epoch": 4.188679245283019, "grad_norm": 0.6343681812286377, "learning_rate": 0.00034915272531030756, "loss": 3.3872, "step": 38850 }, { "epoch": 4.1940700808625335, "grad_norm": 0.5857776403427124, "learning_rate": 0.0003488289260658391, "loss": 3.4035, "step": 38900 }, { "epoch": 4.199460916442049, "grad_norm": 0.6787095665931702, "learning_rate": 0.0003485051268213707, "loss": 3.3685, "step": 38950 }, { "epoch": 4.204851752021563, "grad_norm": 0.6941482424736023, "learning_rate": 0.00034818132757690227, "loss": 3.3837, "step": 39000 }, { "epoch": 4.204851752021563, "eval_accuracy": 0.3769635885701943, "eval_loss": 3.4448821544647217, "eval_runtime": 183.6433, "eval_samples_per_second": 98.076, "eval_steps_per_second": 6.131, "step": 39000 }, { "epoch": 4.210242587601078, "grad_norm": 0.6923218965530396, "learning_rate": 0.0003478575283324339, "loss": 3.3915, "step": 39050 }, { "epoch": 4.215633423180593, "grad_norm": 0.6843828558921814, "learning_rate": 0.0003475337290879654, "loss": 3.392, "step": 39100 }, { "epoch": 4.2210242587601075, "grad_norm": 0.6378276348114014, "learning_rate": 0.00034720992984349703, "loss": 3.381, "step": 39150 }, { "epoch": 4.226415094339623, "grad_norm": 0.6532413363456726, "learning_rate": 0.0003468861305990286, "loss": 3.3718, "step": 39200 }, { "epoch": 4.231805929919138, "grad_norm": 0.6036219000816345, "learning_rate": 0.0003465623313545602, "loss": 3.3749, "step": 39250 }, { "epoch": 
4.237196765498652, "grad_norm": 0.6405287384986877, "learning_rate": 0.00034623853211009173, "loss": 3.3757, "step": 39300 }, { "epoch": 4.242587601078167, "grad_norm": 0.5965821146965027, "learning_rate": 0.0003459212088505127, "loss": 3.3881, "step": 39350 }, { "epoch": 4.247978436657682, "grad_norm": 0.6920449137687683, "learning_rate": 0.0003455974096060442, "loss": 3.3822, "step": 39400 }, { "epoch": 4.2533692722371965, "grad_norm": 0.6426447629928589, "learning_rate": 0.0003452736103615758, "loss": 3.392, "step": 39450 }, { "epoch": 4.258760107816712, "grad_norm": 0.6216702461242676, "learning_rate": 0.00034494981111710734, "loss": 3.3791, "step": 39500 }, { "epoch": 4.264150943396227, "grad_norm": 0.6139161586761475, "learning_rate": 0.00034462601187263894, "loss": 3.3677, "step": 39550 }, { "epoch": 4.269541778975741, "grad_norm": 0.6620310544967651, "learning_rate": 0.0003443022126281705, "loss": 3.3826, "step": 39600 }, { "epoch": 4.274932614555256, "grad_norm": 0.6156401038169861, "learning_rate": 0.00034397841338370204, "loss": 3.3912, "step": 39650 }, { "epoch": 4.280323450134771, "grad_norm": 0.6847094297409058, "learning_rate": 0.00034365461413923365, "loss": 3.3743, "step": 39700 }, { "epoch": 4.285714285714286, "grad_norm": 0.6415292620658875, "learning_rate": 0.0003433308148947652, "loss": 3.3973, "step": 39750 }, { "epoch": 4.291105121293801, "grad_norm": 0.6539881229400635, "learning_rate": 0.0003430070156502968, "loss": 3.3823, "step": 39800 }, { "epoch": 4.296495956873315, "grad_norm": 0.6364943981170654, "learning_rate": 0.00034268321640582835, "loss": 3.3856, "step": 39850 }, { "epoch": 4.30188679245283, "grad_norm": 0.6464160084724426, "learning_rate": 0.00034235941716135996, "loss": 3.3868, "step": 39900 }, { "epoch": 4.307277628032345, "grad_norm": 0.7435222864151001, "learning_rate": 0.0003420356179168915, "loss": 3.3939, "step": 39950 }, { "epoch": 4.3126684636118595, "grad_norm": 0.6538057327270508, "learning_rate": 
0.0003417118186724231, "loss": 3.4004, "step": 40000 }, { "epoch": 4.3126684636118595, "eval_accuracy": 0.37684906842643784, "eval_loss": 3.4412429332733154, "eval_runtime": 183.3977, "eval_samples_per_second": 98.207, "eval_steps_per_second": 6.14, "step": 40000 }, { "epoch": 4.318059299191375, "grad_norm": 0.6281383037567139, "learning_rate": 0.00034138801942795467, "loss": 3.389, "step": 40050 }, { "epoch": 4.32345013477089, "grad_norm": 0.6336797475814819, "learning_rate": 0.00034106422018348616, "loss": 3.3922, "step": 40100 }, { "epoch": 4.328840970350404, "grad_norm": 0.638562798500061, "learning_rate": 0.00034074042093901777, "loss": 3.3743, "step": 40150 }, { "epoch": 4.334231805929919, "grad_norm": 0.6539092063903809, "learning_rate": 0.0003404166216945493, "loss": 3.4048, "step": 40200 }, { "epoch": 4.339622641509434, "grad_norm": 0.680752694606781, "learning_rate": 0.0003400928224500809, "loss": 3.4125, "step": 40250 }, { "epoch": 4.345013477088949, "grad_norm": 0.6655305624008179, "learning_rate": 0.00033976902320561247, "loss": 3.3806, "step": 40300 }, { "epoch": 4.350404312668464, "grad_norm": 0.6368870735168457, "learning_rate": 0.0003394452239611441, "loss": 3.3936, "step": 40350 }, { "epoch": 4.355795148247978, "grad_norm": 0.6510953307151794, "learning_rate": 0.00033912142471667563, "loss": 3.3909, "step": 40400 }, { "epoch": 4.361185983827493, "grad_norm": 0.6622096300125122, "learning_rate": 0.00033879762547220723, "loss": 3.3909, "step": 40450 }, { "epoch": 4.366576819407008, "grad_norm": 0.6338421702384949, "learning_rate": 0.0003384738262277388, "loss": 3.3849, "step": 40500 }, { "epoch": 4.3719676549865225, "grad_norm": 0.6532634496688843, "learning_rate": 0.00033815002698327033, "loss": 3.3902, "step": 40550 }, { "epoch": 4.377358490566038, "grad_norm": 0.6484360098838806, "learning_rate": 0.00033782622773880194, "loss": 3.3972, "step": 40600 }, { "epoch": 4.382749326145553, "grad_norm": 0.6758602857589722, "learning_rate": 
0.0003375024284943335, "loss": 3.3879, "step": 40650 }, { "epoch": 4.388140161725067, "grad_norm": 0.6421069502830505, "learning_rate": 0.0003371786292498651, "loss": 3.4004, "step": 40700 }, { "epoch": 4.393530997304582, "grad_norm": 0.6300466060638428, "learning_rate": 0.0003368548300053966, "loss": 3.4042, "step": 40750 }, { "epoch": 4.398921832884097, "grad_norm": 0.6359042525291443, "learning_rate": 0.0003365310307609282, "loss": 3.3919, "step": 40800 }, { "epoch": 4.404312668463612, "grad_norm": 0.6414315104484558, "learning_rate": 0.00033620723151645975, "loss": 3.3817, "step": 40850 }, { "epoch": 4.409703504043127, "grad_norm": 0.6350900530815125, "learning_rate": 0.0003358834322719913, "loss": 3.385, "step": 40900 }, { "epoch": 4.415094339622642, "grad_norm": 1.0420467853546143, "learning_rate": 0.0003355596330275229, "loss": 3.3806, "step": 40950 }, { "epoch": 4.420485175202156, "grad_norm": 0.6752620935440063, "learning_rate": 0.00033523583378305445, "loss": 3.4147, "step": 41000 }, { "epoch": 4.420485175202156, "eval_accuracy": 0.3782003843921865, "eval_loss": 3.4338560104370117, "eval_runtime": 183.5528, "eval_samples_per_second": 98.124, "eval_steps_per_second": 6.134, "step": 41000 }, { "epoch": 4.425876010781671, "grad_norm": 0.6579931378364563, "learning_rate": 0.00033491203453858606, "loss": 3.3897, "step": 41050 }, { "epoch": 4.431266846361186, "grad_norm": 0.6752576231956482, "learning_rate": 0.0003345882352941176, "loss": 3.3981, "step": 41100 }, { "epoch": 4.436657681940701, "grad_norm": 0.6758126020431519, "learning_rate": 0.0003342644360496492, "loss": 3.4058, "step": 41150 }, { "epoch": 4.442048517520216, "grad_norm": 0.635006308555603, "learning_rate": 0.00033394063680518076, "loss": 3.3896, "step": 41200 }, { "epoch": 4.44743935309973, "grad_norm": 0.6431044340133667, "learning_rate": 0.00033361683756071237, "loss": 3.4063, "step": 41250 }, { "epoch": 4.452830188679245, "grad_norm": 0.618403971195221, "learning_rate": 
0.0003332930383162439, "loss": 3.3787, "step": 41300 }, { "epoch": 4.45822102425876, "grad_norm": 0.7186667323112488, "learning_rate": 0.0003329692390717754, "loss": 3.3842, "step": 41350 }, { "epoch": 4.463611859838275, "grad_norm": 0.6048040390014648, "learning_rate": 0.0003326454398273071, "loss": 3.4078, "step": 41400 }, { "epoch": 4.46900269541779, "grad_norm": 0.6665133833885193, "learning_rate": 0.00033232164058283857, "loss": 3.3829, "step": 41450 }, { "epoch": 4.474393530997305, "grad_norm": 0.8045024275779724, "learning_rate": 0.0003319978413383702, "loss": 3.3956, "step": 41500 }, { "epoch": 4.479784366576819, "grad_norm": 0.667209267616272, "learning_rate": 0.0003316740420939017, "loss": 3.3875, "step": 41550 }, { "epoch": 4.485175202156334, "grad_norm": 0.6306861639022827, "learning_rate": 0.00033135024284943333, "loss": 3.4, "step": 41600 }, { "epoch": 4.490566037735849, "grad_norm": 0.6058953404426575, "learning_rate": 0.0003310264436049649, "loss": 3.3834, "step": 41650 }, { "epoch": 4.495956873315364, "grad_norm": 0.5988507270812988, "learning_rate": 0.00033070912034538583, "loss": 3.4056, "step": 41700 }, { "epoch": 4.501347708894879, "grad_norm": 0.6373119354248047, "learning_rate": 0.0003303853211009174, "loss": 3.39, "step": 41750 }, { "epoch": 4.506738544474393, "grad_norm": 0.6669942140579224, "learning_rate": 0.000330061521856449, "loss": 3.3935, "step": 41800 }, { "epoch": 4.512129380053908, "grad_norm": 0.6317675113677979, "learning_rate": 0.00032973772261198054, "loss": 3.381, "step": 41850 }, { "epoch": 4.517520215633423, "grad_norm": 0.6255390644073486, "learning_rate": 0.00032941392336751214, "loss": 3.3942, "step": 41900 }, { "epoch": 4.5229110512129385, "grad_norm": 0.6369220018386841, "learning_rate": 0.0003290901241230437, "loss": 3.3759, "step": 41950 }, { "epoch": 4.528301886792453, "grad_norm": 0.6291524171829224, "learning_rate": 0.0003287663248785753, "loss": 3.3877, "step": 42000 }, { "epoch": 4.528301886792453, 
"eval_accuracy": 0.37846582339711726, "eval_loss": 3.429187059402466, "eval_runtime": 183.4827, "eval_samples_per_second": 98.162, "eval_steps_per_second": 6.137, "step": 42000 }, { "epoch": 4.533692722371968, "grad_norm": 0.6402625441551208, "learning_rate": 0.00032844252563410685, "loss": 3.3852, "step": 42050 }, { "epoch": 4.539083557951482, "grad_norm": 0.667153537273407, "learning_rate": 0.00032811872638963834, "loss": 3.3935, "step": 42100 }, { "epoch": 4.544474393530997, "grad_norm": 0.6692747473716736, "learning_rate": 0.00032779492714516995, "loss": 3.4201, "step": 42150 }, { "epoch": 4.549865229110512, "grad_norm": 0.6011302471160889, "learning_rate": 0.0003274711279007015, "loss": 3.3878, "step": 42200 }, { "epoch": 4.555256064690027, "grad_norm": 0.6687884330749512, "learning_rate": 0.0003271473286562331, "loss": 3.3848, "step": 42250 }, { "epoch": 4.560646900269542, "grad_norm": 0.6640747785568237, "learning_rate": 0.00032682352941176466, "loss": 3.3882, "step": 42300 }, { "epoch": 4.566037735849057, "grad_norm": 0.6145426034927368, "learning_rate": 0.00032649973016729626, "loss": 3.3902, "step": 42350 }, { "epoch": 4.571428571428571, "grad_norm": 0.6300755143165588, "learning_rate": 0.0003261759309228278, "loss": 3.3816, "step": 42400 }, { "epoch": 4.576819407008086, "grad_norm": 0.6550257205963135, "learning_rate": 0.0003258521316783594, "loss": 3.3922, "step": 42450 }, { "epoch": 4.5822102425876015, "grad_norm": 0.6228023767471313, "learning_rate": 0.00032552833243389097, "loss": 3.3895, "step": 42500 }, { "epoch": 4.587601078167116, "grad_norm": 0.6689121723175049, "learning_rate": 0.0003252045331894225, "loss": 3.3885, "step": 42550 }, { "epoch": 4.592991913746631, "grad_norm": 0.6270008683204651, "learning_rate": 0.0003248807339449541, "loss": 3.3753, "step": 42600 }, { "epoch": 4.598382749326145, "grad_norm": 0.6733295917510986, "learning_rate": 0.00032455693470048567, "loss": 3.3757, "step": 42650 }, { "epoch": 4.60377358490566, "grad_norm": 
0.6679153442382812, "learning_rate": 0.0003242331354560173, "loss": 3.3833, "step": 42700 }, { "epoch": 4.609164420485175, "grad_norm": 0.6631647348403931, "learning_rate": 0.0003239093362115488, "loss": 3.4027, "step": 42750 }, { "epoch": 4.6145552560646905, "grad_norm": 0.6664791107177734, "learning_rate": 0.00032358553696708043, "loss": 3.3741, "step": 42800 }, { "epoch": 4.619946091644205, "grad_norm": 0.6315823793411255, "learning_rate": 0.00032326173772261193, "loss": 3.3814, "step": 42850 }, { "epoch": 4.62533692722372, "grad_norm": 0.6814761161804199, "learning_rate": 0.00032293793847814353, "loss": 3.3802, "step": 42900 }, { "epoch": 4.630727762803234, "grad_norm": 0.6890589594841003, "learning_rate": 0.0003226141392336751, "loss": 3.4014, "step": 42950 }, { "epoch": 4.636118598382749, "grad_norm": 0.6870799660682678, "learning_rate": 0.00032229033998920663, "loss": 3.4082, "step": 43000 }, { "epoch": 4.636118598382749, "eval_accuracy": 0.3788883744778278, "eval_loss": 3.4247143268585205, "eval_runtime": 183.8361, "eval_samples_per_second": 97.973, "eval_steps_per_second": 6.125, "step": 43000 }, { "epoch": 4.6415094339622645, "grad_norm": 0.642059862613678, "learning_rate": 0.00032196654074473824, "loss": 3.3919, "step": 43050 }, { "epoch": 4.646900269541779, "grad_norm": 0.6234888434410095, "learning_rate": 0.0003216427415002698, "loss": 3.3981, "step": 43100 }, { "epoch": 4.652291105121294, "grad_norm": 0.6437119245529175, "learning_rate": 0.0003213189422558014, "loss": 3.4103, "step": 43150 }, { "epoch": 4.657681940700809, "grad_norm": 0.6424395442008972, "learning_rate": 0.00032099514301133295, "loss": 3.382, "step": 43200 }, { "epoch": 4.663072776280323, "grad_norm": 0.6444264650344849, "learning_rate": 0.00032067134376686455, "loss": 3.3805, "step": 43250 }, { "epoch": 4.668463611859838, "grad_norm": 0.6374757289886475, "learning_rate": 0.0003203475445223961, "loss": 3.3922, "step": 43300 }, { "epoch": 4.6738544474393535, "grad_norm": 
0.709404468536377, "learning_rate": 0.0003200237452779277, "loss": 3.3917, "step": 43350 }, { "epoch": 4.679245283018868, "grad_norm": 0.6628051996231079, "learning_rate": 0.00031969994603345926, "loss": 3.402, "step": 43400 }, { "epoch": 4.684636118598383, "grad_norm": 0.624970555305481, "learning_rate": 0.00031937614678899075, "loss": 3.3909, "step": 43450 }, { "epoch": 4.690026954177897, "grad_norm": 0.6710526347160339, "learning_rate": 0.00031905234754452236, "loss": 3.4032, "step": 43500 }, { "epoch": 4.695417789757412, "grad_norm": 0.667590320110321, "learning_rate": 0.0003187285483000539, "loss": 3.3876, "step": 43550 }, { "epoch": 4.7008086253369274, "grad_norm": 0.65190190076828, "learning_rate": 0.0003184047490555855, "loss": 3.378, "step": 43600 }, { "epoch": 4.706199460916442, "grad_norm": 0.6367104649543762, "learning_rate": 0.00031808094981111706, "loss": 3.393, "step": 43650 }, { "epoch": 4.711590296495957, "grad_norm": 0.6436135172843933, "learning_rate": 0.00031775715056664867, "loss": 3.3811, "step": 43700 }, { "epoch": 4.716981132075472, "grad_norm": 0.626654326915741, "learning_rate": 0.0003174333513221802, "loss": 3.3992, "step": 43750 }, { "epoch": 4.722371967654986, "grad_norm": 0.6450473666191101, "learning_rate": 0.00031710955207771177, "loss": 3.3913, "step": 43800 }, { "epoch": 4.727762803234501, "grad_norm": 0.6737494468688965, "learning_rate": 0.0003167857528332434, "loss": 3.3945, "step": 43850 }, { "epoch": 4.7331536388140165, "grad_norm": 0.5905812382698059, "learning_rate": 0.0003164619535887749, "loss": 3.3977, "step": 43900 }, { "epoch": 4.738544474393531, "grad_norm": 0.649468183517456, "learning_rate": 0.00031613815434430653, "loss": 3.3887, "step": 43950 }, { "epoch": 4.743935309973046, "grad_norm": 0.63776034116745, "learning_rate": 0.0003158143550998381, "loss": 3.3935, "step": 44000 }, { "epoch": 4.743935309973046, "eval_accuracy": 0.3795477888539739, "eval_loss": 3.4194493293762207, "eval_runtime": 183.3803, 
"eval_samples_per_second": 98.217, "eval_steps_per_second": 6.14, "step": 44000 }, { "epoch": 4.74932614555256, "grad_norm": 0.6686685085296631, "learning_rate": 0.0003154905558553697, "loss": 3.4027, "step": 44050 }, { "epoch": 4.754716981132075, "grad_norm": 0.6547311544418335, "learning_rate": 0.0003151667566109012, "loss": 3.3956, "step": 44100 }, { "epoch": 4.7601078167115904, "grad_norm": 0.6947376728057861, "learning_rate": 0.00031484295736643284, "loss": 3.4059, "step": 44150 }, { "epoch": 4.765498652291106, "grad_norm": 0.6252668499946594, "learning_rate": 0.00031451915812196434, "loss": 3.4025, "step": 44200 }, { "epoch": 4.77088948787062, "grad_norm": 0.6244868040084839, "learning_rate": 0.0003141953588774959, "loss": 3.3631, "step": 44250 }, { "epoch": 4.776280323450135, "grad_norm": 0.6843665838241577, "learning_rate": 0.0003138715596330275, "loss": 3.3909, "step": 44300 }, { "epoch": 4.781671159029649, "grad_norm": 0.6329135298728943, "learning_rate": 0.00031354776038855904, "loss": 3.3944, "step": 44350 }, { "epoch": 4.787061994609164, "grad_norm": 0.6198347806930542, "learning_rate": 0.00031322396114409065, "loss": 3.3779, "step": 44400 }, { "epoch": 4.7924528301886795, "grad_norm": 0.5985150337219238, "learning_rate": 0.0003129001618996222, "loss": 3.4032, "step": 44450 }, { "epoch": 4.797843665768194, "grad_norm": 0.6926071047782898, "learning_rate": 0.0003125763626551538, "loss": 3.3996, "step": 44500 }, { "epoch": 4.803234501347709, "grad_norm": 0.6708675026893616, "learning_rate": 0.00031225256341068535, "loss": 3.3724, "step": 44550 }, { "epoch": 4.808625336927224, "grad_norm": 0.6399439573287964, "learning_rate": 0.00031192876416621696, "loss": 3.3872, "step": 44600 }, { "epoch": 4.814016172506738, "grad_norm": 0.6167232394218445, "learning_rate": 0.0003116049649217485, "loss": 3.3877, "step": 44650 }, { "epoch": 4.819407008086253, "grad_norm": 0.6150270104408264, "learning_rate": 0.00031128116567728, "loss": 3.3872, "step": 44700 }, { 
"epoch": 4.824797843665769, "grad_norm": 0.623081386089325, "learning_rate": 0.00031095736643281166, "loss": 3.401, "step": 44750 }, { "epoch": 4.830188679245283, "grad_norm": 0.7186699509620667, "learning_rate": 0.00031063356718834316, "loss": 3.3718, "step": 44800 }, { "epoch": 4.835579514824798, "grad_norm": 0.702882707118988, "learning_rate": 0.00031030976794387476, "loss": 3.386, "step": 44850 }, { "epoch": 4.840970350404312, "grad_norm": 0.6542748808860779, "learning_rate": 0.0003099859686994063, "loss": 3.3638, "step": 44900 }, { "epoch": 4.846361185983827, "grad_norm": 0.6316834688186646, "learning_rate": 0.0003096621694549379, "loss": 3.4019, "step": 44950 }, { "epoch": 4.8517520215633425, "grad_norm": 0.6602932810783386, "learning_rate": 0.00030933837021046947, "loss": 3.3906, "step": 45000 }, { "epoch": 4.8517520215633425, "eval_accuracy": 0.38019666390000195, "eval_loss": 3.412461042404175, "eval_runtime": 183.6637, "eval_samples_per_second": 98.065, "eval_steps_per_second": 6.131, "step": 45000 }, { "epoch": 4.857142857142857, "grad_norm": 0.6776353120803833, "learning_rate": 0.0003090145709660011, "loss": 3.3985, "step": 45050 }, { "epoch": 4.862533692722372, "grad_norm": 0.6363914608955383, "learning_rate": 0.0003086907717215326, "loss": 3.4018, "step": 45100 }, { "epoch": 4.867924528301887, "grad_norm": 0.6715949177742004, "learning_rate": 0.0003083669724770642, "loss": 3.3743, "step": 45150 }, { "epoch": 4.873315363881401, "grad_norm": 0.6310675740242004, "learning_rate": 0.0003080431732325958, "loss": 3.3816, "step": 45200 }, { "epoch": 4.878706199460916, "grad_norm": 0.7090080976486206, "learning_rate": 0.00030772584997301673, "loss": 3.3979, "step": 45250 }, { "epoch": 4.884097035040432, "grad_norm": 0.6891008019447327, "learning_rate": 0.0003074020507285483, "loss": 3.4082, "step": 45300 }, { "epoch": 4.889487870619946, "grad_norm": 0.6853170990943909, "learning_rate": 0.0003070782514840799, "loss": 3.3852, "step": 45350 }, { "epoch": 
4.894878706199461, "grad_norm": 0.6550562381744385, "learning_rate": 0.00030675445223961144, "loss": 3.3967, "step": 45400 }, { "epoch": 4.900269541778976, "grad_norm": 0.6445778608322144, "learning_rate": 0.00030643065299514294, "loss": 3.3929, "step": 45450 }, { "epoch": 4.90566037735849, "grad_norm": 0.6672085523605347, "learning_rate": 0.00030610685375067454, "loss": 3.3912, "step": 45500 }, { "epoch": 4.9110512129380055, "grad_norm": 0.6518294811248779, "learning_rate": 0.0003057830545062061, "loss": 3.3933, "step": 45550 }, { "epoch": 4.916442048517521, "grad_norm": 0.6551598310470581, "learning_rate": 0.0003054592552617377, "loss": 3.3893, "step": 45600 }, { "epoch": 4.921832884097035, "grad_norm": 0.6479905843734741, "learning_rate": 0.00030513545601726925, "loss": 3.3906, "step": 45650 }, { "epoch": 4.92722371967655, "grad_norm": 0.6924375891685486, "learning_rate": 0.00030481165677280085, "loss": 3.3681, "step": 45700 }, { "epoch": 4.932614555256064, "grad_norm": 0.6434122920036316, "learning_rate": 0.0003044878575283324, "loss": 3.3833, "step": 45750 }, { "epoch": 4.938005390835579, "grad_norm": 0.7251989245414734, "learning_rate": 0.000304164058283864, "loss": 3.4028, "step": 45800 }, { "epoch": 4.943396226415095, "grad_norm": 0.704974353313446, "learning_rate": 0.00030384025903939556, "loss": 3.3812, "step": 45850 }, { "epoch": 4.948787061994609, "grad_norm": 0.6566643118858337, "learning_rate": 0.0003035164597949271, "loss": 3.3898, "step": 45900 }, { "epoch": 4.954177897574124, "grad_norm": 0.619677722454071, "learning_rate": 0.0003031926605504587, "loss": 3.3809, "step": 45950 }, { "epoch": 4.959568733153639, "grad_norm": 0.6274846196174622, "learning_rate": 0.00030286886130599026, "loss": 3.3874, "step": 46000 }, { "epoch": 4.959568733153639, "eval_accuracy": 0.38035877400862117, "eval_loss": 3.4135515689849854, "eval_runtime": 183.7281, "eval_samples_per_second": 98.031, "eval_steps_per_second": 6.129, "step": 46000 }, { "epoch": 
4.964959568733153, "grad_norm": 0.6403006911277771, "learning_rate": 0.00030254506206152187, "loss": 3.3988, "step": 46050 }, { "epoch": 4.9703504043126685, "grad_norm": 0.6312180757522583, "learning_rate": 0.00030222126281705336, "loss": 3.3685, "step": 46100 }, { "epoch": 4.975741239892184, "grad_norm": 0.6532524228096008, "learning_rate": 0.000301897463572585, "loss": 3.3742, "step": 46150 }, { "epoch": 4.981132075471698, "grad_norm": 0.6965556144714355, "learning_rate": 0.0003015736643281165, "loss": 3.3847, "step": 46200 }, { "epoch": 4.986522911051213, "grad_norm": 0.6514734625816345, "learning_rate": 0.00030124986508364807, "loss": 3.4039, "step": 46250 }, { "epoch": 4.991913746630727, "grad_norm": 0.6422972679138184, "learning_rate": 0.0003009260658391797, "loss": 3.4007, "step": 46300 }, { "epoch": 4.997304582210242, "grad_norm": 0.6854938268661499, "learning_rate": 0.0003006022665947112, "loss": 3.3792, "step": 46350 }, { "epoch": 5.002695417789758, "grad_norm": 0.6698465943336487, "learning_rate": 0.00030027846735024283, "loss": 3.3286, "step": 46400 }, { "epoch": 5.008086253369272, "grad_norm": 0.7109942436218262, "learning_rate": 0.00029995466810577443, "loss": 3.3071, "step": 46450 }, { "epoch": 5.013477088948787, "grad_norm": 0.6470203995704651, "learning_rate": 0.00029963086886130593, "loss": 3.3059, "step": 46500 }, { "epoch": 5.018867924528302, "grad_norm": 0.6870930790901184, "learning_rate": 0.00029930706961683754, "loss": 3.3071, "step": 46550 }, { "epoch": 5.024258760107816, "grad_norm": 0.6598406434059143, "learning_rate": 0.0002989832703723691, "loss": 3.2899, "step": 46600 }, { "epoch": 5.0296495956873315, "grad_norm": 0.7045952081680298, "learning_rate": 0.0002986594711279007, "loss": 3.2978, "step": 46650 }, { "epoch": 5.035040431266847, "grad_norm": 0.643715500831604, "learning_rate": 0.00029833567188343224, "loss": 3.2985, "step": 46700 }, { "epoch": 5.040431266846361, "grad_norm": 0.688113272190094, "learning_rate": 
0.00029801187263896385, "loss": 3.3033, "step": 46750 }, { "epoch": 5.045822102425876, "grad_norm": 0.6977607607841492, "learning_rate": 0.0002976880733944954, "loss": 3.3141, "step": 46800 }, { "epoch": 5.051212938005391, "grad_norm": 0.6813511252403259, "learning_rate": 0.00029736427415002695, "loss": 3.3115, "step": 46850 }, { "epoch": 5.056603773584905, "grad_norm": 0.6466038823127747, "learning_rate": 0.0002970404749055585, "loss": 3.3061, "step": 46900 }, { "epoch": 5.061994609164421, "grad_norm": 0.6962968111038208, "learning_rate": 0.0002967166756610901, "loss": 3.304, "step": 46950 }, { "epoch": 5.067385444743936, "grad_norm": 0.6491953134536743, "learning_rate": 0.00029639287641662165, "loss": 3.314, "step": 47000 }, { "epoch": 5.067385444743936, "eval_accuracy": 0.38063018892238176, "eval_loss": 3.413198947906494, "eval_runtime": 183.7299, "eval_samples_per_second": 98.03, "eval_steps_per_second": 6.129, "step": 47000 }, { "epoch": 5.07277628032345, "grad_norm": 0.6980647444725037, "learning_rate": 0.00029606907717215326, "loss": 3.3073, "step": 47050 }, { "epoch": 5.078167115902965, "grad_norm": 0.6380411386489868, "learning_rate": 0.0002957452779276848, "loss": 3.3019, "step": 47100 }, { "epoch": 5.083557951482479, "grad_norm": 0.643720805644989, "learning_rate": 0.00029542147868321636, "loss": 3.3155, "step": 47150 }, { "epoch": 5.0889487870619945, "grad_norm": 0.6720905303955078, "learning_rate": 0.00029509767943874796, "loss": 3.314, "step": 47200 }, { "epoch": 5.09433962264151, "grad_norm": 0.6560536026954651, "learning_rate": 0.0002947738801942795, "loss": 3.2982, "step": 47250 }, { "epoch": 5.099730458221024, "grad_norm": 0.68781578540802, "learning_rate": 0.0002944500809498111, "loss": 3.3222, "step": 47300 }, { "epoch": 5.105121293800539, "grad_norm": 0.6361771821975708, "learning_rate": 0.00029412628170534267, "loss": 3.3223, "step": 47350 }, { "epoch": 5.110512129380054, "grad_norm": 0.6629987955093384, "learning_rate": 0.0002938024824608742, 
"loss": 3.3037, "step": 47400 }, { "epoch": 5.115902964959568, "grad_norm": 0.7126314640045166, "learning_rate": 0.00029347868321640577, "loss": 3.3074, "step": 47450 }, { "epoch": 5.121293800539084, "grad_norm": 0.7076786160469055, "learning_rate": 0.0002931548839719374, "loss": 3.2979, "step": 47500 }, { "epoch": 5.126684636118599, "grad_norm": 0.647883415222168, "learning_rate": 0.0002928310847274689, "loss": 3.3051, "step": 47550 }, { "epoch": 5.132075471698113, "grad_norm": 0.7320659756660461, "learning_rate": 0.00029250728548300053, "loss": 3.2956, "step": 47600 }, { "epoch": 5.137466307277628, "grad_norm": 0.7425462603569031, "learning_rate": 0.0002921834862385321, "loss": 3.3094, "step": 47650 }, { "epoch": 5.142857142857143, "grad_norm": 0.726519763469696, "learning_rate": 0.0002918596869940637, "loss": 3.3247, "step": 47700 }, { "epoch": 5.1482479784366575, "grad_norm": 0.6692786812782288, "learning_rate": 0.0002915358877495952, "loss": 3.3143, "step": 47750 }, { "epoch": 5.153638814016173, "grad_norm": 0.6244843602180481, "learning_rate": 0.0002912120885051268, "loss": 3.2996, "step": 47800 }, { "epoch": 5.159029649595688, "grad_norm": 0.6885166168212891, "learning_rate": 0.00029089476524554774, "loss": 3.3091, "step": 47850 }, { "epoch": 5.164420485175202, "grad_norm": 0.695482611656189, "learning_rate": 0.0002905709660010793, "loss": 3.333, "step": 47900 }, { "epoch": 5.169811320754717, "grad_norm": 0.6709295511245728, "learning_rate": 0.0002902471667566109, "loss": 3.3144, "step": 47950 }, { "epoch": 5.175202156334231, "grad_norm": 0.6911921501159668, "learning_rate": 0.00028992336751214245, "loss": 3.3085, "step": 48000 }, { "epoch": 5.175202156334231, "eval_accuracy": 0.3809645138581869, "eval_loss": 3.4129929542541504, "eval_runtime": 183.2606, "eval_samples_per_second": 98.281, "eval_steps_per_second": 6.144, "step": 48000 }, { "epoch": 5.180592991913747, "grad_norm": 0.6802172064781189, "learning_rate": 0.00028959956826767405, "loss": 3.3148, 
"step": 48050 }, { "epoch": 5.185983827493262, "grad_norm": 0.6930907964706421, "learning_rate": 0.00028927576902320555, "loss": 3.304, "step": 48100 }, { "epoch": 5.191374663072776, "grad_norm": 0.6901477575302124, "learning_rate": 0.00028895196977873715, "loss": 3.3125, "step": 48150 }, { "epoch": 5.196765498652291, "grad_norm": 0.7224954962730408, "learning_rate": 0.0002886281705342687, "loss": 3.3272, "step": 48200 }, { "epoch": 5.202156334231806, "grad_norm": 0.665001392364502, "learning_rate": 0.0002883043712898003, "loss": 3.3197, "step": 48250 }, { "epoch": 5.2075471698113205, "grad_norm": 0.6910035610198975, "learning_rate": 0.00028798057204533186, "loss": 3.3222, "step": 48300 }, { "epoch": 5.212938005390836, "grad_norm": 0.6930015087127686, "learning_rate": 0.00028765677280086346, "loss": 3.3286, "step": 48350 }, { "epoch": 5.218328840970351, "grad_norm": 0.7036418914794922, "learning_rate": 0.000287332973556395, "loss": 3.3236, "step": 48400 }, { "epoch": 5.223719676549865, "grad_norm": 0.671301007270813, "learning_rate": 0.0002870091743119266, "loss": 3.3137, "step": 48450 }, { "epoch": 5.22911051212938, "grad_norm": 0.7079598903656006, "learning_rate": 0.00028668537506745817, "loss": 3.3494, "step": 48500 }, { "epoch": 5.234501347708895, "grad_norm": 0.6326192617416382, "learning_rate": 0.0002863615758229897, "loss": 3.3194, "step": 48550 }, { "epoch": 5.2398921832884096, "grad_norm": 0.6443894505500793, "learning_rate": 0.00028603777657852127, "loss": 3.3204, "step": 48600 }, { "epoch": 5.245283018867925, "grad_norm": 0.6314318776130676, "learning_rate": 0.0002857139773340529, "loss": 3.3184, "step": 48650 }, { "epoch": 5.250673854447439, "grad_norm": 1.178070068359375, "learning_rate": 0.0002853901780895844, "loss": 3.3221, "step": 48700 }, { "epoch": 5.256064690026954, "grad_norm": 0.6846694350242615, "learning_rate": 0.00028506637884511603, "loss": 3.3428, "step": 48750 }, { "epoch": 5.261455525606469, "grad_norm": 0.6815780401229858, 
"learning_rate": 0.0002847425796006476, "loss": 3.3422, "step": 48800 }, { "epoch": 5.2668463611859835, "grad_norm": 0.6925288438796997, "learning_rate": 0.00028441878035617913, "loss": 3.3171, "step": 48850 }, { "epoch": 5.272237196765499, "grad_norm": 0.7188766598701477, "learning_rate": 0.00028409498111171073, "loss": 3.3183, "step": 48900 }, { "epoch": 5.277628032345014, "grad_norm": 0.7365155220031738, "learning_rate": 0.0002837711818672423, "loss": 3.3391, "step": 48950 }, { "epoch": 5.283018867924528, "grad_norm": 0.668428897857666, "learning_rate": 0.00028344738262277384, "loss": 3.3291, "step": 49000 }, { "epoch": 5.283018867924528, "eval_accuracy": 0.38120333290560315, "eval_loss": 3.4074130058288574, "eval_runtime": 183.7218, "eval_samples_per_second": 98.034, "eval_steps_per_second": 6.129, "step": 49000 }, { "epoch": 5.288409703504043, "grad_norm": 0.6253606677055359, "learning_rate": 0.00028312358337830544, "loss": 3.3379, "step": 49050 }, { "epoch": 5.293800539083558, "grad_norm": 0.6850340962409973, "learning_rate": 0.000282799784133837, "loss": 3.3321, "step": 49100 }, { "epoch": 5.2991913746630726, "grad_norm": 0.6750385165214539, "learning_rate": 0.00028247598488936854, "loss": 3.3324, "step": 49150 }, { "epoch": 5.304582210242588, "grad_norm": 0.714887261390686, "learning_rate": 0.00028215218564490015, "loss": 3.3403, "step": 49200 }, { "epoch": 5.309973045822103, "grad_norm": 0.6349644660949707, "learning_rate": 0.0002818283864004317, "loss": 3.3279, "step": 49250 }, { "epoch": 5.315363881401617, "grad_norm": 0.7085480093955994, "learning_rate": 0.0002815045871559633, "loss": 3.3278, "step": 49300 }, { "epoch": 5.320754716981132, "grad_norm": 0.6977726817131042, "learning_rate": 0.00028118078791149485, "loss": 3.338, "step": 49350 }, { "epoch": 5.3261455525606465, "grad_norm": 0.7166506052017212, "learning_rate": 0.0002808569886670264, "loss": 3.3244, "step": 49400 }, { "epoch": 5.331536388140162, "grad_norm": 0.7002288103103638, 
"learning_rate": 0.00028053318942255795, "loss": 3.3195, "step": 49450 }, { "epoch": 5.336927223719677, "grad_norm": 0.7169665098190308, "learning_rate": 0.00028020939017808956, "loss": 3.3464, "step": 49500 }, { "epoch": 5.342318059299191, "grad_norm": 0.6901816129684448, "learning_rate": 0.0002798855909336211, "loss": 3.317, "step": 49550 }, { "epoch": 5.347708894878706, "grad_norm": 0.7446085214614868, "learning_rate": 0.0002795617916891527, "loss": 3.3362, "step": 49600 }, { "epoch": 5.353099730458221, "grad_norm": 0.6764737963676453, "learning_rate": 0.00027923799244468426, "loss": 3.331, "step": 49650 }, { "epoch": 5.3584905660377355, "grad_norm": 0.6573972105979919, "learning_rate": 0.00027891419320021587, "loss": 3.3411, "step": 49700 }, { "epoch": 5.363881401617251, "grad_norm": 0.7208666205406189, "learning_rate": 0.0002785903939557474, "loss": 3.3244, "step": 49750 }, { "epoch": 5.369272237196766, "grad_norm": 0.741628110408783, "learning_rate": 0.00027826659471127897, "loss": 3.3276, "step": 49800 }, { "epoch": 5.37466307277628, "grad_norm": 0.6378738880157471, "learning_rate": 0.0002779427954668105, "loss": 3.3416, "step": 49850 }, { "epoch": 5.380053908355795, "grad_norm": 0.6997308135032654, "learning_rate": 0.0002776189962223421, "loss": 3.3354, "step": 49900 }, { "epoch": 5.38544474393531, "grad_norm": 0.7298374176025391, "learning_rate": 0.0002772951969778737, "loss": 3.3216, "step": 49950 }, { "epoch": 5.390835579514825, "grad_norm": 0.6540275812149048, "learning_rate": 0.0002769713977334053, "loss": 3.3413, "step": 50000 }, { "epoch": 5.390835579514825, "eval_accuracy": 0.3816847738515091, "eval_loss": 3.401179075241089, "eval_runtime": 183.4607, "eval_samples_per_second": 98.174, "eval_steps_per_second": 6.138, "step": 50000 }, { "epoch": 5.39622641509434, "grad_norm": 0.6782923936843872, "learning_rate": 0.00027665407447382623, "loss": 3.3526, "step": 50050 }, { "epoch": 5.401617250673855, "grad_norm": 0.6657195091247559, "learning_rate": 
0.0002763302752293578, "loss": 3.3432, "step": 50100 }, { "epoch": 5.407008086253369, "grad_norm": 0.6890636086463928, "learning_rate": 0.00027600647598488933, "loss": 3.3339, "step": 50150 }, { "epoch": 5.412398921832884, "grad_norm": 0.7247375845909119, "learning_rate": 0.0002756826767404209, "loss": 3.3174, "step": 50200 }, { "epoch": 5.4177897574123985, "grad_norm": 0.6520726680755615, "learning_rate": 0.0002753588774959525, "loss": 3.3458, "step": 50250 }, { "epoch": 5.423180592991914, "grad_norm": 0.6332509517669678, "learning_rate": 0.00027503507825148404, "loss": 3.3392, "step": 50300 }, { "epoch": 5.428571428571429, "grad_norm": 0.7123890519142151, "learning_rate": 0.00027471127900701564, "loss": 3.3293, "step": 50350 }, { "epoch": 5.433962264150943, "grad_norm": 0.7349735498428345, "learning_rate": 0.0002743874797625472, "loss": 3.3496, "step": 50400 }, { "epoch": 5.439353099730458, "grad_norm": 0.6921404004096985, "learning_rate": 0.0002740636805180788, "loss": 3.3469, "step": 50450 }, { "epoch": 5.444743935309973, "grad_norm": 0.6628040075302124, "learning_rate": 0.00027373988127361035, "loss": 3.3299, "step": 50500 }, { "epoch": 5.450134770889488, "grad_norm": 0.6844006776809692, "learning_rate": 0.0002734160820291419, "loss": 3.3262, "step": 50550 }, { "epoch": 5.455525606469003, "grad_norm": 0.714565098285675, "learning_rate": 0.00027309228278467345, "loss": 3.3431, "step": 50600 }, { "epoch": 5.460916442048518, "grad_norm": 0.8186658620834351, "learning_rate": 0.00027276848354020506, "loss": 3.3354, "step": 50650 }, { "epoch": 5.466307277628032, "grad_norm": 0.6907452344894409, "learning_rate": 0.0002724446842957366, "loss": 3.3278, "step": 50700 }, { "epoch": 5.471698113207547, "grad_norm": 0.6899653077125549, "learning_rate": 0.0002721208850512682, "loss": 3.3141, "step": 50750 }, { "epoch": 5.4770889487870615, "grad_norm": 0.7394895553588867, "learning_rate": 0.00027179708580679976, "loss": 3.3432, "step": 50800 }, { "epoch": 5.482479784366577, 
"grad_norm": 0.7098657488822937, "learning_rate": 0.0002714732865623313, "loss": 3.3277, "step": 50850 }, { "epoch": 5.487870619946092, "grad_norm": 0.6476383805274963, "learning_rate": 0.0002711494873178629, "loss": 3.3377, "step": 50900 }, { "epoch": 5.493261455525606, "grad_norm": 0.7089292407035828, "learning_rate": 0.00027082568807339447, "loss": 3.3536, "step": 50950 }, { "epoch": 5.498652291105121, "grad_norm": 0.6908971071243286, "learning_rate": 0.000270501888828926, "loss": 3.3168, "step": 51000 }, { "epoch": 5.498652291105121, "eval_accuracy": 0.3820141007544748, "eval_loss": 3.397711753845215, "eval_runtime": 183.6128, "eval_samples_per_second": 98.092, "eval_steps_per_second": 6.132, "step": 51000 }, { "epoch": 5.504043126684636, "grad_norm": 0.6817182898521423, "learning_rate": 0.0002701780895844576, "loss": 3.3255, "step": 51050 }, { "epoch": 5.509433962264151, "grad_norm": 0.707398533821106, "learning_rate": 0.0002698542903399892, "loss": 3.3347, "step": 51100 }, { "epoch": 5.514824797843666, "grad_norm": 0.6728003621101379, "learning_rate": 0.0002695304910955207, "loss": 3.3509, "step": 51150 }, { "epoch": 5.520215633423181, "grad_norm": 0.700847864151001, "learning_rate": 0.00026920669185105233, "loss": 3.3274, "step": 51200 }, { "epoch": 5.525606469002695, "grad_norm": 0.7568596005439758, "learning_rate": 0.0002688828926065839, "loss": 3.3324, "step": 51250 }, { "epoch": 5.53099730458221, "grad_norm": 0.6875473856925964, "learning_rate": 0.0002685590933621155, "loss": 3.3451, "step": 51300 }, { "epoch": 5.536388140161725, "grad_norm": 0.6690535545349121, "learning_rate": 0.00026823529411764704, "loss": 3.324, "step": 51350 }, { "epoch": 5.54177897574124, "grad_norm": 0.7146397829055786, "learning_rate": 0.0002679114948731786, "loss": 3.3327, "step": 51400 }, { "epoch": 5.547169811320755, "grad_norm": 0.7005475759506226, "learning_rate": 0.0002675876956287102, "loss": 3.3507, "step": 51450 }, { "epoch": 5.55256064690027, "grad_norm": 
0.6918277144432068, "learning_rate": 0.00026726389638424174, "loss": 3.3357, "step": 51500 }, { "epoch": 5.557951482479784, "grad_norm": 0.701012134552002, "learning_rate": 0.0002669400971397733, "loss": 3.3272, "step": 51550 }, { "epoch": 5.563342318059299, "grad_norm": 0.7084556221961975, "learning_rate": 0.0002666162978953049, "loss": 3.3545, "step": 51600 }, { "epoch": 5.568733153638814, "grad_norm": 0.6553547978401184, "learning_rate": 0.00026629249865083645, "loss": 3.3508, "step": 51650 }, { "epoch": 5.574123989218329, "grad_norm": 0.7182124853134155, "learning_rate": 0.00026596869940636805, "loss": 3.3333, "step": 51700 }, { "epoch": 5.579514824797844, "grad_norm": 0.700818657875061, "learning_rate": 0.0002656449001618996, "loss": 3.3294, "step": 51750 }, { "epoch": 5.584905660377358, "grad_norm": 0.7259700894355774, "learning_rate": 0.0002653211009174312, "loss": 3.3407, "step": 51800 }, { "epoch": 5.590296495956873, "grad_norm": 0.7825680375099182, "learning_rate": 0.0002649973016729627, "loss": 3.3395, "step": 51850 }, { "epoch": 5.595687331536388, "grad_norm": 0.6643126606941223, "learning_rate": 0.0002646735024284943, "loss": 3.3443, "step": 51900 }, { "epoch": 5.601078167115903, "grad_norm": 0.6935634613037109, "learning_rate": 0.00026434970318402586, "loss": 3.3319, "step": 51950 }, { "epoch": 5.606469002695418, "grad_norm": 0.7245262265205383, "learning_rate": 0.00026402590393955746, "loss": 3.3291, "step": 52000 }, { "epoch": 5.606469002695418, "eval_accuracy": 0.3823364738726204, "eval_loss": 3.394861936569214, "eval_runtime": 183.6987, "eval_samples_per_second": 98.046, "eval_steps_per_second": 6.13, "step": 52000 }, { "epoch": 5.611859838274933, "grad_norm": 0.6866488456726074, "learning_rate": 0.000263702104695089, "loss": 3.3532, "step": 52050 }, { "epoch": 5.617250673854447, "grad_norm": 0.7162625193595886, "learning_rate": 0.0002633783054506206, "loss": 3.3288, "step": 52100 }, { "epoch": 5.622641509433962, "grad_norm": 0.7691003084182739, 
"learning_rate": 0.00026305450620615217, "loss": 3.3342, "step": 52150 }, { "epoch": 5.628032345013477, "grad_norm": 0.7199847102165222, "learning_rate": 0.0002627307069616837, "loss": 3.3293, "step": 52200 }, { "epoch": 5.633423180592992, "grad_norm": 0.6812527179718018, "learning_rate": 0.0002624069077172153, "loss": 3.3322, "step": 52250 }, { "epoch": 5.638814016172507, "grad_norm": 0.6690404415130615, "learning_rate": 0.0002620831084727469, "loss": 3.3338, "step": 52300 }, { "epoch": 5.644204851752022, "grad_norm": 0.6687739491462708, "learning_rate": 0.0002617593092282784, "loss": 3.3402, "step": 52350 }, { "epoch": 5.649595687331536, "grad_norm": 0.6985411047935486, "learning_rate": 0.00026143550998381003, "loss": 3.3269, "step": 52400 }, { "epoch": 5.654986522911051, "grad_norm": 0.6869599223136902, "learning_rate": 0.0002611117107393416, "loss": 3.3572, "step": 52450 }, { "epoch": 5.660377358490566, "grad_norm": 0.7344062328338623, "learning_rate": 0.00026078791149487313, "loss": 3.3319, "step": 52500 }, { "epoch": 5.665768194070081, "grad_norm": 0.7084963321685791, "learning_rate": 0.00026046411225040474, "loss": 3.3483, "step": 52550 }, { "epoch": 5.671159029649596, "grad_norm": 0.6755357980728149, "learning_rate": 0.0002601403130059363, "loss": 3.3606, "step": 52600 }, { "epoch": 5.67654986522911, "grad_norm": 0.7375144362449646, "learning_rate": 0.0002598165137614679, "loss": 3.3331, "step": 52650 }, { "epoch": 5.681940700808625, "grad_norm": 0.7138906121253967, "learning_rate": 0.00025949271451699944, "loss": 3.3237, "step": 52700 }, { "epoch": 5.6873315363881405, "grad_norm": 0.6811950206756592, "learning_rate": 0.000259168915272531, "loss": 3.3397, "step": 52750 }, { "epoch": 5.692722371967655, "grad_norm": 0.6969444751739502, "learning_rate": 0.0002588451160280626, "loss": 3.3228, "step": 52800 }, { "epoch": 5.69811320754717, "grad_norm": 0.6877330541610718, "learning_rate": 0.00025852131678359415, "loss": 3.3201, "step": 52850 }, { "epoch": 
5.703504043126685, "grad_norm": 0.6857284903526306, "learning_rate": 0.0002581975175391257, "loss": 3.3244, "step": 52900 }, { "epoch": 5.708894878706199, "grad_norm": 0.7053078413009644, "learning_rate": 0.00025788019427954665, "loss": 3.3277, "step": 52950 }, { "epoch": 5.714285714285714, "grad_norm": 0.7215891480445862, "learning_rate": 0.00025755639503507826, "loss": 3.3407, "step": 53000 }, { "epoch": 5.714285714285714, "eval_accuracy": 0.38302468126403727, "eval_loss": 3.3901915550231934, "eval_runtime": 183.3315, "eval_samples_per_second": 98.243, "eval_steps_per_second": 6.142, "step": 53000 }, { "epoch": 5.719676549865229, "grad_norm": 0.7056070566177368, "learning_rate": 0.00025723907177549915, "loss": 3.3326, "step": 53050 }, { "epoch": 5.725067385444744, "grad_norm": 0.7360427975654602, "learning_rate": 0.00025691527253103076, "loss": 3.3417, "step": 53100 }, { "epoch": 5.730458221024259, "grad_norm": 0.6608803868293762, "learning_rate": 0.0002565914732865623, "loss": 3.3341, "step": 53150 }, { "epoch": 5.735849056603773, "grad_norm": 0.7453342080116272, "learning_rate": 0.00025626767404209386, "loss": 3.3453, "step": 53200 }, { "epoch": 5.741239892183288, "grad_norm": 0.6823360919952393, "learning_rate": 0.00025594387479762546, "loss": 3.317, "step": 53250 }, { "epoch": 5.7466307277628035, "grad_norm": 0.7249504923820496, "learning_rate": 0.000255620075553157, "loss": 3.3596, "step": 53300 }, { "epoch": 5.752021563342318, "grad_norm": 0.6726418137550354, "learning_rate": 0.00025529627630868857, "loss": 3.342, "step": 53350 }, { "epoch": 5.757412398921833, "grad_norm": 0.7085649371147156, "learning_rate": 0.00025497247706422017, "loss": 3.3468, "step": 53400 }, { "epoch": 5.762803234501348, "grad_norm": 0.6718982458114624, "learning_rate": 0.0002546486778197517, "loss": 3.3439, "step": 53450 }, { "epoch": 5.768194070080862, "grad_norm": 0.6999180316925049, "learning_rate": 0.00025432487857528327, "loss": 3.326, "step": 53500 }, { "epoch": 
5.773584905660377, "grad_norm": 0.7056639790534973, "learning_rate": 0.0002540010793308149, "loss": 3.325, "step": 53550 }, { "epoch": 5.7789757412398925, "grad_norm": 0.7867892384529114, "learning_rate": 0.0002536772800863464, "loss": 3.3445, "step": 53600 }, { "epoch": 5.784366576819407, "grad_norm": 0.673121988773346, "learning_rate": 0.00025335348084187803, "loss": 3.3291, "step": 53650 }, { "epoch": 5.789757412398922, "grad_norm": 0.6664978265762329, "learning_rate": 0.0002530296815974096, "loss": 3.349, "step": 53700 }, { "epoch": 5.795148247978437, "grad_norm": 0.6702294945716858, "learning_rate": 0.0002527058823529412, "loss": 3.3366, "step": 53750 }, { "epoch": 5.800539083557951, "grad_norm": 0.6929095387458801, "learning_rate": 0.0002523820831084727, "loss": 3.3253, "step": 53800 }, { "epoch": 5.8059299191374665, "grad_norm": 0.7197151184082031, "learning_rate": 0.0002520582838640043, "loss": 3.3309, "step": 53850 }, { "epoch": 5.811320754716981, "grad_norm": 0.7062203884124756, "learning_rate": 0.00025173448461953584, "loss": 3.3312, "step": 53900 }, { "epoch": 5.816711590296496, "grad_norm": 0.7010804414749146, "learning_rate": 0.00025141068537506744, "loss": 3.3279, "step": 53950 }, { "epoch": 5.822102425876011, "grad_norm": 0.6943368315696716, "learning_rate": 0.000251086886130599, "loss": 3.3243, "step": 54000 }, { "epoch": 5.822102425876011, "eval_accuracy": 0.3835389353820632, "eval_loss": 3.3847732543945312, "eval_runtime": 184.1098, "eval_samples_per_second": 97.827, "eval_steps_per_second": 6.116, "step": 54000 }, { "epoch": 5.827493261455525, "grad_norm": 0.7037324905395508, "learning_rate": 0.0002507630868861306, "loss": 3.3363, "step": 54050 }, { "epoch": 5.83288409703504, "grad_norm": 0.688883900642395, "learning_rate": 0.00025043928764166215, "loss": 3.3239, "step": 54100 }, { "epoch": 5.8382749326145555, "grad_norm": 0.7349210977554321, "learning_rate": 0.00025011548839719375, "loss": 3.3382, "step": 54150 }, { "epoch": 5.84366576819407, 
"grad_norm": 0.6746708154678345, "learning_rate": 0.0002497916891527253, "loss": 3.3284, "step": 54200 }, { "epoch": 5.849056603773585, "grad_norm": 0.6808030605316162, "learning_rate": 0.00024946788990825686, "loss": 3.3277, "step": 54250 }, { "epoch": 5.8544474393531, "grad_norm": 0.7390064001083374, "learning_rate": 0.0002491440906637884, "loss": 3.3295, "step": 54300 }, { "epoch": 5.859838274932614, "grad_norm": 0.743797779083252, "learning_rate": 0.00024882029141932, "loss": 3.3404, "step": 54350 }, { "epoch": 5.8652291105121295, "grad_norm": 0.6696168184280396, "learning_rate": 0.00024849649217485156, "loss": 3.3267, "step": 54400 }, { "epoch": 5.870619946091644, "grad_norm": 0.7122966051101685, "learning_rate": 0.00024817269293038317, "loss": 3.345, "step": 54450 }, { "epoch": 5.876010781671159, "grad_norm": 0.6853727698326111, "learning_rate": 0.0002478488936859147, "loss": 3.345, "step": 54500 }, { "epoch": 5.881401617250674, "grad_norm": 0.7064388394355774, "learning_rate": 0.00024752509444144627, "loss": 3.3174, "step": 54550 }, { "epoch": 5.886792452830189, "grad_norm": 0.6728473901748657, "learning_rate": 0.00024720129519697787, "loss": 3.3434, "step": 54600 }, { "epoch": 5.892183288409703, "grad_norm": 0.6776952743530273, "learning_rate": 0.0002468774959525094, "loss": 3.3255, "step": 54650 }, { "epoch": 5.8975741239892185, "grad_norm": 0.7131171226501465, "learning_rate": 0.00024655369670804097, "loss": 3.332, "step": 54700 }, { "epoch": 5.902964959568733, "grad_norm": 0.665278434753418, "learning_rate": 0.0002462298974635726, "loss": 3.3409, "step": 54750 }, { "epoch": 5.908355795148248, "grad_norm": 0.6969947814941406, "learning_rate": 0.00024590609821910413, "loss": 3.3351, "step": 54800 }, { "epoch": 5.913746630727763, "grad_norm": 0.7671633362770081, "learning_rate": 0.0002455822989746357, "loss": 3.3422, "step": 54850 }, { "epoch": 5.919137466307277, "grad_norm": 0.707665741443634, "learning_rate": 0.0002452584997301673, "loss": 3.3422, "step": 
54900 }, { "epoch": 5.9245283018867925, "grad_norm": 0.7261333465576172, "learning_rate": 0.00024493470048569883, "loss": 3.3448, "step": 54950 }, { "epoch": 5.929919137466308, "grad_norm": 0.7175887823104858, "learning_rate": 0.00024461090124123044, "loss": 3.3262, "step": 55000 }, { "epoch": 5.929919137466308, "eval_accuracy": 0.3838151310228876, "eval_loss": 3.3818440437316895, "eval_runtime": 183.5588, "eval_samples_per_second": 98.121, "eval_steps_per_second": 6.134, "step": 55000 }, { "epoch": 5.935309973045822, "grad_norm": 0.7148836255073547, "learning_rate": 0.000244287101996762, "loss": 3.3569, "step": 55050 }, { "epoch": 5.940700808625337, "grad_norm": 0.6707710027694702, "learning_rate": 0.00024396330275229354, "loss": 3.3205, "step": 55100 }, { "epoch": 5.946091644204852, "grad_norm": 0.6846052408218384, "learning_rate": 0.00024363950350782512, "loss": 3.3286, "step": 55150 }, { "epoch": 5.951482479784366, "grad_norm": 0.7205194234848022, "learning_rate": 0.0002433157042633567, "loss": 3.325, "step": 55200 }, { "epoch": 5.9568733153638815, "grad_norm": 0.683093786239624, "learning_rate": 0.00024299190501888827, "loss": 3.3315, "step": 55250 }, { "epoch": 5.962264150943396, "grad_norm": 0.6996801495552063, "learning_rate": 0.00024266810577441985, "loss": 3.3234, "step": 55300 }, { "epoch": 5.967654986522911, "grad_norm": 0.7320035099983215, "learning_rate": 0.00024234430652995143, "loss": 3.3252, "step": 55350 }, { "epoch": 5.973045822102426, "grad_norm": 0.6926111578941345, "learning_rate": 0.00024202050728548298, "loss": 3.3272, "step": 55400 }, { "epoch": 5.97843665768194, "grad_norm": 0.702363133430481, "learning_rate": 0.00024169670804101456, "loss": 3.3475, "step": 55450 }, { "epoch": 5.9838274932614555, "grad_norm": 0.7452805042266846, "learning_rate": 0.0002413729087965461, "loss": 3.3308, "step": 55500 }, { "epoch": 5.989218328840971, "grad_norm": 0.6895799040794373, "learning_rate": 0.00024104910955207768, "loss": 3.3347, "step": 55550 }, { 
"epoch": 5.994609164420485, "grad_norm": 0.7022313475608826, "learning_rate": 0.00024072531030760926, "loss": 3.3497, "step": 55600 }, { "epoch": 6.0, "grad_norm": 1.475501298904419, "learning_rate": 0.00024040151106314084, "loss": 3.3485, "step": 55650 }, { "epoch": 6.005390835579515, "grad_norm": 0.7008572816848755, "learning_rate": 0.0002400777118186724, "loss": 3.2428, "step": 55700 }, { "epoch": 6.010781671159029, "grad_norm": 0.7365021705627441, "learning_rate": 0.00023975391257420397, "loss": 3.2469, "step": 55750 }, { "epoch": 6.0161725067385445, "grad_norm": 0.7550241947174072, "learning_rate": 0.00023943011332973555, "loss": 3.239, "step": 55800 }, { "epoch": 6.02156334231806, "grad_norm": 0.6840744018554688, "learning_rate": 0.00023910631408526712, "loss": 3.2463, "step": 55850 }, { "epoch": 6.026954177897574, "grad_norm": 0.6900550723075867, "learning_rate": 0.0002387825148407987, "loss": 3.2467, "step": 55900 }, { "epoch": 6.032345013477089, "grad_norm": 0.7249858975410461, "learning_rate": 0.00023845871559633025, "loss": 3.2418, "step": 55950 }, { "epoch": 6.037735849056604, "grad_norm": 0.6880434155464172, "learning_rate": 0.0002381349163518618, "loss": 3.2386, "step": 56000 }, { "epoch": 6.037735849056604, "eval_accuracy": 0.38375395944704804, "eval_loss": 3.3848366737365723, "eval_runtime": 183.6771, "eval_samples_per_second": 98.058, "eval_steps_per_second": 6.13, "step": 56000 }, { "epoch": 6.0431266846361185, "grad_norm": 0.7010095119476318, "learning_rate": 0.00023781111710739338, "loss": 3.2457, "step": 56050 }, { "epoch": 6.048517520215634, "grad_norm": 0.7619755864143372, "learning_rate": 0.00023748731786292496, "loss": 3.2441, "step": 56100 }, { "epoch": 6.053908355795148, "grad_norm": 0.756662130355835, "learning_rate": 0.00023716351861845654, "loss": 3.2619, "step": 56150 }, { "epoch": 6.059299191374663, "grad_norm": 0.7468305826187134, "learning_rate": 0.0002368397193739881, "loss": 3.2425, "step": 56200 }, { "epoch": 6.064690026954178, 
"grad_norm": 0.7461797595024109, "learning_rate": 0.0002365159201295197, "loss": 3.2516, "step": 56250 }, { "epoch": 6.070080862533692, "grad_norm": 0.7048510313034058, "learning_rate": 0.00023619212088505127, "loss": 3.2592, "step": 56300 }, { "epoch": 6.0754716981132075, "grad_norm": 0.7342618703842163, "learning_rate": 0.0002358683216405828, "loss": 3.2552, "step": 56350 }, { "epoch": 6.080862533692723, "grad_norm": 0.7538067698478699, "learning_rate": 0.00023554452239611437, "loss": 3.2766, "step": 56400 }, { "epoch": 6.086253369272237, "grad_norm": 0.7238980531692505, "learning_rate": 0.00023522072315164595, "loss": 3.264, "step": 56450 }, { "epoch": 6.091644204851752, "grad_norm": 0.7239121794700623, "learning_rate": 0.00023489692390717752, "loss": 3.2454, "step": 56500 }, { "epoch": 6.097035040431267, "grad_norm": 0.6743500828742981, "learning_rate": 0.0002345731246627091, "loss": 3.2766, "step": 56550 }, { "epoch": 6.1024258760107815, "grad_norm": 0.740608811378479, "learning_rate": 0.00023424932541824068, "loss": 3.2583, "step": 56600 }, { "epoch": 6.107816711590297, "grad_norm": 0.729874849319458, "learning_rate": 0.00023392552617377226, "loss": 3.2427, "step": 56650 }, { "epoch": 6.113207547169812, "grad_norm": 0.758840799331665, "learning_rate": 0.00023360172692930384, "loss": 3.2734, "step": 56700 }, { "epoch": 6.118598382749326, "grad_norm": 0.7450424432754517, "learning_rate": 0.00023327792768483539, "loss": 3.2821, "step": 56750 }, { "epoch": 6.123989218328841, "grad_norm": 0.7052544355392456, "learning_rate": 0.00023295412844036694, "loss": 3.2703, "step": 56800 }, { "epoch": 6.129380053908355, "grad_norm": 0.7544761300086975, "learning_rate": 0.00023263032919589851, "loss": 3.253, "step": 56850 }, { "epoch": 6.1347708894878705, "grad_norm": 0.7224695086479187, "learning_rate": 0.0002323065299514301, "loss": 3.2691, "step": 56900 }, { "epoch": 6.140161725067386, "grad_norm": 0.7088389992713928, "learning_rate": 0.00023198273070696167, "loss": 
3.2525, "step": 56950 }, { "epoch": 6.1455525606469, "grad_norm": 0.7072451114654541, "learning_rate": 0.00023165893146249325, "loss": 3.2689, "step": 57000 }, { "epoch": 6.1455525606469, "eval_accuracy": 0.3841858546761106, "eval_loss": 3.3838181495666504, "eval_runtime": 183.7277, "eval_samples_per_second": 98.031, "eval_steps_per_second": 6.129, "step": 57000 }, { "epoch": 6.150943396226415, "grad_norm": 0.6568054556846619, "learning_rate": 0.0002313351322180248, "loss": 3.2615, "step": 57050 }, { "epoch": 6.15633423180593, "grad_norm": 0.7381845116615295, "learning_rate": 0.00023101133297355638, "loss": 3.2692, "step": 57100 }, { "epoch": 6.1617250673854445, "grad_norm": 0.6702138185501099, "learning_rate": 0.00023068753372908795, "loss": 3.2844, "step": 57150 }, { "epoch": 6.16711590296496, "grad_norm": 0.7349806427955627, "learning_rate": 0.0002303637344846195, "loss": 3.2594, "step": 57200 }, { "epoch": 6.172506738544475, "grad_norm": 0.694933295249939, "learning_rate": 0.00023003993524015108, "loss": 3.2597, "step": 57250 }, { "epoch": 6.177897574123989, "grad_norm": 0.7540972232818604, "learning_rate": 0.00022971613599568266, "loss": 3.275, "step": 57300 }, { "epoch": 6.183288409703504, "grad_norm": 0.6981645226478577, "learning_rate": 0.0002293923367512142, "loss": 3.2605, "step": 57350 }, { "epoch": 6.188679245283019, "grad_norm": 0.7014349699020386, "learning_rate": 0.0002290685375067458, "loss": 3.267, "step": 57400 }, { "epoch": 6.1940700808625335, "grad_norm": 0.758674144744873, "learning_rate": 0.00022874473826227736, "loss": 3.2639, "step": 57450 }, { "epoch": 6.199460916442049, "grad_norm": 0.7108901739120483, "learning_rate": 0.00022842093901780894, "loss": 3.2551, "step": 57500 }, { "epoch": 6.204851752021563, "grad_norm": 0.7790617942810059, "learning_rate": 0.00022810361575822987, "loss": 3.2789, "step": 57550 }, { "epoch": 6.210242587601078, "grad_norm": 0.7338255643844604, "learning_rate": 0.00022777981651376145, "loss": 3.2685, "step": 
57600 }, { "epoch": 6.215633423180593, "grad_norm": 0.7300395965576172, "learning_rate": 0.00022745601726929302, "loss": 3.2808, "step": 57650 }, { "epoch": 6.2210242587601075, "grad_norm": 0.755389928817749, "learning_rate": 0.00022713221802482457, "loss": 3.2727, "step": 57700 }, { "epoch": 6.226415094339623, "grad_norm": 0.6979179382324219, "learning_rate": 0.00022680841878035615, "loss": 3.2646, "step": 57750 }, { "epoch": 6.231805929919138, "grad_norm": 0.7372843027114868, "learning_rate": 0.00022648461953588773, "loss": 3.2586, "step": 57800 }, { "epoch": 6.237196765498652, "grad_norm": 0.6861436367034912, "learning_rate": 0.0002261608202914193, "loss": 3.2727, "step": 57850 }, { "epoch": 6.242587601078167, "grad_norm": 0.7225656509399414, "learning_rate": 0.00022583702104695088, "loss": 3.2816, "step": 57900 }, { "epoch": 6.247978436657682, "grad_norm": 0.7209280729293823, "learning_rate": 0.00022551322180248246, "loss": 3.2733, "step": 57950 }, { "epoch": 6.2533692722371965, "grad_norm": 0.7402727603912354, "learning_rate": 0.00022518942255801399, "loss": 3.2754, "step": 58000 }, { "epoch": 6.2533692722371965, "eval_accuracy": 0.3843185198521321, "eval_loss": 3.3821375370025635, "eval_runtime": 183.8391, "eval_samples_per_second": 97.972, "eval_steps_per_second": 6.125, "step": 58000 }, { "epoch": 6.258760107816712, "grad_norm": 0.7029194831848145, "learning_rate": 0.00022486562331354556, "loss": 3.2612, "step": 58050 }, { "epoch": 6.264150943396227, "grad_norm": 0.717560350894928, "learning_rate": 0.00022454182406907714, "loss": 3.2752, "step": 58100 }, { "epoch": 6.269541778975741, "grad_norm": 0.7272930145263672, "learning_rate": 0.00022421802482460872, "loss": 3.2697, "step": 58150 }, { "epoch": 6.274932614555256, "grad_norm": 0.7455615997314453, "learning_rate": 0.0002238942255801403, "loss": 3.292, "step": 58200 }, { "epoch": 6.280323450134771, "grad_norm": 0.7354786992073059, "learning_rate": 0.00022357042633567187, "loss": 3.2735, "step": 58250 }, { 
"epoch": 6.285714285714286, "grad_norm": 0.7349162697792053, "learning_rate": 0.00022324662709120345, "loss": 3.2632, "step": 58300 }, { "epoch": 6.291105121293801, "grad_norm": 0.7539188861846924, "learning_rate": 0.00022292282784673503, "loss": 3.2846, "step": 58350 }, { "epoch": 6.296495956873315, "grad_norm": 0.719907283782959, "learning_rate": 0.00022259902860226655, "loss": 3.2689, "step": 58400 }, { "epoch": 6.30188679245283, "grad_norm": 0.7047970294952393, "learning_rate": 0.00022227522935779813, "loss": 3.2944, "step": 58450 }, { "epoch": 6.307277628032345, "grad_norm": 0.7011443972587585, "learning_rate": 0.0002219514301133297, "loss": 3.2935, "step": 58500 }, { "epoch": 6.3126684636118595, "grad_norm": 0.766208827495575, "learning_rate": 0.00022162763086886129, "loss": 3.2833, "step": 58550 }, { "epoch": 6.318059299191375, "grad_norm": 0.7611593008041382, "learning_rate": 0.00022130383162439286, "loss": 3.2873, "step": 58600 }, { "epoch": 6.32345013477089, "grad_norm": 0.7359244227409363, "learning_rate": 0.00022098003237992444, "loss": 3.2847, "step": 58650 }, { "epoch": 6.328840970350404, "grad_norm": 0.7401198148727417, "learning_rate": 0.00022065623313545602, "loss": 3.284, "step": 58700 }, { "epoch": 6.334231805929919, "grad_norm": 0.7132663726806641, "learning_rate": 0.00022033243389098757, "loss": 3.2731, "step": 58750 }, { "epoch": 6.339622641509434, "grad_norm": 0.7397975325584412, "learning_rate": 0.00022000863464651915, "loss": 3.2809, "step": 58800 }, { "epoch": 6.345013477088949, "grad_norm": 0.6927112340927124, "learning_rate": 0.0002196848354020507, "loss": 3.2931, "step": 58850 }, { "epoch": 6.350404312668464, "grad_norm": 0.7106407880783081, "learning_rate": 0.00021936103615758227, "loss": 3.2802, "step": 58900 }, { "epoch": 6.355795148247978, "grad_norm": 0.7471071481704712, "learning_rate": 0.00021903723691311385, "loss": 3.2769, "step": 58950 }, { "epoch": 6.361185983827493, "grad_norm": 0.7540058493614197, "learning_rate": 
0.00021871343766864543, "loss": 3.2938, "step": 59000 }, { "epoch": 6.361185983827493, "eval_accuracy": 0.38488079854657203, "eval_loss": 3.3768014907836914, "eval_runtime": 183.5526, "eval_samples_per_second": 98.124, "eval_steps_per_second": 6.134, "step": 59000 }, { "epoch": 6.366576819407008, "grad_norm": 0.732081413269043, "learning_rate": 0.00021838963842417698, "loss": 3.2813, "step": 59050 }, { "epoch": 6.3719676549865225, "grad_norm": 0.7750065326690674, "learning_rate": 0.00021806583917970856, "loss": 3.2791, "step": 59100 }, { "epoch": 6.377358490566038, "grad_norm": 0.7145469188690186, "learning_rate": 0.00021774203993524014, "loss": 3.2749, "step": 59150 }, { "epoch": 6.382749326145553, "grad_norm": 0.7660088539123535, "learning_rate": 0.00021741824069077171, "loss": 3.2785, "step": 59200 }, { "epoch": 6.388140161725067, "grad_norm": 0.7172083854675293, "learning_rate": 0.00021709444144630326, "loss": 3.2812, "step": 59250 }, { "epoch": 6.393530997304582, "grad_norm": 0.6981208920478821, "learning_rate": 0.00021677064220183484, "loss": 3.2828, "step": 59300 }, { "epoch": 6.398921832884097, "grad_norm": 0.6857271790504456, "learning_rate": 0.00021644684295736642, "loss": 3.2783, "step": 59350 }, { "epoch": 6.404312668463612, "grad_norm": 0.7612074613571167, "learning_rate": 0.00021612304371289797, "loss": 3.3057, "step": 59400 }, { "epoch": 6.409703504043127, "grad_norm": 0.8293981552124023, "learning_rate": 0.00021579924446842955, "loss": 3.2896, "step": 59450 }, { "epoch": 6.415094339622642, "grad_norm": 0.7098532319068909, "learning_rate": 0.00021547544522396113, "loss": 3.2764, "step": 59500 }, { "epoch": 6.420485175202156, "grad_norm": 0.7373313903808594, "learning_rate": 0.0002151516459794927, "loss": 3.2737, "step": 59550 }, { "epoch": 6.425876010781671, "grad_norm": 0.7213019132614136, "learning_rate": 0.00021482784673502428, "loss": 3.2958, "step": 59600 }, { "epoch": 6.431266846361186, "grad_norm": 0.7404071092605591, "learning_rate": 
0.00021450404749055586, "loss": 3.2866, "step": 59650 }, { "epoch": 6.436657681940701, "grad_norm": 0.7444236278533936, "learning_rate": 0.00021418024824608738, "loss": 3.2949, "step": 59700 }, { "epoch": 6.442048517520216, "grad_norm": 0.7594828605651855, "learning_rate": 0.00021385644900161896, "loss": 3.2932, "step": 59750 }, { "epoch": 6.44743935309973, "grad_norm": 0.7362959980964661, "learning_rate": 0.00021353264975715054, "loss": 3.2844, "step": 59800 }, { "epoch": 6.452830188679245, "grad_norm": 0.7293346524238586, "learning_rate": 0.00021320885051268211, "loss": 3.2673, "step": 59850 }, { "epoch": 6.45822102425876, "grad_norm": 0.774370551109314, "learning_rate": 0.0002128850512682137, "loss": 3.2825, "step": 59900 }, { "epoch": 6.463611859838275, "grad_norm": 0.7451016902923584, "learning_rate": 0.00021256125202374527, "loss": 3.2725, "step": 59950 }, { "epoch": 6.46900269541779, "grad_norm": 0.7309680581092834, "learning_rate": 0.0002122439287641662, "loss": 3.2827, "step": 60000 }, { "epoch": 6.46900269541779, "eval_accuracy": 0.38543851381972377, "eval_loss": 3.3723459243774414, "eval_runtime": 183.9807, "eval_samples_per_second": 97.896, "eval_steps_per_second": 6.12, "step": 60000 }, { "epoch": 6.474393530997305, "grad_norm": 0.7711241841316223, "learning_rate": 0.00021192012951969775, "loss": 3.2794, "step": 60050 }, { "epoch": 6.479784366576819, "grad_norm": 0.7258850336074829, "learning_rate": 0.00021159633027522932, "loss": 3.2932, "step": 60100 }, { "epoch": 6.485175202156334, "grad_norm": 0.7217243909835815, "learning_rate": 0.0002112725310307609, "loss": 3.2709, "step": 60150 }, { "epoch": 6.490566037735849, "grad_norm": 0.714972734451294, "learning_rate": 0.00021094873178629248, "loss": 3.284, "step": 60200 }, { "epoch": 6.495956873315364, "grad_norm": 0.7145763635635376, "learning_rate": 0.00021062493254182406, "loss": 3.2794, "step": 60250 }, { "epoch": 6.501347708894879, "grad_norm": 0.719743013381958, "learning_rate": 
0.00021030113329735563, "loss": 3.276, "step": 60300 }, { "epoch": 6.506738544474393, "grad_norm": 0.8085545897483826, "learning_rate": 0.0002099773340528872, "loss": 3.2608, "step": 60350 }, { "epoch": 6.512129380053908, "grad_norm": 0.7587844133377075, "learning_rate": 0.0002096535348084188, "loss": 3.2921, "step": 60400 }, { "epoch": 6.517520215633423, "grad_norm": 0.8297287821769714, "learning_rate": 0.0002093297355639503, "loss": 3.2801, "step": 60450 }, { "epoch": 6.5229110512129385, "grad_norm": 0.7191682457923889, "learning_rate": 0.0002090059363194819, "loss": 3.2757, "step": 60500 }, { "epoch": 6.528301886792453, "grad_norm": 0.701370358467102, "learning_rate": 0.00020868213707501347, "loss": 3.2991, "step": 60550 }, { "epoch": 6.533692722371968, "grad_norm": 0.7378591895103455, "learning_rate": 0.00020835833783054505, "loss": 3.2726, "step": 60600 }, { "epoch": 6.539083557951482, "grad_norm": 0.7391147613525391, "learning_rate": 0.00020803453858607662, "loss": 3.2876, "step": 60650 }, { "epoch": 6.544474393530997, "grad_norm": 0.6959494948387146, "learning_rate": 0.0002077107393416082, "loss": 3.2869, "step": 60700 }, { "epoch": 6.549865229110512, "grad_norm": 0.6961156725883484, "learning_rate": 0.00020738694009713975, "loss": 3.2903, "step": 60750 }, { "epoch": 6.555256064690027, "grad_norm": 0.819736123085022, "learning_rate": 0.00020706314085267133, "loss": 3.2861, "step": 60800 }, { "epoch": 6.560646900269542, "grad_norm": 0.7416958808898926, "learning_rate": 0.00020673934160820288, "loss": 3.2745, "step": 60850 }, { "epoch": 6.566037735849057, "grad_norm": 0.7390998005867004, "learning_rate": 0.00020641554236373446, "loss": 3.271, "step": 60900 }, { "epoch": 6.571428571428571, "grad_norm": 0.823673665523529, "learning_rate": 0.00020609174311926604, "loss": 3.2896, "step": 60950 }, { "epoch": 6.576819407008086, "grad_norm": 0.7763093113899231, "learning_rate": 0.0002057679438747976, "loss": 3.2757, "step": 61000 }, { "epoch": 6.576819407008086, 
"eval_accuracy": 0.38573676599677453, "eval_loss": 3.3671045303344727, "eval_runtime": 183.4205, "eval_samples_per_second": 98.195, "eval_steps_per_second": 6.139, "step": 61000 }, { "epoch": 6.5822102425876015, "grad_norm": 0.7161189913749695, "learning_rate": 0.00020544414463032916, "loss": 3.2724, "step": 61050 }, { "epoch": 6.587601078167116, "grad_norm": 0.7647314071655273, "learning_rate": 0.00020512034538586074, "loss": 3.2795, "step": 61100 }, { "epoch": 6.592991913746631, "grad_norm": 0.7199732065200806, "learning_rate": 0.00020479654614139232, "loss": 3.2953, "step": 61150 }, { "epoch": 6.598382749326145, "grad_norm": 0.707043468952179, "learning_rate": 0.0002044727468969239, "loss": 3.2936, "step": 61200 }, { "epoch": 6.60377358490566, "grad_norm": 0.7686954140663147, "learning_rate": 0.00020414894765245547, "loss": 3.2855, "step": 61250 }, { "epoch": 6.609164420485175, "grad_norm": 0.7518014907836914, "learning_rate": 0.00020382514840798702, "loss": 3.2775, "step": 61300 }, { "epoch": 6.6145552560646905, "grad_norm": 0.8239266872406006, "learning_rate": 0.0002035013491635186, "loss": 3.2874, "step": 61350 }, { "epoch": 6.619946091644205, "grad_norm": 0.7640479207038879, "learning_rate": 0.00020317754991905015, "loss": 3.2888, "step": 61400 }, { "epoch": 6.62533692722372, "grad_norm": 0.764625072479248, "learning_rate": 0.00020285375067458173, "loss": 3.2869, "step": 61450 }, { "epoch": 6.630727762803234, "grad_norm": 0.7239562273025513, "learning_rate": 0.0002025299514301133, "loss": 3.2814, "step": 61500 }, { "epoch": 6.636118598382749, "grad_norm": 0.7507439255714417, "learning_rate": 0.00020220615218564489, "loss": 3.2802, "step": 61550 }, { "epoch": 6.6415094339622645, "grad_norm": 0.7298441529273987, "learning_rate": 0.00020188235294117646, "loss": 3.3071, "step": 61600 }, { "epoch": 6.646900269541779, "grad_norm": 0.719257116317749, "learning_rate": 0.00020155855369670804, "loss": 3.2638, "step": 61650 }, { "epoch": 6.652291105121294, "grad_norm": 
0.7430747747421265, "learning_rate": 0.00020123475445223956, "loss": 3.2825, "step": 61700 }, { "epoch": 6.657681940700809, "grad_norm": 0.7874649167060852, "learning_rate": 0.00020091095520777114, "loss": 3.2985, "step": 61750 }, { "epoch": 6.663072776280323, "grad_norm": 0.7608209252357483, "learning_rate": 0.00020058715596330272, "loss": 3.2834, "step": 61800 }, { "epoch": 6.668463611859838, "grad_norm": 0.7211445569992065, "learning_rate": 0.0002002633567188343, "loss": 3.2955, "step": 61850 }, { "epoch": 6.6738544474393535, "grad_norm": 0.7505002617835999, "learning_rate": 0.00019993955747436588, "loss": 3.2848, "step": 61900 }, { "epoch": 6.679245283018868, "grad_norm": 0.7617312669754028, "learning_rate": 0.00019961575822989745, "loss": 3.2796, "step": 61950 }, { "epoch": 6.684636118598383, "grad_norm": 0.7387272119522095, "learning_rate": 0.00019929195898542903, "loss": 3.278, "step": 62000 }, { "epoch": 6.684636118598383, "eval_accuracy": 0.3858140182000106, "eval_loss": 3.3650400638580322, "eval_runtime": 183.6699, "eval_samples_per_second": 98.062, "eval_steps_per_second": 6.131, "step": 62000 }, { "epoch": 6.690026954177897, "grad_norm": 0.7572900652885437, "learning_rate": 0.0001989681597409606, "loss": 3.2769, "step": 62050 }, { "epoch": 6.695417789757412, "grad_norm": 0.7592546343803406, "learning_rate": 0.0001986508364813815, "loss": 3.2829, "step": 62100 }, { "epoch": 6.7008086253369274, "grad_norm": 0.7549013495445251, "learning_rate": 0.00019832703723691308, "loss": 3.2989, "step": 62150 }, { "epoch": 6.706199460916442, "grad_norm": 0.7903435230255127, "learning_rate": 0.00019800323799244466, "loss": 3.3063, "step": 62200 }, { "epoch": 6.711590296495957, "grad_norm": 0.791134774684906, "learning_rate": 0.00019767943874797624, "loss": 3.2809, "step": 62250 }, { "epoch": 6.716981132075472, "grad_norm": 0.7578250169754028, "learning_rate": 0.00019735563950350782, "loss": 3.2862, "step": 62300 }, { "epoch": 6.722371967654986, "grad_norm": 
0.7541927695274353, "learning_rate": 0.0001970318402590394, "loss": 3.2864, "step": 62350 }, { "epoch": 6.727762803234501, "grad_norm": 0.7560279369354248, "learning_rate": 0.00019670804101457097, "loss": 3.2722, "step": 62400 }, { "epoch": 6.7331536388140165, "grad_norm": 0.7128151059150696, "learning_rate": 0.00019638424177010252, "loss": 3.2774, "step": 62450 }, { "epoch": 6.738544474393531, "grad_norm": 0.8017320036888123, "learning_rate": 0.00019606044252563407, "loss": 3.2861, "step": 62500 }, { "epoch": 6.743935309973046, "grad_norm": 0.7617440819740295, "learning_rate": 0.00019573664328116565, "loss": 3.2845, "step": 62550 }, { "epoch": 6.74932614555256, "grad_norm": 0.7816303968429565, "learning_rate": 0.00019541284403669723, "loss": 3.2797, "step": 62600 }, { "epoch": 6.754716981132075, "grad_norm": 0.7712652087211609, "learning_rate": 0.0001950890447922288, "loss": 3.2916, "step": 62650 }, { "epoch": 6.7601078167115904, "grad_norm": 0.7972909808158875, "learning_rate": 0.00019476524554776038, "loss": 3.2795, "step": 62700 }, { "epoch": 6.765498652291106, "grad_norm": 0.7326833605766296, "learning_rate": 0.00019444144630329193, "loss": 3.2975, "step": 62750 }, { "epoch": 6.77088948787062, "grad_norm": 0.7460118532180786, "learning_rate": 0.0001941176470588235, "loss": 3.2945, "step": 62800 }, { "epoch": 6.776280323450135, "grad_norm": 0.820135235786438, "learning_rate": 0.0001937938478143551, "loss": 3.2876, "step": 62850 }, { "epoch": 6.781671159029649, "grad_norm": 0.7615317702293396, "learning_rate": 0.00019347004856988664, "loss": 3.2822, "step": 62900 }, { "epoch": 6.787061994609164, "grad_norm": 0.7746335864067078, "learning_rate": 0.00019314624932541822, "loss": 3.283, "step": 62950 }, { "epoch": 6.7924528301886795, "grad_norm": 0.745581865310669, "learning_rate": 0.0001928224500809498, "loss": 3.2879, "step": 63000 }, { "epoch": 6.7924528301886795, "eval_accuracy": 0.38680895269372784, "eval_loss": 3.35954213142395, "eval_runtime": 183.6719, 
"eval_samples_per_second": 98.061, "eval_steps_per_second": 6.13, "step": 63000 }, { "epoch": 6.797843665768194, "grad_norm": 0.7740397453308105, "learning_rate": 0.00019249865083648137, "loss": 3.2726, "step": 63050 }, { "epoch": 6.803234501347709, "grad_norm": 0.8130731582641602, "learning_rate": 0.00019217485159201292, "loss": 3.2999, "step": 63100 }, { "epoch": 6.808625336927224, "grad_norm": 0.7525258660316467, "learning_rate": 0.0001918510523475445, "loss": 3.2857, "step": 63150 }, { "epoch": 6.814016172506738, "grad_norm": 0.7616598606109619, "learning_rate": 0.00019152725310307608, "loss": 3.2845, "step": 63200 }, { "epoch": 6.819407008086253, "grad_norm": 0.7608476281166077, "learning_rate": 0.00019120345385860766, "loss": 3.293, "step": 63250 }, { "epoch": 6.824797843665769, "grad_norm": 0.7839943766593933, "learning_rate": 0.00019087965461413923, "loss": 3.2917, "step": 63300 }, { "epoch": 6.830188679245283, "grad_norm": 0.7487995028495789, "learning_rate": 0.00019055585536967079, "loss": 3.2826, "step": 63350 }, { "epoch": 6.835579514824798, "grad_norm": 0.7642489671707153, "learning_rate": 0.00019023205612520234, "loss": 3.2767, "step": 63400 }, { "epoch": 6.840970350404312, "grad_norm": 0.7495631575584412, "learning_rate": 0.00018990825688073391, "loss": 3.2824, "step": 63450 }, { "epoch": 6.846361185983827, "grad_norm": 0.7581333518028259, "learning_rate": 0.0001895844576362655, "loss": 3.2842, "step": 63500 }, { "epoch": 6.8517520215633425, "grad_norm": 0.7547215819358826, "learning_rate": 0.00018926065839179707, "loss": 3.3006, "step": 63550 }, { "epoch": 6.857142857142857, "grad_norm": 0.821328341960907, "learning_rate": 0.00018893685914732865, "loss": 3.286, "step": 63600 }, { "epoch": 6.862533692722372, "grad_norm": 0.7756742238998413, "learning_rate": 0.00018861305990286022, "loss": 3.2891, "step": 63650 }, { "epoch": 6.867924528301887, "grad_norm": 0.7616881728172302, "learning_rate": 0.0001882892606583918, "loss": 3.2645, "step": 63700 }, { 
"epoch": 6.873315363881401, "grad_norm": 0.7382721900939941, "learning_rate": 0.00018796546141392333, "loss": 3.275, "step": 63750 }, { "epoch": 6.878706199460916, "grad_norm": 0.7397435307502747, "learning_rate": 0.0001876416621694549, "loss": 3.2884, "step": 63800 }, { "epoch": 6.884097035040432, "grad_norm": 0.7460421323776245, "learning_rate": 0.00018731786292498648, "loss": 3.2613, "step": 63850 }, { "epoch": 6.889487870619946, "grad_norm": 0.7353686690330505, "learning_rate": 0.00018699406368051806, "loss": 3.276, "step": 63900 }, { "epoch": 6.894878706199461, "grad_norm": 0.7506459355354309, "learning_rate": 0.00018667026443604964, "loss": 3.293, "step": 63950 }, { "epoch": 6.900269541778976, "grad_norm": 0.7485863566398621, "learning_rate": 0.00018634646519158121, "loss": 3.3019, "step": 64000 }, { "epoch": 6.900269541778976, "eval_accuracy": 0.3870750436159855, "eval_loss": 3.3544468879699707, "eval_runtime": 183.6704, "eval_samples_per_second": 98.062, "eval_steps_per_second": 6.131, "step": 64000 }, { "epoch": 6.90566037735849, "grad_norm": 0.7352957129478455, "learning_rate": 0.0001860226659471128, "loss": 3.2824, "step": 64050 }, { "epoch": 6.9110512129380055, "grad_norm": 0.7454565763473511, "learning_rate": 0.00018569886670264434, "loss": 3.2811, "step": 64100 }, { "epoch": 6.916442048517521, "grad_norm": 0.7280657291412354, "learning_rate": 0.00018537506745817592, "loss": 3.2829, "step": 64150 }, { "epoch": 6.921832884097035, "grad_norm": 0.7745518088340759, "learning_rate": 0.00018505774419859684, "loss": 3.3071, "step": 64200 }, { "epoch": 6.92722371967655, "grad_norm": 0.79837566614151, "learning_rate": 0.00018473394495412842, "loss": 3.2772, "step": 64250 }, { "epoch": 6.932614555256064, "grad_norm": 0.8125883936882019, "learning_rate": 0.00018441014570966, "loss": 3.2835, "step": 64300 }, { "epoch": 6.938005390835579, "grad_norm": 0.7624317407608032, "learning_rate": 0.00018408634646519158, "loss": 3.2743, "step": 64350 }, { "epoch": 
6.943396226415095, "grad_norm": 0.7544397711753845, "learning_rate": 0.00018376254722072316, "loss": 3.2835, "step": 64400 }, { "epoch": 6.948787061994609, "grad_norm": 0.7494552731513977, "learning_rate": 0.0001834387479762547, "loss": 3.2593, "step": 64450 }, { "epoch": 6.954177897574124, "grad_norm": 0.7906284332275391, "learning_rate": 0.00018311494873178628, "loss": 3.3027, "step": 64500 }, { "epoch": 6.959568733153639, "grad_norm": 0.76076340675354, "learning_rate": 0.00018279114948731783, "loss": 3.2956, "step": 64550 }, { "epoch": 6.964959568733153, "grad_norm": 0.7786262035369873, "learning_rate": 0.0001824673502428494, "loss": 3.296, "step": 64600 }, { "epoch": 6.9703504043126685, "grad_norm": 0.8318013548851013, "learning_rate": 0.000182143550998381, "loss": 3.3069, "step": 64650 }, { "epoch": 6.975741239892184, "grad_norm": 0.7505510449409485, "learning_rate": 0.00018181975175391257, "loss": 3.3001, "step": 64700 }, { "epoch": 6.981132075471698, "grad_norm": 0.7878348231315613, "learning_rate": 0.00018149595250944414, "loss": 3.2874, "step": 64750 }, { "epoch": 6.986522911051213, "grad_norm": 0.8053597807884216, "learning_rate": 0.0001811721532649757, "loss": 3.2644, "step": 64800 }, { "epoch": 6.991913746630727, "grad_norm": 0.7475190162658691, "learning_rate": 0.00018084835402050727, "loss": 3.2769, "step": 64850 }, { "epoch": 6.997304582210242, "grad_norm": 0.7410888671875, "learning_rate": 0.00018052455477603885, "loss": 3.2803, "step": 64900 }, { "epoch": 7.002695417789758, "grad_norm": 0.7842205762863159, "learning_rate": 0.0001802007555315704, "loss": 3.2315, "step": 64950 }, { "epoch": 7.008086253369272, "grad_norm": 0.8470344543457031, "learning_rate": 0.00017987695628710198, "loss": 3.2106, "step": 65000 }, { "epoch": 7.008086253369272, "eval_accuracy": 0.38683339959348606, "eval_loss": 3.3595986366271973, "eval_runtime": 183.7302, "eval_samples_per_second": 98.03, "eval_steps_per_second": 6.129, "step": 65000 }, { "epoch": 7.013477088948787, 
"grad_norm": 0.7936300039291382, "learning_rate": 0.00017955315704263356, "loss": 3.1962, "step": 65050 }, { "epoch": 7.018867924528302, "grad_norm": 0.8864548802375793, "learning_rate": 0.0001792293577981651, "loss": 3.2104, "step": 65100 }, { "epoch": 7.024258760107816, "grad_norm": 0.7770150303840637, "learning_rate": 0.00017890555855369668, "loss": 3.2211, "step": 65150 }, { "epoch": 7.0296495956873315, "grad_norm": 0.7711372375488281, "learning_rate": 0.00017858175930922826, "loss": 3.1993, "step": 65200 }, { "epoch": 7.035040431266847, "grad_norm": 0.7707984447479248, "learning_rate": 0.00017825796006475984, "loss": 3.1926, "step": 65250 }, { "epoch": 7.040431266846361, "grad_norm": 0.7653559446334839, "learning_rate": 0.00017793416082029142, "loss": 3.2114, "step": 65300 }, { "epoch": 7.045822102425876, "grad_norm": 0.7938640713691711, "learning_rate": 0.000177610361575823, "loss": 3.1917, "step": 65350 }, { "epoch": 7.051212938005391, "grad_norm": 0.7933249473571777, "learning_rate": 0.00017728656233135452, "loss": 3.2102, "step": 65400 }, { "epoch": 7.056603773584905, "grad_norm": 0.7714968323707581, "learning_rate": 0.0001769627630868861, "loss": 3.2091, "step": 65450 }, { "epoch": 7.061994609164421, "grad_norm": 0.7854500412940979, "learning_rate": 0.00017663896384241767, "loss": 3.2047, "step": 65500 }, { "epoch": 7.067385444743936, "grad_norm": 0.8066527247428894, "learning_rate": 0.00017631516459794925, "loss": 3.2168, "step": 65550 }, { "epoch": 7.07277628032345, "grad_norm": 0.7777254581451416, "learning_rate": 0.00017599136535348083, "loss": 3.2176, "step": 65600 }, { "epoch": 7.078167115902965, "grad_norm": 0.8117446899414062, "learning_rate": 0.0001756675661090124, "loss": 3.2035, "step": 65650 }, { "epoch": 7.083557951482479, "grad_norm": 0.7938959002494812, "learning_rate": 0.00017534376686454398, "loss": 3.2154, "step": 65700 }, { "epoch": 7.0889487870619945, "grad_norm": 0.7688341736793518, "learning_rate": 0.00017501996762007556, "loss": 
3.2104, "step": 65750 }, { "epoch": 7.09433962264151, "grad_norm": 0.8066598176956177, "learning_rate": 0.00017469616837560709, "loss": 3.2143, "step": 65800 }, { "epoch": 7.099730458221024, "grad_norm": 0.7198970317840576, "learning_rate": 0.00017437236913113866, "loss": 3.1953, "step": 65850 }, { "epoch": 7.105121293800539, "grad_norm": 0.7910106778144836, "learning_rate": 0.00017404856988667024, "loss": 3.206, "step": 65900 }, { "epoch": 7.110512129380054, "grad_norm": 0.8067068457603455, "learning_rate": 0.00017372477064220182, "loss": 3.194, "step": 65950 }, { "epoch": 7.115902964959568, "grad_norm": 0.7943997383117676, "learning_rate": 0.0001734009713977334, "loss": 3.2226, "step": 66000 }, { "epoch": 7.115902964959568, "eval_accuracy": 0.38742360208009435, "eval_loss": 3.3600785732269287, "eval_runtime": 183.7791, "eval_samples_per_second": 98.004, "eval_steps_per_second": 6.127, "step": 66000 }, { "epoch": 7.121293800539084, "grad_norm": 0.7855022549629211, "learning_rate": 0.00017307717215326497, "loss": 3.211, "step": 66050 }, { "epoch": 7.126684636118599, "grad_norm": 0.8030198216438293, "learning_rate": 0.00017275337290879655, "loss": 3.2127, "step": 66100 }, { "epoch": 7.132075471698113, "grad_norm": 0.7985265851020813, "learning_rate": 0.0001724295736643281, "loss": 3.2214, "step": 66150 }, { "epoch": 7.137466307277628, "grad_norm": 0.779312014579773, "learning_rate": 0.00017210577441985968, "loss": 3.2065, "step": 66200 }, { "epoch": 7.142857142857143, "grad_norm": 0.8055819272994995, "learning_rate": 0.00017178197517539123, "loss": 3.2262, "step": 66250 }, { "epoch": 7.1482479784366575, "grad_norm": 0.7920130491256714, "learning_rate": 0.0001714581759309228, "loss": 3.2101, "step": 66300 }, { "epoch": 7.153638814016173, "grad_norm": 0.786949098110199, "learning_rate": 0.00017113437668645439, "loss": 3.2134, "step": 66350 }, { "epoch": 7.159029649595688, "grad_norm": 0.7556127309799194, "learning_rate": 0.00017081057744198596, "loss": 3.2158, "step": 
66400 }, { "epoch": 7.164420485175202, "grad_norm": 0.7686795592308044, "learning_rate": 0.00017048677819751751, "loss": 3.223, "step": 66450 }, { "epoch": 7.169811320754717, "grad_norm": 0.7993310689926147, "learning_rate": 0.0001701629789530491, "loss": 3.2131, "step": 66500 }, { "epoch": 7.175202156334231, "grad_norm": 0.8089261651039124, "learning_rate": 0.00016983917970858067, "loss": 3.228, "step": 66550 }, { "epoch": 7.180592991913747, "grad_norm": 0.7769164443016052, "learning_rate": 0.00016951538046411225, "loss": 3.2136, "step": 66600 }, { "epoch": 7.185983827493262, "grad_norm": 0.7841304540634155, "learning_rate": 0.0001691915812196438, "loss": 3.2292, "step": 66650 }, { "epoch": 7.191374663072776, "grad_norm": 0.7953046560287476, "learning_rate": 0.00016886778197517538, "loss": 3.2204, "step": 66700 }, { "epoch": 7.196765498652291, "grad_norm": 0.7785905599594116, "learning_rate": 0.00016854398273070693, "loss": 3.2299, "step": 66750 }, { "epoch": 7.202156334231806, "grad_norm": 0.7831030488014221, "learning_rate": 0.0001682201834862385, "loss": 3.2033, "step": 66800 }, { "epoch": 7.2075471698113205, "grad_norm": 0.7967961430549622, "learning_rate": 0.00016789638424177008, "loss": 3.2196, "step": 66850 }, { "epoch": 7.212938005390836, "grad_norm": 0.786827564239502, "learning_rate": 0.00016757258499730166, "loss": 3.2386, "step": 66900 }, { "epoch": 7.218328840970351, "grad_norm": 0.8236501216888428, "learning_rate": 0.00016724878575283324, "loss": 3.2287, "step": 66950 }, { "epoch": 7.223719676549865, "grad_norm": 0.7607845067977905, "learning_rate": 0.00016692498650836481, "loss": 3.2306, "step": 67000 }, { "epoch": 7.223719676549865, "eval_accuracy": 0.38756930560265357, "eval_loss": 3.355984926223755, "eval_runtime": 183.5772, "eval_samples_per_second": 98.111, "eval_steps_per_second": 6.134, "step": 67000 }, { "epoch": 7.22911051212938, "grad_norm": 0.7747772932052612, "learning_rate": 0.0001666011872638964, "loss": 3.2265, "step": 67050 }, { 
"epoch": 7.234501347708895, "grad_norm": 0.8035948872566223, "learning_rate": 0.00016627738801942792, "loss": 3.2282, "step": 67100 }, { "epoch": 7.2398921832884096, "grad_norm": 0.771018922328949, "learning_rate": 0.0001659535887749595, "loss": 3.2286, "step": 67150 }, { "epoch": 7.245283018867925, "grad_norm": 0.8212960362434387, "learning_rate": 0.00016562978953049107, "loss": 3.2199, "step": 67200 }, { "epoch": 7.250673854447439, "grad_norm": 0.8093283772468567, "learning_rate": 0.00016530599028602265, "loss": 3.2192, "step": 67250 }, { "epoch": 7.256064690026954, "grad_norm": 0.8153406977653503, "learning_rate": 0.00016498219104155423, "loss": 3.2356, "step": 67300 }, { "epoch": 7.261455525606469, "grad_norm": 0.7982839345932007, "learning_rate": 0.0001646583917970858, "loss": 3.224, "step": 67350 }, { "epoch": 7.2668463611859835, "grad_norm": 0.8163326978683472, "learning_rate": 0.00016433459255261738, "loss": 3.2309, "step": 67400 }, { "epoch": 7.272237196765499, "grad_norm": 0.8001326322555542, "learning_rate": 0.00016401079330814896, "loss": 3.2408, "step": 67450 }, { "epoch": 7.277628032345014, "grad_norm": 0.8096315860748291, "learning_rate": 0.00016368699406368048, "loss": 3.2359, "step": 67500 }, { "epoch": 7.283018867924528, "grad_norm": 0.9220945239067078, "learning_rate": 0.00016336319481921206, "loss": 3.2263, "step": 67550 }, { "epoch": 7.288409703504043, "grad_norm": 0.7517732977867126, "learning_rate": 0.00016303939557474364, "loss": 3.2471, "step": 67600 }, { "epoch": 7.293800539083558, "grad_norm": 0.8864023089408875, "learning_rate": 0.00016271559633027522, "loss": 3.2311, "step": 67650 }, { "epoch": 7.2991913746630726, "grad_norm": 0.7982566952705383, "learning_rate": 0.0001623917970858068, "loss": 3.23, "step": 67700 }, { "epoch": 7.304582210242588, "grad_norm": 0.7961156964302063, "learning_rate": 0.00016206799784133837, "loss": 3.2334, "step": 67750 }, { "epoch": 7.309973045822103, "grad_norm": 0.7799623012542725, "learning_rate": 
0.00016174419859686992, "loss": 3.2151, "step": 67800 }, { "epoch": 7.315363881401617, "grad_norm": 0.7993525266647339, "learning_rate": 0.0001614203993524015, "loss": 3.2525, "step": 67850 }, { "epoch": 7.320754716981132, "grad_norm": 0.7678369879722595, "learning_rate": 0.00016110307609282242, "loss": 3.2317, "step": 67900 }, { "epoch": 7.3261455525606465, "grad_norm": 0.8402950763702393, "learning_rate": 0.000160779276848354, "loss": 3.2364, "step": 67950 }, { "epoch": 7.331536388140162, "grad_norm": 0.8143036365509033, "learning_rate": 0.00016045547760388558, "loss": 3.2122, "step": 68000 }, { "epoch": 7.331536388140162, "eval_accuracy": 0.38808019148115724, "eval_loss": 3.353743553161621, "eval_runtime": 183.5805, "eval_samples_per_second": 98.11, "eval_steps_per_second": 6.134, "step": 68000 }, { "epoch": 7.336927223719677, "grad_norm": 0.7968747019767761, "learning_rate": 0.00016013167835941716, "loss": 3.2338, "step": 68050 }, { "epoch": 7.342318059299191, "grad_norm": 0.8042038679122925, "learning_rate": 0.00015980787911494873, "loss": 3.2394, "step": 68100 }, { "epoch": 7.347708894878706, "grad_norm": 0.8089545369148254, "learning_rate": 0.00015948407987048029, "loss": 3.2243, "step": 68150 }, { "epoch": 7.353099730458221, "grad_norm": 0.839443564414978, "learning_rate": 0.00015916028062601186, "loss": 3.2456, "step": 68200 }, { "epoch": 7.3584905660377355, "grad_norm": 0.8253589868545532, "learning_rate": 0.00015883648138154344, "loss": 3.2482, "step": 68250 }, { "epoch": 7.363881401617251, "grad_norm": 0.805784285068512, "learning_rate": 0.000158512682137075, "loss": 3.2355, "step": 68300 }, { "epoch": 7.369272237196766, "grad_norm": 0.8239293098449707, "learning_rate": 0.00015818888289260657, "loss": 3.2337, "step": 68350 }, { "epoch": 7.37466307277628, "grad_norm": 0.8073683381080627, "learning_rate": 0.00015786508364813815, "loss": 3.2261, "step": 68400 }, { "epoch": 7.380053908355795, "grad_norm": 0.871188759803772, "learning_rate": 
0.0001575412844036697, "loss": 3.2112, "step": 68450 }, { "epoch": 7.38544474393531, "grad_norm": 0.8160022497177124, "learning_rate": 0.00015721748515920127, "loss": 3.2173, "step": 68500 }, { "epoch": 7.390835579514825, "grad_norm": 0.7948161959648132, "learning_rate": 0.00015689368591473285, "loss": 3.2271, "step": 68550 }, { "epoch": 7.39622641509434, "grad_norm": 0.7891157269477844, "learning_rate": 0.00015656988667026443, "loss": 3.2428, "step": 68600 }, { "epoch": 7.401617250673855, "grad_norm": 0.824519157409668, "learning_rate": 0.000156246087425796, "loss": 3.2153, "step": 68650 }, { "epoch": 7.407008086253369, "grad_norm": 0.8005858659744263, "learning_rate": 0.00015592228818132756, "loss": 3.2209, "step": 68700 }, { "epoch": 7.412398921832884, "grad_norm": 0.8065062761306763, "learning_rate": 0.0001555984889368591, "loss": 3.2241, "step": 68750 }, { "epoch": 7.4177897574123985, "grad_norm": 0.8123283386230469, "learning_rate": 0.0001552746896923907, "loss": 3.235, "step": 68800 }, { "epoch": 7.423180592991914, "grad_norm": 0.7728703022003174, "learning_rate": 0.00015495089044792226, "loss": 3.2198, "step": 68850 }, { "epoch": 7.428571428571429, "grad_norm": 0.7691994905471802, "learning_rate": 0.00015462709120345384, "loss": 3.2346, "step": 68900 }, { "epoch": 7.433962264150943, "grad_norm": 0.835544228553772, "learning_rate": 0.00015430329195898542, "loss": 3.2275, "step": 68950 }, { "epoch": 7.439353099730458, "grad_norm": 0.7981140613555908, "learning_rate": 0.000153979492714517, "loss": 3.2476, "step": 69000 }, { "epoch": 7.439353099730458, "eval_accuracy": 0.3881780877330781, "eval_loss": 3.350919723510742, "eval_runtime": 183.8391, "eval_samples_per_second": 97.972, "eval_steps_per_second": 6.125, "step": 69000 }, { "epoch": 7.444743935309973, "grad_norm": 0.804359495639801, "learning_rate": 0.00015365569347004858, "loss": 3.2264, "step": 69050 }, { "epoch": 7.450134770889488, "grad_norm": 0.7978847026824951, "learning_rate": 
0.00015333189422558015, "loss": 3.2213, "step": 69100 }, { "epoch": 7.455525606469003, "grad_norm": 0.8682163953781128, "learning_rate": 0.00015300809498111168, "loss": 3.2043, "step": 69150 }, { "epoch": 7.460916442048518, "grad_norm": 0.7981349229812622, "learning_rate": 0.00015268429573664325, "loss": 3.2463, "step": 69200 }, { "epoch": 7.466307277628032, "grad_norm": 0.8066042065620422, "learning_rate": 0.00015236049649217483, "loss": 3.2327, "step": 69250 }, { "epoch": 7.471698113207547, "grad_norm": 0.8053681254386902, "learning_rate": 0.0001520366972477064, "loss": 3.2211, "step": 69300 }, { "epoch": 7.4770889487870615, "grad_norm": 0.8309054374694824, "learning_rate": 0.000151712898003238, "loss": 3.2419, "step": 69350 }, { "epoch": 7.482479784366577, "grad_norm": 0.8017661571502686, "learning_rate": 0.00015138909875876956, "loss": 3.243, "step": 69400 }, { "epoch": 7.487870619946092, "grad_norm": 0.7868233919143677, "learning_rate": 0.00015106529951430114, "loss": 3.2369, "step": 69450 }, { "epoch": 7.493261455525606, "grad_norm": 0.8187854290008545, "learning_rate": 0.0001507415002698327, "loss": 3.2252, "step": 69500 }, { "epoch": 7.498652291105121, "grad_norm": 0.841691255569458, "learning_rate": 0.00015041770102536424, "loss": 3.2313, "step": 69550 }, { "epoch": 7.504043126684636, "grad_norm": 0.8016337752342224, "learning_rate": 0.00015009390178089582, "loss": 3.2255, "step": 69600 }, { "epoch": 7.509433962264151, "grad_norm": 0.7767087817192078, "learning_rate": 0.0001497701025364274, "loss": 3.2288, "step": 69650 }, { "epoch": 7.514824797843666, "grad_norm": 0.8359373211860657, "learning_rate": 0.00014944630329195898, "loss": 3.2401, "step": 69700 }, { "epoch": 7.520215633423181, "grad_norm": 0.818621039390564, "learning_rate": 0.00014912250404749055, "loss": 3.2298, "step": 69750 }, { "epoch": 7.525606469002695, "grad_norm": 0.8097342848777771, "learning_rate": 0.0001487987048030221, "loss": 3.2457, "step": 69800 }, { "epoch": 7.53099730458221, 
"grad_norm": 0.7905571460723877, "learning_rate": 0.00014847490555855368, "loss": 3.2211, "step": 69850 }, { "epoch": 7.536388140161725, "grad_norm": 0.7882384061813354, "learning_rate": 0.00014815110631408526, "loss": 3.2294, "step": 69900 }, { "epoch": 7.54177897574124, "grad_norm": 0.8214282393455505, "learning_rate": 0.0001478273070696168, "loss": 3.2369, "step": 69950 }, { "epoch": 7.547169811320755, "grad_norm": 0.8215126395225525, "learning_rate": 0.0001475035078251484, "loss": 3.2342, "step": 70000 }, { "epoch": 7.547169811320755, "eval_accuracy": 0.3886583334972181, "eval_loss": 3.3478920459747314, "eval_runtime": 183.3704, "eval_samples_per_second": 98.222, "eval_steps_per_second": 6.141, "step": 70000 }, { "epoch": 7.55256064690027, "grad_norm": 0.8579515218734741, "learning_rate": 0.00014717970858067997, "loss": 3.215, "step": 70050 }, { "epoch": 7.557951482479784, "grad_norm": 0.7795184850692749, "learning_rate": 0.00014685590933621154, "loss": 3.2367, "step": 70100 }, { "epoch": 7.563342318059299, "grad_norm": 0.8258717060089111, "learning_rate": 0.0001465321100917431, "loss": 3.2346, "step": 70150 }, { "epoch": 7.568733153638814, "grad_norm": 0.778454065322876, "learning_rate": 0.00014620831084727467, "loss": 3.2334, "step": 70200 }, { "epoch": 7.574123989218329, "grad_norm": 0.7907705307006836, "learning_rate": 0.00014588451160280625, "loss": 3.2301, "step": 70250 }, { "epoch": 7.579514824797844, "grad_norm": 0.8008063435554504, "learning_rate": 0.0001455607123583378, "loss": 3.2434, "step": 70300 }, { "epoch": 7.584905660377358, "grad_norm": 0.7994003891944885, "learning_rate": 0.00014523691311386938, "loss": 3.2157, "step": 70350 }, { "epoch": 7.590296495956873, "grad_norm": 0.8054956197738647, "learning_rate": 0.00014491311386940096, "loss": 3.2219, "step": 70400 }, { "epoch": 7.595687331536388, "grad_norm": 0.7711500525474548, "learning_rate": 0.00014458931462493253, "loss": 3.2291, "step": 70450 }, { "epoch": 7.601078167115903, "grad_norm": 
0.7859018445014954, "learning_rate": 0.00014426551538046408, "loss": 3.2422, "step": 70500 }, { "epoch": 7.606469002695418, "grad_norm": 0.8233794569969177, "learning_rate": 0.00014394171613599566, "loss": 3.2292, "step": 70550 }, { "epoch": 7.611859838274933, "grad_norm": 0.8168668746948242, "learning_rate": 0.0001436243928764166, "loss": 3.254, "step": 70600 }, { "epoch": 7.617250673854447, "grad_norm": 0.8281172513961792, "learning_rate": 0.00014330059363194816, "loss": 3.2292, "step": 70650 }, { "epoch": 7.622641509433962, "grad_norm": 0.824066698551178, "learning_rate": 0.00014297679438747974, "loss": 3.2311, "step": 70700 }, { "epoch": 7.628032345013477, "grad_norm": 0.8089962005615234, "learning_rate": 0.00014265299514301132, "loss": 3.2236, "step": 70750 }, { "epoch": 7.633423180592992, "grad_norm": 0.7989951372146606, "learning_rate": 0.0001423291958985429, "loss": 3.227, "step": 70800 }, { "epoch": 7.638814016172507, "grad_norm": 0.8355391621589661, "learning_rate": 0.00014200539665407445, "loss": 3.2313, "step": 70850 }, { "epoch": 7.644204851752022, "grad_norm": 0.8052586317062378, "learning_rate": 0.00014168159740960602, "loss": 3.2264, "step": 70900 }, { "epoch": 7.649595687331536, "grad_norm": 0.8242946863174438, "learning_rate": 0.0001413577981651376, "loss": 3.2342, "step": 70950 }, { "epoch": 7.654986522911051, "grad_norm": 0.829434335231781, "learning_rate": 0.00014103399892066918, "loss": 3.2355, "step": 71000 }, { "epoch": 7.654986522911051, "eval_accuracy": 0.3890025458458144, "eval_loss": 3.3414785861968994, "eval_runtime": 183.6585, "eval_samples_per_second": 98.068, "eval_steps_per_second": 6.131, "step": 71000 }, { "epoch": 7.660377358490566, "grad_norm": 0.7990546226501465, "learning_rate": 0.00014071019967620073, "loss": 3.2366, "step": 71050 }, { "epoch": 7.665768194070081, "grad_norm": 0.807453453540802, "learning_rate": 0.0001403864004317323, "loss": 3.2209, "step": 71100 }, { "epoch": 7.671159029649596, "grad_norm": 
0.8239540457725525, "learning_rate": 0.00014006260118726389, "loss": 3.2114, "step": 71150 }, { "epoch": 7.67654986522911, "grad_norm": 0.7842625975608826, "learning_rate": 0.00013973880194279546, "loss": 3.22, "step": 71200 }, { "epoch": 7.681940700808625, "grad_norm": 0.7766878008842468, "learning_rate": 0.00013941500269832704, "loss": 3.2296, "step": 71250 }, { "epoch": 7.6873315363881405, "grad_norm": 0.7896228432655334, "learning_rate": 0.0001390912034538586, "loss": 3.2524, "step": 71300 }, { "epoch": 7.692722371967655, "grad_norm": 0.7728307247161865, "learning_rate": 0.00013876740420939017, "loss": 3.2405, "step": 71350 }, { "epoch": 7.69811320754717, "grad_norm": 0.8142659664154053, "learning_rate": 0.00013844360496492175, "loss": 3.2361, "step": 71400 }, { "epoch": 7.703504043126685, "grad_norm": 0.8092532753944397, "learning_rate": 0.00013811980572045333, "loss": 3.224, "step": 71450 }, { "epoch": 7.708894878706199, "grad_norm": 0.8074396252632141, "learning_rate": 0.00013779600647598488, "loss": 3.227, "step": 71500 }, { "epoch": 7.714285714285714, "grad_norm": 0.8047695159912109, "learning_rate": 0.00013747220723151645, "loss": 3.2548, "step": 71550 }, { "epoch": 7.719676549865229, "grad_norm": 0.8205470442771912, "learning_rate": 0.00013714840798704803, "loss": 3.2417, "step": 71600 }, { "epoch": 7.725067385444744, "grad_norm": 0.8592079281806946, "learning_rate": 0.00013682460874257958, "loss": 3.2491, "step": 71650 }, { "epoch": 7.730458221024259, "grad_norm": 0.8482884168624878, "learning_rate": 0.00013650080949811116, "loss": 3.2452, "step": 71700 }, { "epoch": 7.735849056603773, "grad_norm": 0.820476233959198, "learning_rate": 0.00013617701025364274, "loss": 3.2502, "step": 71750 }, { "epoch": 7.741239892183288, "grad_norm": 0.8236547112464905, "learning_rate": 0.0001358532110091743, "loss": 3.2414, "step": 71800 }, { "epoch": 7.7466307277628035, "grad_norm": 0.8307732343673706, "learning_rate": 0.00013552941176470587, "loss": 3.2554, "step": 
71850 }, { "epoch": 7.752021563342318, "grad_norm": 0.7903879284858704, "learning_rate": 0.00013520561252023744, "loss": 3.2328, "step": 71900 }, { "epoch": 7.757412398921833, "grad_norm": 0.8310343027114868, "learning_rate": 0.00013488181327576902, "loss": 3.2614, "step": 71950 }, { "epoch": 7.762803234501348, "grad_norm": 0.8106740117073059, "learning_rate": 0.00013455801403130057, "loss": 3.2304, "step": 72000 }, { "epoch": 7.762803234501348, "eval_accuracy": 0.3895956819603936, "eval_loss": 3.340195417404175, "eval_runtime": 183.5574, "eval_samples_per_second": 98.122, "eval_steps_per_second": 6.134, "step": 72000 }, { "epoch": 7.768194070080862, "grad_norm": 0.749928891658783, "learning_rate": 0.00013423421478683215, "loss": 3.2317, "step": 72050 }, { "epoch": 7.773584905660377, "grad_norm": 0.8262786269187927, "learning_rate": 0.00013391041554236373, "loss": 3.22, "step": 72100 }, { "epoch": 7.7789757412398925, "grad_norm": 0.79350745677948, "learning_rate": 0.00013358661629789528, "loss": 3.234, "step": 72150 }, { "epoch": 7.784366576819407, "grad_norm": 0.8237420320510864, "learning_rate": 0.00013326281705342685, "loss": 3.2357, "step": 72200 }, { "epoch": 7.789757412398922, "grad_norm": 0.806442141532898, "learning_rate": 0.00013293901780895843, "loss": 3.2334, "step": 72250 }, { "epoch": 7.795148247978437, "grad_norm": 0.8202387690544128, "learning_rate": 0.00013261521856449, "loss": 3.2303, "step": 72300 }, { "epoch": 7.800539083557951, "grad_norm": 0.8479371070861816, "learning_rate": 0.00013229141932002156, "loss": 3.2328, "step": 72350 }, { "epoch": 7.8059299191374665, "grad_norm": 0.8242699503898621, "learning_rate": 0.00013196762007555314, "loss": 3.2365, "step": 72400 }, { "epoch": 7.811320754716981, "grad_norm": 0.882240355014801, "learning_rate": 0.00013164382083108472, "loss": 3.2313, "step": 72450 }, { "epoch": 7.816711590296496, "grad_norm": 0.8102225661277771, "learning_rate": 0.0001313200215866163, "loss": 3.239, "step": 72500 }, { "epoch": 
7.822102425876011, "grad_norm": 0.8708174824714661, "learning_rate": 0.00013099622234214784, "loss": 3.2317, "step": 72550 }, { "epoch": 7.827493261455525, "grad_norm": 0.8136692643165588, "learning_rate": 0.00013067242309767942, "loss": 3.2421, "step": 72600 }, { "epoch": 7.83288409703504, "grad_norm": 0.7785462737083435, "learning_rate": 0.000130348623853211, "loss": 3.2495, "step": 72650 }, { "epoch": 7.8382749326145555, "grad_norm": 0.8305777907371521, "learning_rate": 0.00013002482460874258, "loss": 3.2455, "step": 72700 }, { "epoch": 7.84366576819407, "grad_norm": null, "learning_rate": 0.0001297075013491635, "loss": 3.2382, "step": 72750 }, { "epoch": 7.849056603773585, "grad_norm": 0.8630931377410889, "learning_rate": 0.00012938370210469508, "loss": 3.2622, "step": 72800 }, { "epoch": 7.8544474393531, "grad_norm": 0.817644476890564, "learning_rate": 0.00012905990286022666, "loss": 3.2318, "step": 72850 }, { "epoch": 7.859838274932614, "grad_norm": 0.8427354693412781, "learning_rate": 0.0001287361036157582, "loss": 3.2369, "step": 72900 }, { "epoch": 7.8652291105121295, "grad_norm": 0.8348943591117859, "learning_rate": 0.00012841230437128979, "loss": 3.2545, "step": 72950 }, { "epoch": 7.870619946091644, "grad_norm": 0.8035464882850647, "learning_rate": 0.00012808850512682136, "loss": 3.2316, "step": 73000 }, { "epoch": 7.870619946091644, "eval_accuracy": 0.38966630633747307, "eval_loss": 3.3341665267944336, "eval_runtime": 183.9628, "eval_samples_per_second": 97.906, "eval_steps_per_second": 6.121, "step": 73000 }, { "epoch": 7.876010781671159, "grad_norm": 0.8106042146682739, "learning_rate": 0.00012776470588235294, "loss": 3.2295, "step": 73050 }, { "epoch": 7.881401617250674, "grad_norm": 0.8646166920661926, "learning_rate": 0.0001274409066378845, "loss": 3.2397, "step": 73100 }, { "epoch": 7.886792452830189, "grad_norm": 0.8176259398460388, "learning_rate": 0.00012711710739341607, "loss": 3.2437, "step": 73150 }, { "epoch": 7.892183288409703,
"grad_norm": 0.7888458967208862, "learning_rate": 0.00012679330814894765, "loss": 3.2228, "step": 73200 }, { "epoch": 7.8975741239892185, "grad_norm": 0.8205003142356873, "learning_rate": 0.00012646950890447922, "loss": 3.2399, "step": 73250 }, { "epoch": 7.902964959568733, "grad_norm": 0.8603438138961792, "learning_rate": 0.00012614570966001077, "loss": 3.24, "step": 73300 }, { "epoch": 7.908355795148248, "grad_norm": 0.8492730259895325, "learning_rate": 0.00012582191041554235, "loss": 3.2435, "step": 73350 }, { "epoch": 7.913746630727763, "grad_norm": 0.8674182295799255, "learning_rate": 0.00012549811117107393, "loss": 3.2483, "step": 73400 }, { "epoch": 7.919137466307277, "grad_norm": 0.8385972380638123, "learning_rate": 0.0001251743119266055, "loss": 3.2268, "step": 73450 }, { "epoch": 7.9245283018867925, "grad_norm": 0.8631293177604675, "learning_rate": 0.00012485051268213706, "loss": 3.2479, "step": 73500 }, { "epoch": 7.929919137466308, "grad_norm": 0.8175431489944458, "learning_rate": 0.00012452671343766864, "loss": 3.2322, "step": 73550 }, { "epoch": 7.935309973045822, "grad_norm": 0.8723227381706238, "learning_rate": 0.00012420291419320021, "loss": 3.2427, "step": 73600 }, { "epoch": 7.940700808625337, "grad_norm": 0.8406923413276672, "learning_rate": 0.0001238791149487318, "loss": 3.2237, "step": 73650 }, { "epoch": 7.946091644204852, "grad_norm": 0.8450952172279358, "learning_rate": 0.00012355531570426334, "loss": 3.228, "step": 73700 }, { "epoch": 7.951482479784366, "grad_norm": 0.7840113043785095, "learning_rate": 0.00012323151645979492, "loss": 3.221, "step": 73750 }, { "epoch": 7.9568733153638815, "grad_norm": 0.8030226230621338, "learning_rate": 0.0001229077172153265, "loss": 3.2, "step": 73800 }, { "epoch": 7.962264150943396, "grad_norm": 0.8302611112594604, "learning_rate": 0.00012258391797085805, "loss": 3.2252, "step": 73850 }, { "epoch": 7.967654986522911, "grad_norm": 0.8094905614852905, "learning_rate": 0.00012226011872638963, "loss": 
3.2324, "step": 73900 }, { "epoch": 7.973045822102426, "grad_norm": 0.8156700730323792, "learning_rate": 0.00012193631948192119, "loss": 3.2545, "step": 73950 }, { "epoch": 7.97843665768194, "grad_norm": 0.810791552066803, "learning_rate": 0.00012161252023745277, "loss": 3.2465, "step": 74000 }, { "epoch": 7.97843665768194, "eval_accuracy": 0.3901558962499651, "eval_loss": 3.331369400024414, "eval_runtime": 183.6514, "eval_samples_per_second": 98.072, "eval_steps_per_second": 6.131, "step": 74000 }, { "epoch": 7.9838274932614555, "grad_norm": 0.8125215172767639, "learning_rate": 0.00012128872099298435, "loss": 3.239, "step": 74050 }, { "epoch": 7.989218328840971, "grad_norm": 0.8124715685844421, "learning_rate": 0.00012096492174851591, "loss": 3.2293, "step": 74100 }, { "epoch": 7.994609164420485, "grad_norm": 0.9727672934532166, "learning_rate": 0.00012064112250404749, "loss": 3.2225, "step": 74150 }, { "epoch": 8.0, "grad_norm": 1.8616753816604614, "learning_rate": 0.00012031732325957905, "loss": 3.2173, "step": 74200 }, { "epoch": 8.005390835579515, "grad_norm": 0.8144136667251587, "learning_rate": 0.00011999352401511062, "loss": 3.1608, "step": 74250 }, { "epoch": 8.01078167115903, "grad_norm": 0.8414373397827148, "learning_rate": 0.00011966972477064219, "loss": 3.1587, "step": 74300 }, { "epoch": 8.016172506738544, "grad_norm": 0.8705826997756958, "learning_rate": 0.00011934592552617377, "loss": 3.1476, "step": 74350 }, { "epoch": 8.021563342318059, "grad_norm": 0.7987647652626038, "learning_rate": 0.00011902212628170532, "loss": 3.1579, "step": 74400 }, { "epoch": 8.026954177897574, "grad_norm": 0.8605558276176453, "learning_rate": 0.0001186983270372369, "loss": 3.1628, "step": 74450 }, { "epoch": 8.032345013477089, "grad_norm": 0.8623753786087036, "learning_rate": 0.00011837452779276848, "loss": 3.1631, "step": 74500 }, { "epoch": 8.037735849056604, "grad_norm": 0.831736147403717, "learning_rate": 0.00011805072854830005, "loss": 3.1766, "step": 74550 }, { 
"epoch": 8.04312668463612, "grad_norm": 0.7906613945960999, "learning_rate": 0.0001177269293038316, "loss": 3.1654, "step": 74600 }, { "epoch": 8.048517520215633, "grad_norm": 0.8411964178085327, "learning_rate": 0.00011740313005936318, "loss": 3.1646, "step": 74650 }, { "epoch": 8.053908355795148, "grad_norm": 0.8155192732810974, "learning_rate": 0.00011707933081489476, "loss": 3.1527, "step": 74700 }, { "epoch": 8.059299191374663, "grad_norm": 0.830322802066803, "learning_rate": 0.00011675553157042632, "loss": 3.1701, "step": 74750 }, { "epoch": 8.064690026954178, "grad_norm": 0.8275706171989441, "learning_rate": 0.00011643173232595789, "loss": 3.1642, "step": 74800 }, { "epoch": 8.070080862533693, "grad_norm": 0.884319007396698, "learning_rate": 0.00011610793308148947, "loss": 3.1643, "step": 74850 }, { "epoch": 8.075471698113208, "grad_norm": 0.8118297457695007, "learning_rate": 0.00011578413383702104, "loss": 3.1777, "step": 74900 }, { "epoch": 8.080862533692722, "grad_norm": 0.8348281979560852, "learning_rate": 0.00011546033459255261, "loss": 3.1559, "step": 74950 }, { "epoch": 8.086253369272237, "grad_norm": 0.8485154509544373, "learning_rate": 0.00011513653534808419, "loss": 3.1592, "step": 75000 }, { "epoch": 8.086253369272237, "eval_accuracy": 0.38975431517660275, "eval_loss": 3.3387372493743896, "eval_runtime": 183.7567, "eval_samples_per_second": 98.015, "eval_steps_per_second": 6.128, "step": 75000 }, { "epoch": 8.091644204851752, "grad_norm": 0.8829059600830078, "learning_rate": 0.00011481273610361575, "loss": 3.1685, "step": 75050 }, { "epoch": 8.097035040431267, "grad_norm": 0.8124864101409912, "learning_rate": 0.00011448893685914731, "loss": 3.1565, "step": 75100 }, { "epoch": 8.102425876010782, "grad_norm": 0.8549532294273376, "learning_rate": 0.00011416513761467889, "loss": 3.1708, "step": 75150 }, { "epoch": 8.107816711590296, "grad_norm": 0.8692648410797119, "learning_rate": 0.00011384133837021047, "loss": 3.1843, "step": 75200 }, { "epoch": 
8.11320754716981, "grad_norm": 0.8498212695121765, "learning_rate": 0.00011351753912574202, "loss": 3.1675, "step": 75250 }, { "epoch": 8.118598382749326, "grad_norm": 0.7981224656105042, "learning_rate": 0.0001131937398812736, "loss": 3.1784, "step": 75300 }, { "epoch": 8.123989218328841, "grad_norm": 0.8637334704399109, "learning_rate": 0.00011286994063680517, "loss": 3.1798, "step": 75350 }, { "epoch": 8.129380053908356, "grad_norm": 0.8062846660614014, "learning_rate": 0.00011254614139233675, "loss": 3.1836, "step": 75400 }, { "epoch": 8.134770889487871, "grad_norm": 0.8146725296974182, "learning_rate": 0.0001122223421478683, "loss": 3.172, "step": 75450 }, { "epoch": 8.140161725067385, "grad_norm": 0.8586001992225647, "learning_rate": 0.00011189854290339988, "loss": 3.1706, "step": 75500 }, { "epoch": 8.1455525606469, "grad_norm": 0.8062219619750977, "learning_rate": 0.00011157474365893146, "loss": 3.1751, "step": 75550 }, { "epoch": 8.150943396226415, "grad_norm": 0.8286907076835632, "learning_rate": 0.00011125094441446302, "loss": 3.1779, "step": 75600 }, { "epoch": 8.15633423180593, "grad_norm": 0.8133478164672852, "learning_rate": 0.00011092714516999459, "loss": 3.1643, "step": 75650 }, { "epoch": 8.161725067385445, "grad_norm": 0.8967101573944092, "learning_rate": 0.00011060334592552616, "loss": 3.176, "step": 75700 }, { "epoch": 8.167115902964959, "grad_norm": 0.8152874112129211, "learning_rate": 0.00011027954668105773, "loss": 3.1753, "step": 75750 }, { "epoch": 8.172506738544474, "grad_norm": 0.8211420774459839, "learning_rate": 0.0001099557474365893, "loss": 3.194, "step": 75800 }, { "epoch": 8.177897574123989, "grad_norm": 0.8731687068939209, "learning_rate": 0.00010963194819212088, "loss": 3.199, "step": 75850 }, { "epoch": 8.183288409703504, "grad_norm": 0.8784967660903931, "learning_rate": 0.00010930814894765243, "loss": 3.1609, "step": 75900 }, { "epoch": 8.18867924528302, "grad_norm": 0.8387054204940796, "learning_rate": 0.00010898434970318401, 
"loss": 3.1826, "step": 75950 }, { "epoch": 8.194070080862534, "grad_norm": 0.8362334370613098, "learning_rate": 0.00010866055045871559, "loss": 3.185, "step": 76000 }, { "epoch": 8.194070080862534, "eval_accuracy": 0.3901588298779361, "eval_loss": 3.3355491161346436, "eval_runtime": 183.8342, "eval_samples_per_second": 97.974, "eval_steps_per_second": 6.125, "step": 76000 }, { "epoch": 8.199460916442048, "grad_norm": 0.8048451542854309, "learning_rate": 0.00010833675121424717, "loss": 3.1804, "step": 76050 }, { "epoch": 8.204851752021563, "grad_norm": 0.8230847120285034, "learning_rate": 0.00010801295196977872, "loss": 3.1555, "step": 76100 }, { "epoch": 8.210242587601078, "grad_norm": 0.82015061378479, "learning_rate": 0.0001076891527253103, "loss": 3.1869, "step": 76150 }, { "epoch": 8.215633423180593, "grad_norm": 0.8503551483154297, "learning_rate": 0.00010737182946573123, "loss": 3.1691, "step": 76200 }, { "epoch": 8.221024258760108, "grad_norm": 0.8172155022621155, "learning_rate": 0.0001070480302212628, "loss": 3.1655, "step": 76250 }, { "epoch": 8.226415094339623, "grad_norm": 0.7921382188796997, "learning_rate": 0.00010672423097679438, "loss": 3.1808, "step": 76300 }, { "epoch": 8.231805929919137, "grad_norm": 0.8233168125152588, "learning_rate": 0.00010640043173232595, "loss": 3.1727, "step": 76350 }, { "epoch": 8.237196765498652, "grad_norm": 0.8275365233421326, "learning_rate": 0.00010607663248785753, "loss": 3.166, "step": 76400 }, { "epoch": 8.242587601078167, "grad_norm": 0.8599715232849121, "learning_rate": 0.00010575283324338908, "loss": 3.1878, "step": 76450 }, { "epoch": 8.247978436657682, "grad_norm": 0.8927331566810608, "learning_rate": 0.00010542903399892066, "loss": 3.1882, "step": 76500 }, { "epoch": 8.253369272237197, "grad_norm": 0.8399921655654907, "learning_rate": 0.00010510523475445224, "loss": 3.1921, "step": 76550 }, { "epoch": 8.25876010781671, "grad_norm": 0.8249356150627136, "learning_rate": 0.0001047814355099838, "loss": 3.1809, 
"step": 76600 }, { "epoch": 8.264150943396226, "grad_norm": 0.8486528396606445, "learning_rate": 0.00010445763626551537, "loss": 3.1722, "step": 76650 }, { "epoch": 8.269541778975741, "grad_norm": 0.8354886174201965, "learning_rate": 0.00010413383702104694, "loss": 3.1934, "step": 76700 }, { "epoch": 8.274932614555256, "grad_norm": 0.8391222357749939, "learning_rate": 0.00010381003777657852, "loss": 3.1813, "step": 76750 }, { "epoch": 8.280323450134771, "grad_norm": 0.8095042705535889, "learning_rate": 0.00010348623853211008, "loss": 3.1764, "step": 76800 }, { "epoch": 8.285714285714286, "grad_norm": 0.8261136412620544, "learning_rate": 0.00010316243928764165, "loss": 3.1795, "step": 76850 }, { "epoch": 8.2911051212938, "grad_norm": 0.8423893451690674, "learning_rate": 0.00010283864004317323, "loss": 3.1935, "step": 76900 }, { "epoch": 8.296495956873315, "grad_norm": 0.8349312543869019, "learning_rate": 0.00010251484079870479, "loss": 3.1764, "step": 76950 }, { "epoch": 8.30188679245283, "grad_norm": 0.864117443561554, "learning_rate": 0.00010219104155423637, "loss": 3.1758, "step": 77000 }, { "epoch": 8.30188679245283, "eval_accuracy": 0.3905072796891571, "eval_loss": 3.3320937156677246, "eval_runtime": 183.9498, "eval_samples_per_second": 97.913, "eval_steps_per_second": 6.121, "step": 77000 }, { "epoch": 8.307277628032345, "grad_norm": 0.8279615044593811, "learning_rate": 0.00010186724230976793, "loss": 3.1842, "step": 77050 }, { "epoch": 8.31266846361186, "grad_norm": 0.9203056693077087, "learning_rate": 0.0001015434430652995, "loss": 3.1882, "step": 77100 }, { "epoch": 8.318059299191376, "grad_norm": 0.9166232943534851, "learning_rate": 0.00010121964382083107, "loss": 3.1777, "step": 77150 }, { "epoch": 8.323450134770889, "grad_norm": 0.8042053580284119, "learning_rate": 0.00010089584457636265, "loss": 3.171, "step": 77200 }, { "epoch": 8.328840970350404, "grad_norm": 0.7936014533042908, "learning_rate": 0.00010057204533189423, "loss": 3.1757, "step": 77250 }, 
{ "epoch": 8.33423180592992, "grad_norm": 0.8509289026260376, "learning_rate": 0.00010024824608742578, "loss": 3.1787, "step": 77300 }, { "epoch": 8.339622641509434, "grad_norm": 0.8344526290893555, "learning_rate": 9.992444684295736e-05, "loss": 3.1696, "step": 77350 }, { "epoch": 8.34501347708895, "grad_norm": 0.8416390419006348, "learning_rate": 9.960064759848894e-05, "loss": 3.2041, "step": 77400 }, { "epoch": 8.350404312668463, "grad_norm": 0.8684095740318298, "learning_rate": 9.92768483540205e-05, "loss": 3.1784, "step": 77450 }, { "epoch": 8.355795148247978, "grad_norm": 0.8323090672492981, "learning_rate": 9.895304910955206e-05, "loss": 3.1704, "step": 77500 }, { "epoch": 8.361185983827493, "grad_norm": 0.8483664989471436, "learning_rate": 9.862924986508364e-05, "loss": 3.1927, "step": 77550 }, { "epoch": 8.366576819407008, "grad_norm": 0.818683922290802, "learning_rate": 9.83054506206152e-05, "loss": 3.186, "step": 77600 }, { "epoch": 8.371967654986523, "grad_norm": 0.8603020906448364, "learning_rate": 9.798165137614678e-05, "loss": 3.1818, "step": 77650 }, { "epoch": 8.377358490566039, "grad_norm": 0.8788433074951172, "learning_rate": 9.765785213167835e-05, "loss": 3.1842, "step": 77700 }, { "epoch": 8.382749326145552, "grad_norm": 0.8362876772880554, "learning_rate": 9.733405288720992e-05, "loss": 3.1768, "step": 77750 }, { "epoch": 8.388140161725067, "grad_norm": 0.8131971955299377, "learning_rate": 9.701025364274149e-05, "loss": 3.1838, "step": 77800 }, { "epoch": 8.393530997304582, "grad_norm": 0.8119576573371887, "learning_rate": 9.668645439827307e-05, "loss": 3.1686, "step": 77850 }, { "epoch": 8.398921832884097, "grad_norm": 0.8404786586761475, "learning_rate": 9.636265515380464e-05, "loss": 3.1771, "step": 77900 }, { "epoch": 8.404312668463612, "grad_norm": 0.8290375471115112, "learning_rate": 9.60388559093362e-05, "loss": 3.1822, "step": 77950 }, { "epoch": 8.409703504043126, "grad_norm": 0.8503365516662598, "learning_rate": 
9.571505666486777e-05, "loss": 3.1928, "step": 78000 }, { "epoch": 8.409703504043126, "eval_accuracy": 0.390971770784564, "eval_loss": 3.3296592235565186, "eval_runtime": 183.6471, "eval_samples_per_second": 98.074, "eval_steps_per_second": 6.131, "step": 78000 }, { "epoch": 8.415094339622641, "grad_norm": 0.8500294089317322, "learning_rate": 9.539125742039935e-05, "loss": 3.1717, "step": 78050 }, { "epoch": 8.420485175202156, "grad_norm": 0.8462092280387878, "learning_rate": 9.506745817593093e-05, "loss": 3.1679, "step": 78100 }, { "epoch": 8.425876010781671, "grad_norm": 0.8475029468536377, "learning_rate": 9.474365893146248e-05, "loss": 3.1917, "step": 78150 }, { "epoch": 8.431266846361186, "grad_norm": 0.887370765209198, "learning_rate": 9.441985968699406e-05, "loss": 3.2017, "step": 78200 }, { "epoch": 8.436657681940702, "grad_norm": 0.8378453254699707, "learning_rate": 9.409606044252563e-05, "loss": 3.1763, "step": 78250 }, { "epoch": 8.442048517520215, "grad_norm": 0.8680948615074158, "learning_rate": 9.37722611980572e-05, "loss": 3.1699, "step": 78300 }, { "epoch": 8.44743935309973, "grad_norm": 0.8394586443901062, "learning_rate": 9.344846195358876e-05, "loss": 3.177, "step": 78350 }, { "epoch": 8.452830188679245, "grad_norm": 0.850395679473877, "learning_rate": 9.312466270912034e-05, "loss": 3.1694, "step": 78400 }, { "epoch": 8.45822102425876, "grad_norm": 0.8615196943283081, "learning_rate": 9.28008634646519e-05, "loss": 3.1778, "step": 78450 }, { "epoch": 8.463611859838275, "grad_norm": 0.8771566152572632, "learning_rate": 9.247706422018348e-05, "loss": 3.1854, "step": 78500 }, { "epoch": 8.46900269541779, "grad_norm": 0.8475139737129211, "learning_rate": 9.215326497571505e-05, "loss": 3.1787, "step": 78550 }, { "epoch": 8.474393530997304, "grad_norm": 0.9131626486778259, "learning_rate": 9.182946573124661e-05, "loss": 3.1901, "step": 78600 }, { "epoch": 8.479784366576819, "grad_norm": 0.8300061821937561, "learning_rate": 9.151214247166756e-05, "loss": 
3.1735, "step": 78650 }, { "epoch": 8.485175202156334, "grad_norm": 0.8313758373260498, "learning_rate": 9.118834322719913e-05, "loss": 3.1882, "step": 78700 }, { "epoch": 8.49056603773585, "grad_norm": 0.8368052244186401, "learning_rate": 9.08645439827307e-05, "loss": 3.1876, "step": 78750 }, { "epoch": 8.495956873315365, "grad_norm": 0.9024884700775146, "learning_rate": 9.054074473826227e-05, "loss": 3.1979, "step": 78800 }, { "epoch": 8.501347708894878, "grad_norm": 0.8912298679351807, "learning_rate": 9.021694549379385e-05, "loss": 3.1707, "step": 78850 }, { "epoch": 8.506738544474393, "grad_norm": 0.838214099407196, "learning_rate": 8.989314624932541e-05, "loss": 3.1788, "step": 78900 }, { "epoch": 8.512129380053908, "grad_norm": 0.7878483533859253, "learning_rate": 8.956934700485697e-05, "loss": 3.1794, "step": 78950 }, { "epoch": 8.517520215633423, "grad_norm": 0.8773841857910156, "learning_rate": 8.924554776038855e-05, "loss": 3.1793, "step": 79000 }, { "epoch": 8.517520215633423, "eval_accuracy": 0.3911975514854425, "eval_loss": 3.3264498710632324, "eval_runtime": 184.0527, "eval_samples_per_second": 97.858, "eval_steps_per_second": 6.118, "step": 79000 }, { "epoch": 8.522911051212938, "grad_norm": 0.885176956653595, "learning_rate": 8.892174851592013e-05, "loss": 3.1912, "step": 79050 }, { "epoch": 8.528301886792454, "grad_norm": 0.8777654767036438, "learning_rate": 8.859794927145168e-05, "loss": 3.187, "step": 79100 }, { "epoch": 8.533692722371967, "grad_norm": 0.8870535492897034, "learning_rate": 8.827415002698326e-05, "loss": 3.1758, "step": 79150 }, { "epoch": 8.539083557951482, "grad_norm": 0.8505139946937561, "learning_rate": 8.795035078251483e-05, "loss": 3.1889, "step": 79200 }, { "epoch": 8.544474393530997, "grad_norm": 0.8751934766769409, "learning_rate": 8.762655153804641e-05, "loss": 3.1847, "step": 79250 }, { "epoch": 8.549865229110512, "grad_norm": 0.9024791717529297, "learning_rate": 8.730275229357798e-05, "loss": 3.1797, "step": 79300 }, { 
"epoch": 8.555256064690028, "grad_norm": 0.9003559947013855, "learning_rate": 8.697895304910954e-05, "loss": 3.2043, "step": 79350 }, { "epoch": 8.560646900269543, "grad_norm": 0.8512437343597412, "learning_rate": 8.665515380464112e-05, "loss": 3.1781, "step": 79400 }, { "epoch": 8.566037735849056, "grad_norm": 0.8742110729217529, "learning_rate": 8.633135456017268e-05, "loss": 3.1958, "step": 79450 }, { "epoch": 8.571428571428571, "grad_norm": 0.8985169529914856, "learning_rate": 8.600755531570426e-05, "loss": 3.1726, "step": 79500 }, { "epoch": 8.576819407008086, "grad_norm": 0.8324291110038757, "learning_rate": 8.568375607123582e-05, "loss": 3.1744, "step": 79550 }, { "epoch": 8.582210242587601, "grad_norm": 0.8357748985290527, "learning_rate": 8.53599568267674e-05, "loss": 3.1933, "step": 79600 }, { "epoch": 8.587601078167117, "grad_norm": 0.8282589912414551, "learning_rate": 8.503615758229897e-05, "loss": 3.1728, "step": 79650 }, { "epoch": 8.59299191374663, "grad_norm": 0.9156341552734375, "learning_rate": 8.471235833783054e-05, "loss": 3.1786, "step": 79700 }, { "epoch": 8.598382749326145, "grad_norm": 0.8525185585021973, "learning_rate": 8.438855909336211e-05, "loss": 3.1799, "step": 79750 }, { "epoch": 8.60377358490566, "grad_norm": 0.8564472794532776, "learning_rate": 8.406475984889367e-05, "loss": 3.1775, "step": 79800 }, { "epoch": 8.609164420485175, "grad_norm": 0.8167390823364258, "learning_rate": 8.374096060442525e-05, "loss": 3.176, "step": 79850 }, { "epoch": 8.61455525606469, "grad_norm": 0.8467491269111633, "learning_rate": 8.341716135995683e-05, "loss": 3.1902, "step": 79900 }, { "epoch": 8.619946091644206, "grad_norm": 0.8552255034446716, "learning_rate": 8.309336211548838e-05, "loss": 3.1948, "step": 79950 }, { "epoch": 8.625336927223719, "grad_norm": 0.8199063539505005, "learning_rate": 8.276956287101996e-05, "loss": 3.1757, "step": 80000 }, { "epoch": 8.625336927223719, "eval_accuracy": 0.3916971374636135, "eval_loss": 3.32309889793396, 
"eval_runtime": 183.6042, "eval_samples_per_second": 98.097, "eval_steps_per_second": 6.133, "step": 80000 }, { "epoch": 8.630727762803234, "grad_norm": 0.8239213228225708, "learning_rate": 8.244576362655153e-05, "loss": 3.173, "step": 80050 }, { "epoch": 8.63611859838275, "grad_norm": 0.8665240406990051, "learning_rate": 8.212196438208311e-05, "loss": 3.174, "step": 80100 }, { "epoch": 8.641509433962264, "grad_norm": 0.8977245092391968, "learning_rate": 8.179816513761467e-05, "loss": 3.1986, "step": 80150 }, { "epoch": 8.64690026954178, "grad_norm": 0.821174144744873, "learning_rate": 8.147436589314624e-05, "loss": 3.1776, "step": 80200 }, { "epoch": 8.652291105121293, "grad_norm": 0.8627246618270874, "learning_rate": 8.115056664867782e-05, "loss": 3.1993, "step": 80250 }, { "epoch": 8.657681940700808, "grad_norm": 0.8275755643844604, "learning_rate": 8.082676740420938e-05, "loss": 3.1825, "step": 80300 }, { "epoch": 8.663072776280323, "grad_norm": 0.8466624617576599, "learning_rate": 8.050296815974096e-05, "loss": 3.186, "step": 80350 }, { "epoch": 8.668463611859838, "grad_norm": 0.8501802086830139, "learning_rate": 8.017916891527252e-05, "loss": 3.1807, "step": 80400 }, { "epoch": 8.673854447439354, "grad_norm": 0.8679507970809937, "learning_rate": 7.985536967080409e-05, "loss": 3.1781, "step": 80450 }, { "epoch": 8.679245283018869, "grad_norm": 0.8288442492485046, "learning_rate": 7.953157042633566e-05, "loss": 3.1756, "step": 80500 }, { "epoch": 8.684636118598382, "grad_norm": 0.9615182280540466, "learning_rate": 7.920777118186724e-05, "loss": 3.1839, "step": 80550 }, { "epoch": 8.690026954177897, "grad_norm": 0.8873838186264038, "learning_rate": 7.888397193739879e-05, "loss": 3.1769, "step": 80600 }, { "epoch": 8.695417789757412, "grad_norm": 0.8448495268821716, "learning_rate": 7.856017269293037e-05, "loss": 3.1886, "step": 80650 }, { "epoch": 8.700808625336927, "grad_norm": 0.880897045135498, "learning_rate": 7.823637344846195e-05, "loss": 3.1907, "step": 
80700 }, { "epoch": 8.706199460916443, "grad_norm": 0.858959436416626, "learning_rate": 7.791257420399353e-05, "loss": 3.1827, "step": 80750 }, { "epoch": 8.711590296495956, "grad_norm": 0.8667418360710144, "learning_rate": 7.758877495952508e-05, "loss": 3.1533, "step": 80800 }, { "epoch": 8.716981132075471, "grad_norm": 0.8277961611747742, "learning_rate": 7.726497571505665e-05, "loss": 3.1854, "step": 80850 }, { "epoch": 8.722371967654986, "grad_norm": 0.8159651160240173, "learning_rate": 7.694117647058823e-05, "loss": 3.1746, "step": 80900 }, { "epoch": 8.727762803234501, "grad_norm": 0.8652306795120239, "learning_rate": 7.661737722611981e-05, "loss": 3.1753, "step": 80950 }, { "epoch": 8.733153638814017, "grad_norm": 0.8533473014831543, "learning_rate": 7.629357798165137e-05, "loss": 3.1885, "step": 81000 }, { "epoch": 8.733153638814017, "eval_accuracy": 0.3921426143036529, "eval_loss": 3.3177406787872314, "eval_runtime": 183.8875, "eval_samples_per_second": 97.946, "eval_steps_per_second": 6.123, "step": 81000 }, { "epoch": 8.738544474393532, "grad_norm": 0.881313145160675, "learning_rate": 7.596977873718294e-05, "loss": 3.1966, "step": 81050 }, { "epoch": 8.743935309973045, "grad_norm": 0.8376140594482422, "learning_rate": 7.564597949271451e-05, "loss": 3.1998, "step": 81100 }, { "epoch": 8.74932614555256, "grad_norm": 0.8664501309394836, "learning_rate": 7.532218024824608e-05, "loss": 3.1735, "step": 81150 }, { "epoch": 8.754716981132075, "grad_norm": 0.8317979574203491, "learning_rate": 7.499838100377764e-05, "loss": 3.1951, "step": 81200 }, { "epoch": 8.76010781671159, "grad_norm": 0.8491113185882568, "learning_rate": 7.467458175930922e-05, "loss": 3.1775, "step": 81250 }, { "epoch": 8.765498652291106, "grad_norm": 0.8893978595733643, "learning_rate": 7.435078251484078e-05, "loss": 3.1669, "step": 81300 }, { "epoch": 8.77088948787062, "grad_norm": 0.8895040154457092, "learning_rate": 7.402698327037236e-05, "loss": 3.1894, "step": 81350 }, { "epoch": 
8.776280323450134, "grad_norm": 0.9167323708534241, "learning_rate": 7.370318402590393e-05, "loss": 3.2023, "step": 81400 }, { "epoch": 8.78167115902965, "grad_norm": 0.8975420594215393, "learning_rate": 7.338586076632488e-05, "loss": 3.1803, "step": 81450 }, { "epoch": 8.787061994609164, "grad_norm": 0.8334147334098816, "learning_rate": 7.306206152185644e-05, "loss": 3.1854, "step": 81500 }, { "epoch": 8.79245283018868, "grad_norm": 0.8818899393081665, "learning_rate": 7.273826227738801e-05, "loss": 3.1842, "step": 81550 }, { "epoch": 8.797843665768195, "grad_norm": 0.8167200088500977, "learning_rate": 7.241446303291958e-05, "loss": 3.1631, "step": 81600 }, { "epoch": 8.80323450134771, "grad_norm": 0.8305187821388245, "learning_rate": 7.209066378845115e-05, "loss": 3.1923, "step": 81650 }, { "epoch": 8.808625336927223, "grad_norm": 0.8849042057991028, "learning_rate": 7.176686454398273e-05, "loss": 3.1956, "step": 81700 }, { "epoch": 8.814016172506738, "grad_norm": 0.8665215969085693, "learning_rate": 7.144306529951429e-05, "loss": 3.1857, "step": 81750 }, { "epoch": 8.819407008086253, "grad_norm": 0.8836647868156433, "learning_rate": 7.111926605504587e-05, "loss": 3.1706, "step": 81800 }, { "epoch": 8.824797843665769, "grad_norm": 0.8913038372993469, "learning_rate": 7.079546681057743e-05, "loss": 3.1907, "step": 81850 }, { "epoch": 8.830188679245284, "grad_norm": 0.8813517093658447, "learning_rate": 7.047166756610901e-05, "loss": 3.1911, "step": 81900 }, { "epoch": 8.835579514824797, "grad_norm": 0.8769224286079407, "learning_rate": 7.014786832164057e-05, "loss": 3.1783, "step": 81950 }, { "epoch": 8.840970350404312, "grad_norm": 0.8626630306243896, "learning_rate": 6.982406907717215e-05, "loss": 3.1939, "step": 82000 }, { "epoch": 8.840970350404312, "eval_accuracy": 0.39245173176948506, "eval_loss": 3.3154091835021973, "eval_runtime": 183.9266, "eval_samples_per_second": 97.925, "eval_steps_per_second": 6.122, "step": 82000 }, { "epoch": 8.846361185983827, 
"grad_norm": 0.8761478662490845, "learning_rate": 6.950026983270372e-05, "loss": 3.1862, "step": 82050 }, { "epoch": 8.851752021563343, "grad_norm": 0.8595954179763794, "learning_rate": 6.91764705882353e-05, "loss": 3.1753, "step": 82100 }, { "epoch": 8.857142857142858, "grad_norm": 0.8653388619422913, "learning_rate": 6.885267134376686e-05, "loss": 3.1944, "step": 82150 }, { "epoch": 8.862533692722373, "grad_norm": 0.9042940735816956, "learning_rate": 6.852887209929844e-05, "loss": 3.1794, "step": 82200 }, { "epoch": 8.867924528301886, "grad_norm": 0.8679096698760986, "learning_rate": 6.820507285483e-05, "loss": 3.1972, "step": 82250 }, { "epoch": 8.873315363881401, "grad_norm": 0.8591122627258301, "learning_rate": 6.788127361036156e-05, "loss": 3.1823, "step": 82300 }, { "epoch": 8.878706199460916, "grad_norm": 0.829720675945282, "learning_rate": 6.755747436589314e-05, "loss": 3.1901, "step": 82350 }, { "epoch": 8.884097035040432, "grad_norm": 0.8924640417098999, "learning_rate": 6.72336751214247e-05, "loss": 3.1803, "step": 82400 }, { "epoch": 8.889487870619947, "grad_norm": 0.8685306906700134, "learning_rate": 6.690987587695628e-05, "loss": 3.1928, "step": 82450 }, { "epoch": 8.89487870619946, "grad_norm": 0.8657023906707764, "learning_rate": 6.658607663248785e-05, "loss": 3.1798, "step": 82500 }, { "epoch": 8.900269541778975, "grad_norm": 0.8230668902397156, "learning_rate": 6.626227738801942e-05, "loss": 3.1695, "step": 82550 }, { "epoch": 8.90566037735849, "grad_norm": 0.8264906406402588, "learning_rate": 6.593847814355099e-05, "loss": 3.1872, "step": 82600 }, { "epoch": 8.911051212938006, "grad_norm": 0.872795820236206, "learning_rate": 6.561467889908257e-05, "loss": 3.1811, "step": 82650 }, { "epoch": 8.91644204851752, "grad_norm": 0.8508303761482239, "learning_rate": 6.529087965461413e-05, "loss": 3.1977, "step": 82700 }, { "epoch": 8.921832884097036, "grad_norm": 0.8353860974311829, "learning_rate": 6.496708041014571e-05, "loss": 3.1709, "step": 82750 }, 
{ "epoch": 8.92722371967655, "grad_norm": 0.8931666016578674, "learning_rate": 6.464328116567727e-05, "loss": 3.1748, "step": 82800 }, { "epoch": 8.932614555256064, "grad_norm": 0.8589481115341187, "learning_rate": 6.431948192120885e-05, "loss": 3.1785, "step": 82850 }, { "epoch": 8.93800539083558, "grad_norm": 0.8512519598007202, "learning_rate": 6.399568267674041e-05, "loss": 3.1763, "step": 82900 }, { "epoch": 8.943396226415095, "grad_norm": 0.8806709051132202, "learning_rate": 6.367188343227199e-05, "loss": 3.1768, "step": 82950 }, { "epoch": 8.94878706199461, "grad_norm": 0.8603725433349609, "learning_rate": 6.334808418780356e-05, "loss": 3.172, "step": 83000 }, { "epoch": 8.94878706199461, "eval_accuracy": 0.3926380714720869, "eval_loss": 3.3128795623779297, "eval_runtime": 184.4695, "eval_samples_per_second": 97.637, "eval_steps_per_second": 6.104, "step": 83000 }, { "epoch": 8.954177897574123, "grad_norm": 0.8853550553321838, "learning_rate": 6.302428494333512e-05, "loss": 3.169, "step": 83050 }, { "epoch": 8.959568733153638, "grad_norm": 0.8855602741241455, "learning_rate": 6.27004856988667e-05, "loss": 3.1791, "step": 83100 }, { "epoch": 8.964959568733153, "grad_norm": 0.8563063740730286, "learning_rate": 6.237668645439826e-05, "loss": 3.2049, "step": 83150 }, { "epoch": 8.970350404312669, "grad_norm": 0.8717796206474304, "learning_rate": 6.205288720992984e-05, "loss": 3.182, "step": 83200 }, { "epoch": 8.975741239892184, "grad_norm": 0.8790916204452515, "learning_rate": 6.17290879654614e-05, "loss": 3.1686, "step": 83250 }, { "epoch": 8.981132075471699, "grad_norm": 0.8493698239326477, "learning_rate": 6.140528872099298e-05, "loss": 3.178, "step": 83300 }, { "epoch": 8.986522911051212, "grad_norm": 0.8447183966636658, "learning_rate": 6.108148947652455e-05, "loss": 3.182, "step": 83350 }, { "epoch": 8.991913746630727, "grad_norm": 0.8738790154457092, "learning_rate": 6.075769023205612e-05, "loss": 3.1719, "step": 83400 }, { "epoch": 8.997304582210242, 
"grad_norm": 0.9319518804550171, "learning_rate": 6.043389098758769e-05, "loss": 3.1974, "step": 83450 }, { "epoch": 9.002695417789758, "grad_norm": 0.8708575367927551, "learning_rate": 6.0110091743119265e-05, "loss": 3.1247, "step": 83500 }, { "epoch": 9.008086253369273, "grad_norm": 0.8379282355308533, "learning_rate": 5.978629249865083e-05, "loss": 3.1218, "step": 83550 }, { "epoch": 9.013477088948788, "grad_norm": 0.8564487099647522, "learning_rate": 5.94624932541824e-05, "loss": 3.1391, "step": 83600 }, { "epoch": 9.018867924528301, "grad_norm": 0.8536072969436646, "learning_rate": 5.9145169994603345e-05, "loss": 3.1322, "step": 83650 }, { "epoch": 9.024258760107816, "grad_norm": 0.8448429703712463, "learning_rate": 5.882137075013491e-05, "loss": 3.1157, "step": 83700 }, { "epoch": 9.029649595687331, "grad_norm": 0.8528944849967957, "learning_rate": 5.849757150566649e-05, "loss": 3.1142, "step": 83750 }, { "epoch": 9.035040431266847, "grad_norm": 0.8785792589187622, "learning_rate": 5.817377226119805e-05, "loss": 3.1166, "step": 83800 }, { "epoch": 9.040431266846362, "grad_norm": 0.872730553150177, "learning_rate": 5.784997301672963e-05, "loss": 3.1049, "step": 83850 }, { "epoch": 9.045822102425875, "grad_norm": 0.8977234363555908, "learning_rate": 5.752617377226119e-05, "loss": 3.1209, "step": 83900 }, { "epoch": 9.05121293800539, "grad_norm": 0.8592477440834045, "learning_rate": 5.7202374527792764e-05, "loss": 3.125, "step": 83950 }, { "epoch": 9.056603773584905, "grad_norm": 0.8540244698524475, "learning_rate": 5.6878575283324335e-05, "loss": 3.1069, "step": 84000 }, { "epoch": 9.056603773584905, "eval_accuracy": 0.39254386941835173, "eval_loss": 3.316199541091919, "eval_runtime": 183.6321, "eval_samples_per_second": 98.082, "eval_steps_per_second": 6.132, "step": 84000 }, { "epoch": 9.06199460916442, "grad_norm": 0.8707414865493774, "learning_rate": 5.6554776038855905e-05, "loss": 3.1212, "step": 84050 }, { "epoch": 9.067385444743936, "grad_norm": 
0.8910436630249023, "learning_rate": 5.623097679438747e-05, "loss": 3.1472, "step": 84100 }, { "epoch": 9.07277628032345, "grad_norm": 0.9022126197814941, "learning_rate": 5.590717754991905e-05, "loss": 3.1256, "step": 84150 }, { "epoch": 9.078167115902964, "grad_norm": 0.8683561682701111, "learning_rate": 5.558337830545061e-05, "loss": 3.1405, "step": 84200 }, { "epoch": 9.08355795148248, "grad_norm": 0.8293735384941101, "learning_rate": 5.525957906098219e-05, "loss": 3.1366, "step": 84250 }, { "epoch": 9.088948787061994, "grad_norm": 0.8638619780540466, "learning_rate": 5.493577981651375e-05, "loss": 3.1273, "step": 84300 }, { "epoch": 9.09433962264151, "grad_norm": 0.8600364327430725, "learning_rate": 5.4611980572045324e-05, "loss": 3.1203, "step": 84350 }, { "epoch": 9.099730458221025, "grad_norm": 0.9247821569442749, "learning_rate": 5.4288181327576895e-05, "loss": 3.1105, "step": 84400 }, { "epoch": 9.10512129380054, "grad_norm": 0.8678122758865356, "learning_rate": 5.3964382083108466e-05, "loss": 3.1373, "step": 84450 }, { "epoch": 9.110512129380053, "grad_norm": 0.8575831651687622, "learning_rate": 5.3640582838640043e-05, "loss": 3.1233, "step": 84500 }, { "epoch": 9.115902964959568, "grad_norm": 0.8633356690406799, "learning_rate": 5.331678359417161e-05, "loss": 3.133, "step": 84550 }, { "epoch": 9.121293800539084, "grad_norm": 0.8657795190811157, "learning_rate": 5.2992984349703185e-05, "loss": 3.1199, "step": 84600 }, { "epoch": 9.126684636118599, "grad_norm": 0.8592625856399536, "learning_rate": 5.266918510523475e-05, "loss": 3.108, "step": 84650 }, { "epoch": 9.132075471698114, "grad_norm": 0.8899664282798767, "learning_rate": 5.234538586076632e-05, "loss": 3.1206, "step": 84700 }, { "epoch": 9.137466307277627, "grad_norm": 0.9175904393196106, "learning_rate": 5.202158661629789e-05, "loss": 3.1242, "step": 84750 }, { "epoch": 9.142857142857142, "grad_norm": 0.8841559886932373, "learning_rate": 5.169778737182946e-05, "loss": 3.1228, "step": 84800 }, { 
"epoch": 9.148247978436657, "grad_norm": 0.8705527186393738, "learning_rate": 5.1373988127361026e-05, "loss": 3.1341, "step": 84850 }, { "epoch": 9.153638814016173, "grad_norm": 0.8766990303993225, "learning_rate": 5.1050188882892604e-05, "loss": 3.1433, "step": 84900 }, { "epoch": 9.159029649595688, "grad_norm": 0.817348062992096, "learning_rate": 5.072638963842417e-05, "loss": 3.1296, "step": 84950 }, { "epoch": 9.164420485175203, "grad_norm": 0.9388226270675659, "learning_rate": 5.0402590393955746e-05, "loss": 3.1433, "step": 85000 }, { "epoch": 9.164420485175203, "eval_accuracy": 0.39282951786041603, "eval_loss": 3.314669370651245, "eval_runtime": 183.9047, "eval_samples_per_second": 97.937, "eval_steps_per_second": 6.123, "step": 85000 }, { "epoch": 9.169811320754716, "grad_norm": 0.8864387273788452, "learning_rate": 5.007879114948731e-05, "loss": 3.1343, "step": 85050 }, { "epoch": 9.175202156334231, "grad_norm": 0.8899182677268982, "learning_rate": 4.975499190501889e-05, "loss": 3.1324, "step": 85100 }, { "epoch": 9.180592991913747, "grad_norm": 0.8436346054077148, "learning_rate": 4.943119266055045e-05, "loss": 3.1394, "step": 85150 }, { "epoch": 9.185983827493262, "grad_norm": 0.8905600905418396, "learning_rate": 4.910739341608202e-05, "loss": 3.142, "step": 85200 }, { "epoch": 9.191374663072777, "grad_norm": 0.8945043087005615, "learning_rate": 4.87835941716136e-05, "loss": 3.1179, "step": 85250 }, { "epoch": 9.19676549865229, "grad_norm": 0.8980481624603271, "learning_rate": 4.8459794927145164e-05, "loss": 3.1269, "step": 85300 }, { "epoch": 9.202156334231805, "grad_norm": 0.8833536505699158, "learning_rate": 4.813599568267674e-05, "loss": 3.1221, "step": 85350 }, { "epoch": 9.20754716981132, "grad_norm": 0.8827757835388184, "learning_rate": 4.7812196438208306e-05, "loss": 3.1308, "step": 85400 }, { "epoch": 9.212938005390836, "grad_norm": 0.8767707347869873, "learning_rate": 4.748839719373988e-05, "loss": 3.1349, "step": 85450 }, { "epoch": 
9.21832884097035, "grad_norm": 0.8896255493164062, "learning_rate": 4.716459794927145e-05, "loss": 3.1217, "step": 85500 }, { "epoch": 9.223719676549866, "grad_norm": 0.9134429693222046, "learning_rate": 4.684079870480302e-05, "loss": 3.1118, "step": 85550 }, { "epoch": 9.22911051212938, "grad_norm": 0.8544310927391052, "learning_rate": 4.651699946033459e-05, "loss": 3.1185, "step": 85600 }, { "epoch": 9.234501347708894, "grad_norm": 0.8505896925926208, "learning_rate": 4.619320021586616e-05, "loss": 3.1347, "step": 85650 }, { "epoch": 9.23989218328841, "grad_norm": 0.8800784349441528, "learning_rate": 4.587587695628709e-05, "loss": 3.1258, "step": 85700 }, { "epoch": 9.245283018867925, "grad_norm": 0.8774470686912537, "learning_rate": 4.555207771181867e-05, "loss": 3.1123, "step": 85750 }, { "epoch": 9.25067385444744, "grad_norm": 0.87869793176651, "learning_rate": 4.522827846735024e-05, "loss": 3.1254, "step": 85800 }, { "epoch": 9.256064690026955, "grad_norm": 0.8597740530967712, "learning_rate": 4.490447922288181e-05, "loss": 3.1507, "step": 85850 }, { "epoch": 9.261455525606468, "grad_norm": 0.849094033241272, "learning_rate": 4.458067997841338e-05, "loss": 3.1339, "step": 85900 }, { "epoch": 9.266846361185983, "grad_norm": 0.945573627948761, "learning_rate": 4.4256880733944947e-05, "loss": 3.1356, "step": 85950 }, { "epoch": 9.272237196765499, "grad_norm": 0.8902300000190735, "learning_rate": 4.3933081489476524e-05, "loss": 3.1322, "step": 86000 }, { "epoch": 9.272237196765499, "eval_accuracy": 0.39319730788566803, "eval_loss": 3.3121745586395264, "eval_runtime": 183.5939, "eval_samples_per_second": 98.102, "eval_steps_per_second": 6.133, "step": 86000 }, { "epoch": 9.277628032345014, "grad_norm": 0.8416682481765747, "learning_rate": 4.360928224500809e-05, "loss": 3.1422, "step": 86050 }, { "epoch": 9.283018867924529, "grad_norm": 0.8767873644828796, "learning_rate": 4.3285483000539666e-05, "loss": 3.1349, "step": 86100 }, { "epoch": 9.288409703504042, 
"grad_norm": 0.8905349969863892, "learning_rate": 4.296168375607123e-05, "loss": 3.1462, "step": 86150 }, { "epoch": 9.293800539083557, "grad_norm": 0.9114990234375, "learning_rate": 4.263788451160281e-05, "loss": 3.1402, "step": 86200 }, { "epoch": 9.299191374663073, "grad_norm": 0.9382705092430115, "learning_rate": 4.231408526713437e-05, "loss": 3.1439, "step": 86250 }, { "epoch": 9.304582210242588, "grad_norm": 0.9107972979545593, "learning_rate": 4.199028602266594e-05, "loss": 3.1434, "step": 86300 }, { "epoch": 9.309973045822103, "grad_norm": 0.8746132850646973, "learning_rate": 4.1666486778197514e-05, "loss": 3.1147, "step": 86350 }, { "epoch": 9.315363881401618, "grad_norm": 0.8748690485954285, "learning_rate": 4.1342687533729085e-05, "loss": 3.1236, "step": 86400 }, { "epoch": 9.320754716981131, "grad_norm": 0.8657193183898926, "learning_rate": 4.101888828926065e-05, "loss": 3.1341, "step": 86450 }, { "epoch": 9.326145552560646, "grad_norm": 0.8812388181686401, "learning_rate": 4.0695089044792226e-05, "loss": 3.1317, "step": 86500 }, { "epoch": 9.331536388140162, "grad_norm": 0.8681718111038208, "learning_rate": 4.037128980032379e-05, "loss": 3.1414, "step": 86550 }, { "epoch": 9.336927223719677, "grad_norm": 0.8916813135147095, "learning_rate": 4.004749055585537e-05, "loss": 3.1533, "step": 86600 }, { "epoch": 9.342318059299192, "grad_norm": 0.8998400568962097, "learning_rate": 3.972369131138694e-05, "loss": 3.1206, "step": 86650 }, { "epoch": 9.347708894878707, "grad_norm": 0.8771690726280212, "learning_rate": 3.93998920669185e-05, "loss": 3.1332, "step": 86700 }, { "epoch": 9.35309973045822, "grad_norm": 0.8819947242736816, "learning_rate": 3.907609282245008e-05, "loss": 3.1126, "step": 86750 }, { "epoch": 9.358490566037736, "grad_norm": 0.9157130718231201, "learning_rate": 3.8752293577981645e-05, "loss": 3.1176, "step": 86800 }, { "epoch": 9.36388140161725, "grad_norm": 0.8591070771217346, "learning_rate": 3.842849433351322e-05, "loss": 3.1351, "step": 
86850 }, { "epoch": 9.369272237196766, "grad_norm": 0.8667553663253784, "learning_rate": 3.810469508904479e-05, "loss": 3.1406, "step": 86900 }, { "epoch": 9.374663072776281, "grad_norm": 0.9189602136611938, "learning_rate": 3.7780895844576364e-05, "loss": 3.1398, "step": 86950 }, { "epoch": 9.380053908355794, "grad_norm": 0.9026527404785156, "learning_rate": 3.745709660010793e-05, "loss": 3.1188, "step": 87000 }, { "epoch": 9.380053908355794, "eval_accuracy": 0.3933680015724246, "eval_loss": 3.3105313777923584, "eval_runtime": 183.4007, "eval_samples_per_second": 98.206, "eval_steps_per_second": 6.14, "step": 87000 }, { "epoch": 9.38544474393531, "grad_norm": 0.8697655200958252, "learning_rate": 3.71332973556395e-05, "loss": 3.1387, "step": 87050 }, { "epoch": 9.390835579514825, "grad_norm": 0.8661813735961914, "learning_rate": 3.680949811117107e-05, "loss": 3.1282, "step": 87100 }, { "epoch": 9.39622641509434, "grad_norm": 0.8661714196205139, "learning_rate": 3.648569886670264e-05, "loss": 3.1331, "step": 87150 }, { "epoch": 9.401617250673855, "grad_norm": 0.8739043474197388, "learning_rate": 3.616189962223421e-05, "loss": 3.1303, "step": 87200 }, { "epoch": 9.40700808625337, "grad_norm": 0.8703292608261108, "learning_rate": 3.583810037776578e-05, "loss": 3.1426, "step": 87250 }, { "epoch": 9.412398921832883, "grad_norm": 0.9095264077186584, "learning_rate": 3.5514301133297354e-05, "loss": 3.1497, "step": 87300 }, { "epoch": 9.417789757412399, "grad_norm": 0.8935539126396179, "learning_rate": 3.5190501888828925e-05, "loss": 3.1295, "step": 87350 }, { "epoch": 9.423180592991914, "grad_norm": 0.9008485674858093, "learning_rate": 3.4866702644360496e-05, "loss": 3.1277, "step": 87400 }, { "epoch": 9.428571428571429, "grad_norm": 0.9170891642570496, "learning_rate": 3.4542903399892067e-05, "loss": 3.1333, "step": 87450 }, { "epoch": 9.433962264150944, "grad_norm": 0.9074438214302063, "learning_rate": 3.421910415542363e-05, "loss": 3.1358, "step": 87500 }, { "epoch": 
9.439353099730457, "grad_norm": 0.887983500957489, "learning_rate": 3.38953049109552e-05, "loss": 3.1457, "step": 87550 }, { "epoch": 9.444743935309972, "grad_norm": 0.9073708653450012, "learning_rate": 3.357150566648677e-05, "loss": 3.1329, "step": 87600 }, { "epoch": 9.450134770889488, "grad_norm": 0.8670662641525269, "learning_rate": 3.324770642201834e-05, "loss": 3.1483, "step": 87650 }, { "epoch": 9.455525606469003, "grad_norm": 0.9071794748306274, "learning_rate": 3.2923907177549914e-05, "loss": 3.1072, "step": 87700 }, { "epoch": 9.460916442048518, "grad_norm": 0.8758538961410522, "learning_rate": 3.2600107933081485e-05, "loss": 3.1269, "step": 87750 }, { "epoch": 9.466307277628033, "grad_norm": 0.8275009989738464, "learning_rate": 3.2276308688613056e-05, "loss": 3.1231, "step": 87800 }, { "epoch": 9.471698113207546, "grad_norm": 0.8741101026535034, "learning_rate": 3.195250944414463e-05, "loss": 3.1334, "step": 87850 }, { "epoch": 9.477088948787062, "grad_norm": 0.9081462025642395, "learning_rate": 3.16287101996762e-05, "loss": 3.1442, "step": 87900 }, { "epoch": 9.482479784366577, "grad_norm": 0.8778511881828308, "learning_rate": 3.130491095520777e-05, "loss": 3.1457, "step": 87950 }, { "epoch": 9.487870619946092, "grad_norm": 0.8574414849281311, "learning_rate": 3.098111171073934e-05, "loss": 3.1361, "step": 88000 }, { "epoch": 9.487870619946092, "eval_accuracy": 0.39378316425676374, "eval_loss": 3.3077938556671143, "eval_runtime": 183.7065, "eval_samples_per_second": 98.042, "eval_steps_per_second": 6.129, "step": 88000 }, { "epoch": 9.493261455525607, "grad_norm": 0.8746349811553955, "learning_rate": 3.065731246627091e-05, "loss": 3.1334, "step": 88050 }, { "epoch": 9.498652291105122, "grad_norm": 0.8603171706199646, "learning_rate": 3.033351322180248e-05, "loss": 3.1328, "step": 88100 }, { "epoch": 9.504043126684635, "grad_norm": 0.8541721105575562, "learning_rate": 3.000971397733405e-05, "loss": 3.1322, "step": 88150 }, { "epoch": 9.50943396226415, 
"grad_norm": 0.8555578589439392, "learning_rate": 2.968591473286562e-05, "loss": 3.1273, "step": 88200 }, { "epoch": 9.514824797843666, "grad_norm": 0.8623125553131104, "learning_rate": 2.936211548839719e-05, "loss": 3.1337, "step": 88250 }, { "epoch": 9.520215633423181, "grad_norm": 0.9109192490577698, "learning_rate": 2.903831624392876e-05, "loss": 3.1421, "step": 88300 }, { "epoch": 9.525606469002696, "grad_norm": 0.8681851625442505, "learning_rate": 2.8714516999460332e-05, "loss": 3.1416, "step": 88350 }, { "epoch": 9.530997304582211, "grad_norm": 0.8718369603157043, "learning_rate": 2.83907177549919e-05, "loss": 3.1426, "step": 88400 }, { "epoch": 9.536388140161725, "grad_norm": 0.8970053791999817, "learning_rate": 2.806691851052347e-05, "loss": 3.1343, "step": 88450 }, { "epoch": 9.54177897574124, "grad_norm": 0.9087472558021545, "learning_rate": 2.774311926605504e-05, "loss": 3.1285, "step": 88500 }, { "epoch": 9.547169811320755, "grad_norm": 0.8881940245628357, "learning_rate": 2.7419320021586613e-05, "loss": 3.1207, "step": 88550 }, { "epoch": 9.55256064690027, "grad_norm": 0.8824938535690308, "learning_rate": 2.7095520777118187e-05, "loss": 3.1361, "step": 88600 }, { "epoch": 9.557951482479785, "grad_norm": 0.878555178642273, "learning_rate": 2.6771721532649758e-05, "loss": 3.107, "step": 88650 }, { "epoch": 9.563342318059298, "grad_norm": 0.8404247760772705, "learning_rate": 2.6447922288181325e-05, "loss": 3.1298, "step": 88700 }, { "epoch": 9.568733153638814, "grad_norm": 0.8609907627105713, "learning_rate": 2.6124123043712896e-05, "loss": 3.1371, "step": 88750 }, { "epoch": 9.574123989218329, "grad_norm": 0.9168536067008972, "learning_rate": 2.5800323799244467e-05, "loss": 3.1142, "step": 88800 }, { "epoch": 9.579514824797844, "grad_norm": 0.9115109443664551, "learning_rate": 2.5476524554776038e-05, "loss": 3.1325, "step": 88850 }, { "epoch": 9.584905660377359, "grad_norm": 0.8815240263938904, "learning_rate": 2.5159201295196976e-05, "loss": 3.1531, 
"step": 88900 }, { "epoch": 9.590296495956874, "grad_norm": 0.8526226282119751, "learning_rate": 2.4835402050728544e-05, "loss": 3.1387, "step": 88950 }, { "epoch": 9.595687331536388, "grad_norm": 0.8618078231811523, "learning_rate": 2.4511602806260115e-05, "loss": 3.1304, "step": 89000 }, { "epoch": 9.595687331536388, "eval_accuracy": 0.3937702345631138, "eval_loss": 3.305745840072632, "eval_runtime": 183.6824, "eval_samples_per_second": 98.055, "eval_steps_per_second": 6.13, "step": 89000 }, { "epoch": 9.601078167115903, "grad_norm": 0.8251466155052185, "learning_rate": 2.4187803561791686e-05, "loss": 3.127, "step": 89050 }, { "epoch": 9.606469002695418, "grad_norm": 0.849942684173584, "learning_rate": 2.3864004317323257e-05, "loss": 3.1325, "step": 89100 }, { "epoch": 9.611859838274933, "grad_norm": 0.9657033681869507, "learning_rate": 2.354020507285483e-05, "loss": 3.1566, "step": 89150 }, { "epoch": 9.617250673854448, "grad_norm": 0.8909383416175842, "learning_rate": 2.3216405828386402e-05, "loss": 3.1215, "step": 89200 }, { "epoch": 9.622641509433961, "grad_norm": 0.9422598481178284, "learning_rate": 2.289260658391797e-05, "loss": 3.1439, "step": 89250 }, { "epoch": 9.628032345013477, "grad_norm": 0.8946119546890259, "learning_rate": 2.256880733944954e-05, "loss": 3.1319, "step": 89300 }, { "epoch": 9.633423180592992, "grad_norm": 0.8867332935333252, "learning_rate": 2.224500809498111e-05, "loss": 3.1286, "step": 89350 }, { "epoch": 9.638814016172507, "grad_norm": 0.8961672782897949, "learning_rate": 2.1921208850512682e-05, "loss": 3.1327, "step": 89400 }, { "epoch": 9.644204851752022, "grad_norm": 0.8897280097007751, "learning_rate": 2.1597409606044253e-05, "loss": 3.1312, "step": 89450 }, { "epoch": 9.649595687331537, "grad_norm": 0.8461384773254395, "learning_rate": 2.127361036157582e-05, "loss": 3.1449, "step": 89500 }, { "epoch": 9.65498652291105, "grad_norm": 0.8975701332092285, "learning_rate": 2.094981111710739e-05, "loss": 3.1314, "step": 89550 }, { 
"epoch": 9.660377358490566, "grad_norm": 0.8743809461593628, "learning_rate": 2.0626011872638962e-05, "loss": 3.1226, "step": 89600 }, { "epoch": 9.66576819407008, "grad_norm": 0.8966118693351746, "learning_rate": 2.0302212628170533e-05, "loss": 3.1243, "step": 89650 }, { "epoch": 9.671159029649596, "grad_norm": 0.9084198474884033, "learning_rate": 1.99784133837021e-05, "loss": 3.126, "step": 89700 }, { "epoch": 9.676549865229111, "grad_norm": 0.8550707697868347, "learning_rate": 1.965461413923367e-05, "loss": 3.1412, "step": 89750 }, { "epoch": 9.681940700808624, "grad_norm": 0.883452832698822, "learning_rate": 1.9330814894765242e-05, "loss": 3.1228, "step": 89800 }, { "epoch": 9.68733153638814, "grad_norm": 0.8623883724212646, "learning_rate": 1.9007015650296813e-05, "loss": 3.1269, "step": 89850 }, { "epoch": 9.692722371967655, "grad_norm": 0.8669401407241821, "learning_rate": 1.8683216405828384e-05, "loss": 3.1452, "step": 89900 }, { "epoch": 9.69811320754717, "grad_norm": 0.8647443652153015, "learning_rate": 1.8359417161359955e-05, "loss": 3.1487, "step": 89950 }, { "epoch": 9.703504043126685, "grad_norm": 0.8886281847953796, "learning_rate": 1.8035617916891526e-05, "loss": 3.1329, "step": 90000 }, { "epoch": 9.703504043126685, "eval_accuracy": 0.39417724828086687, "eval_loss": 3.3030855655670166, "eval_runtime": 183.5836, "eval_samples_per_second": 98.108, "eval_steps_per_second": 6.133, "step": 90000 }, { "epoch": 9.7088948787062, "grad_norm": 0.8624467849731445, "learning_rate": 1.7711818672423097e-05, "loss": 3.1197, "step": 90050 }, { "epoch": 9.714285714285714, "grad_norm": 0.9024360179901123, "learning_rate": 1.7388019427954664e-05, "loss": 3.1357, "step": 90100 }, { "epoch": 9.719676549865229, "grad_norm": 0.8829125761985779, "learning_rate": 1.706422018348624e-05, "loss": 3.1514, "step": 90150 }, { "epoch": 9.725067385444744, "grad_norm": 0.8832122683525085, "learning_rate": 1.674042093901781e-05, "loss": 3.1308, "step": 90200 }, { "epoch": 
9.730458221024259, "grad_norm": 0.8811697959899902, "learning_rate": 1.6416621694549377e-05, "loss": 3.1369, "step": 90250 }, { "epoch": 9.735849056603774, "grad_norm": 0.9028815031051636, "learning_rate": 1.6092822450080948e-05, "loss": 3.1302, "step": 90300 }, { "epoch": 9.74123989218329, "grad_norm": 0.8779157996177673, "learning_rate": 1.576902320561252e-05, "loss": 3.1376, "step": 90350 }, { "epoch": 9.746630727762803, "grad_norm": 0.8744044899940491, "learning_rate": 1.544522396114409e-05, "loss": 3.1471, "step": 90400 }, { "epoch": 9.752021563342318, "grad_norm": 0.8857881426811218, "learning_rate": 1.5121424716675659e-05, "loss": 3.1234, "step": 90450 }, { "epoch": 9.757412398921833, "grad_norm": 0.8945528864860535, "learning_rate": 1.479762547220723e-05, "loss": 3.1355, "step": 90500 }, { "epoch": 9.762803234501348, "grad_norm": 0.9267755746841431, "learning_rate": 1.4473826227738802e-05, "loss": 3.152, "step": 90550 }, { "epoch": 9.768194070080863, "grad_norm": 0.8681300282478333, "learning_rate": 1.4150026983270371e-05, "loss": 3.1292, "step": 90600 }, { "epoch": 9.773584905660378, "grad_norm": 0.8631065487861633, "learning_rate": 1.3826227738801942e-05, "loss": 3.1229, "step": 90650 }, { "epoch": 9.778975741239892, "grad_norm": 0.8668704032897949, "learning_rate": 1.3502428494333512e-05, "loss": 3.1076, "step": 90700 }, { "epoch": 9.784366576819407, "grad_norm": 0.8805832862854004, "learning_rate": 1.3178629249865082e-05, "loss": 3.1499, "step": 90750 }, { "epoch": 9.789757412398922, "grad_norm": 0.9205412864685059, "learning_rate": 1.2854830005396653e-05, "loss": 3.1072, "step": 90800 }, { "epoch": 9.795148247978437, "grad_norm": 0.8832406997680664, "learning_rate": 1.2531030760928222e-05, "loss": 3.1221, "step": 90850 }, { "epoch": 9.800539083557952, "grad_norm": 0.8675433397293091, "learning_rate": 1.2207231516459793e-05, "loss": 3.1423, "step": 90900 }, { "epoch": 9.805929919137466, "grad_norm": 0.9155154824256897, "learning_rate": 
1.1883432271991366e-05, "loss": 3.1176, "step": 90950 }, { "epoch": 9.81132075471698, "grad_norm": 0.8952852487564087, "learning_rate": 1.1559633027522935e-05, "loss": 3.133, "step": 91000 }, { "epoch": 9.81132075471698, "eval_accuracy": 0.3943811897512946, "eval_loss": 3.3014745712280273, "eval_runtime": 183.7431, "eval_samples_per_second": 98.023, "eval_steps_per_second": 6.128, "step": 91000 }, { "epoch": 9.816711590296496, "grad_norm": 0.8473813533782959, "learning_rate": 1.1235833783054506e-05, "loss": 3.1286, "step": 91050 }, { "epoch": 9.822102425876011, "grad_norm": 0.8618873357772827, "learning_rate": 1.0912034538586075e-05, "loss": 3.1257, "step": 91100 }, { "epoch": 9.827493261455526, "grad_norm": 0.8717328906059265, "learning_rate": 1.0588235294117646e-05, "loss": 3.1372, "step": 91150 }, { "epoch": 9.832884097035041, "grad_norm": 0.8867208957672119, "learning_rate": 1.0264436049649217e-05, "loss": 3.1372, "step": 91200 }, { "epoch": 9.838274932614555, "grad_norm": 0.8444369435310364, "learning_rate": 9.940636805180786e-06, "loss": 3.1292, "step": 91250 }, { "epoch": 9.84366576819407, "grad_norm": 0.8623387813568115, "learning_rate": 9.616837560712357e-06, "loss": 3.1277, "step": 91300 }, { "epoch": 9.849056603773585, "grad_norm": 0.9045534729957581, "learning_rate": 9.293038316243928e-06, "loss": 3.1321, "step": 91350 }, { "epoch": 9.8544474393531, "grad_norm": 0.8703294992446899, "learning_rate": 8.969239071775497e-06, "loss": 3.1132, "step": 91400 }, { "epoch": 9.859838274932615, "grad_norm": 0.8696820139884949, "learning_rate": 8.64543982730707e-06, "loss": 3.1257, "step": 91450 }, { "epoch": 9.865229110512129, "grad_norm": 0.8719615340232849, "learning_rate": 8.321640582838639e-06, "loss": 3.1223, "step": 91500 }, { "epoch": 9.870619946091644, "grad_norm": 0.8584866523742676, "learning_rate": 7.99784133837021e-06, "loss": 3.1242, "step": 91550 }, { "epoch": 9.876010781671159, "grad_norm": 0.855100691318512, "learning_rate": 7.674042093901779e-06, 
"loss": 3.13, "step": 91600 }, { "epoch": 9.881401617250674, "grad_norm": 0.8618912696838379, "learning_rate": 7.350242849433351e-06, "loss": 3.1491, "step": 91650 }, { "epoch": 9.88679245283019, "grad_norm": 0.9182708263397217, "learning_rate": 7.026443604964921e-06, "loss": 3.1439, "step": 91700 }, { "epoch": 9.892183288409704, "grad_norm": 0.911716103553772, "learning_rate": 6.702644360496492e-06, "loss": 3.1325, "step": 91750 }, { "epoch": 9.897574123989218, "grad_norm": 0.888717770576477, "learning_rate": 6.378845116028062e-06, "loss": 3.1463, "step": 91800 }, { "epoch": 9.902964959568733, "grad_norm": 0.8845711946487427, "learning_rate": 6.055045871559633e-06, "loss": 3.1092, "step": 91850 }, { "epoch": 9.908355795148248, "grad_norm": 0.8652920722961426, "learning_rate": 5.731246627091203e-06, "loss": 3.1351, "step": 91900 }, { "epoch": 9.913746630727763, "grad_norm": 0.901452362537384, "learning_rate": 5.407447382622774e-06, "loss": 3.1131, "step": 91950 }, { "epoch": 9.919137466307278, "grad_norm": 0.8996922373771667, "learning_rate": 5.083648138154344e-06, "loss": 3.1425, "step": 92000 }, { "epoch": 9.919137466307278, "eval_accuracy": 0.3946086002454903, "eval_loss": 3.2996339797973633, "eval_runtime": 183.757, "eval_samples_per_second": 98.015, "eval_steps_per_second": 6.128, "step": 92000 }, { "epoch": 9.924528301886792, "grad_norm": 0.8938733339309692, "learning_rate": 4.7598488936859145e-06, "loss": 3.1449, "step": 92050 }, { "epoch": 9.929919137466307, "grad_norm": 0.855878472328186, "learning_rate": 4.4360496492174846e-06, "loss": 3.127, "step": 92100 }, { "epoch": 9.935309973045822, "grad_norm": 0.9077852964401245, "learning_rate": 4.112250404749055e-06, "loss": 3.1375, "step": 92150 }, { "epoch": 9.940700808625337, "grad_norm": 0.8704142570495605, "learning_rate": 3.788451160280626e-06, "loss": 3.1189, "step": 92200 }, { "epoch": 9.946091644204852, "grad_norm": 0.8859520554542542, "learning_rate": 3.464651915812196e-06, "loss": 3.1297, "step": 
92250 }, { "epoch": 9.951482479784367, "grad_norm": 0.8482578992843628, "learning_rate": 3.1408526713437664e-06, "loss": 3.1209, "step": 92300 }, { "epoch": 9.95687331536388, "grad_norm": 0.8768433332443237, "learning_rate": 2.817053426875337e-06, "loss": 3.1263, "step": 92350 }, { "epoch": 9.962264150943396, "grad_norm": 0.8390297889709473, "learning_rate": 2.4932541824069074e-06, "loss": 3.1259, "step": 92400 }, { "epoch": 9.967654986522911, "grad_norm": 0.9365578889846802, "learning_rate": 2.169454937938478e-06, "loss": 3.128, "step": 92450 }, { "epoch": 9.973045822102426, "grad_norm": 0.8617887496948242, "learning_rate": 1.8456556934700485e-06, "loss": 3.1347, "step": 92500 }, { "epoch": 9.978436657681941, "grad_norm": 0.9396256804466248, "learning_rate": 1.521856449001619e-06, "loss": 3.1253, "step": 92550 }, { "epoch": 9.983827493261456, "grad_norm": 0.9035614728927612, "learning_rate": 1.1980572045331894e-06, "loss": 3.1229, "step": 92600 }, { "epoch": 9.98921832884097, "grad_norm": 0.840107798576355, "learning_rate": 8.742579600647598e-07, "loss": 3.1141, "step": 92650 }, { "epoch": 9.994609164420485, "grad_norm": 0.9073900580406189, "learning_rate": 5.504587155963303e-07, "loss": 3.1322, "step": 92700 }, { "epoch": 10.0, "grad_norm": 1.8777025938034058, "learning_rate": 2.2665947112790068e-07, "loss": 3.1244, "step": 92750 }, { "epoch": 10.0, "step": 92750, "total_flos": 7.75449427968e+17, "train_loss": 3.4578574852133697, "train_runtime": 79538.6235, "train_samples_per_second": 37.312, "train_steps_per_second": 1.166 } ], "logging_steps": 50, "max_steps": 92750, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 10000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 7.75449427968e+17, "train_batch_size": 32, "trial_name": null, "trial_params": null }