| { |
| "best_metric": 3.7572708129882812, |
| "best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M_high_2000_8397/checkpoint-10000", |
| "epoch": 1.0781671159029649, |
| "eval_steps": 1000, |
| "global_step": 10000, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.005390835579514825, |
| "grad_norm": 1.6020084619522095, |
| "learning_rate": 0.00028199999999999997, |
| "loss": 8.5757, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.01078167115902965, |
| "grad_norm": 1.8397823572158813, |
| "learning_rate": 0.0005819999999999999, |
| "loss": 6.94, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.016172506738544475, |
| "grad_norm": 2.0749616622924805, |
| "learning_rate": 0.0005996956287101997, |
| "loss": 6.498, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.0215633423180593, |
| "grad_norm": 2.3127713203430176, |
| "learning_rate": 0.0005993718294657311, |
| "loss": 6.2576, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.026954177897574125, |
| "grad_norm": 0.8723729848861694, |
| "learning_rate": 0.0005990480302212627, |
| "loss": 6.1064, |
| "step": 250 |
| }, |
| { |
| "epoch": 0.03234501347708895, |
| "grad_norm": 1.36876380443573, |
| "learning_rate": 0.0005987242309767944, |
| "loss": 6.0069, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.03773584905660377, |
| "grad_norm": 1.1116160154342651, |
| "learning_rate": 0.0005984004317323259, |
| "loss": 5.8938, |
| "step": 350 |
| }, |
| { |
| "epoch": 0.0431266846361186, |
| "grad_norm": 1.7246553897857666, |
| "learning_rate": 0.0005980766324878575, |
| "loss": 5.8482, |
| "step": 400 |
| }, |
| { |
| "epoch": 0.04851752021563342, |
| "grad_norm": 1.5949625968933105, |
| "learning_rate": 0.000597752833243389, |
| "loss": 5.7717, |
| "step": 450 |
| }, |
| { |
| "epoch": 0.05390835579514825, |
| "grad_norm": 1.2913446426391602, |
| "learning_rate": 0.0005974290339989207, |
| "loss": 5.6782, |
| "step": 500 |
| }, |
| { |
| "epoch": 0.05929919137466307, |
| "grad_norm": 1.3474897146224976, |
| "learning_rate": 0.0005971052347544522, |
| "loss": 5.6001, |
| "step": 550 |
| }, |
| { |
| "epoch": 0.0646900269541779, |
| "grad_norm": 1.230842113494873, |
| "learning_rate": 0.0005967814355099838, |
| "loss": 5.5233, |
| "step": 600 |
| }, |
| { |
| "epoch": 0.07008086253369272, |
| "grad_norm": 1.3157235383987427, |
| "learning_rate": 0.0005964576362655153, |
| "loss": 5.4574, |
| "step": 650 |
| }, |
| { |
| "epoch": 0.07547169811320754, |
| "grad_norm": 1.1696827411651611, |
| "learning_rate": 0.0005961338370210469, |
| "loss": 5.4063, |
| "step": 700 |
| }, |
| { |
| "epoch": 0.08086253369272237, |
| "grad_norm": 1.3056632280349731, |
| "learning_rate": 0.0005958100377765785, |
| "loss": 5.3331, |
| "step": 750 |
| }, |
| { |
| "epoch": 0.0862533692722372, |
| "grad_norm": 1.6241542100906372, |
| "learning_rate": 0.00059548623853211, |
| "loss": 5.2849, |
| "step": 800 |
| }, |
| { |
| "epoch": 0.09164420485175202, |
| "grad_norm": 1.0650733709335327, |
| "learning_rate": 0.0005951624392876416, |
| "loss": 5.2456, |
| "step": 850 |
| }, |
| { |
| "epoch": 0.09703504043126684, |
| "grad_norm": 1.5372745990753174, |
| "learning_rate": 0.0005948386400431732, |
| "loss": 5.191, |
| "step": 900 |
| }, |
| { |
| "epoch": 0.10242587601078167, |
| "grad_norm": 1.4312268495559692, |
| "learning_rate": 0.0005945148407987047, |
| "loss": 5.1659, |
| "step": 950 |
| }, |
| { |
| "epoch": 0.1078167115902965, |
| "grad_norm": 0.9435037970542908, |
| "learning_rate": 0.0005941910415542363, |
| "loss": 5.1266, |
| "step": 1000 |
| }, |
| { |
| "epoch": 0.1078167115902965, |
| "eval_accuracy": 0.22553308094716198, |
| "eval_loss": 5.041633605957031, |
| "eval_runtime": 152.6311, |
| "eval_samples_per_second": 118.004, |
| "eval_steps_per_second": 7.377, |
| "step": 1000 |
| }, |
| { |
| "epoch": 0.11320754716981132, |
| "grad_norm": 1.088536024093628, |
| "learning_rate": 0.0005938672423097679, |
| "loss": 5.0725, |
| "step": 1050 |
| }, |
| { |
| "epoch": 0.11859838274932614, |
| "grad_norm": 0.9872896075248718, |
| "learning_rate": 0.0005935434430652995, |
| "loss": 5.0443, |
| "step": 1100 |
| }, |
| { |
| "epoch": 0.12398921832884097, |
| "grad_norm": 0.9727984070777893, |
| "learning_rate": 0.000593219643820831, |
| "loss": 5.0164, |
| "step": 1150 |
| }, |
| { |
| "epoch": 0.1293800539083558, |
| "grad_norm": 1.0641847848892212, |
| "learning_rate": 0.0005928958445763626, |
| "loss": 4.978, |
| "step": 1200 |
| }, |
| { |
| "epoch": 0.1347708894878706, |
| "grad_norm": 0.8890565633773804, |
| "learning_rate": 0.0005925720453318941, |
| "loss": 4.9292, |
| "step": 1250 |
| }, |
| { |
| "epoch": 0.14016172506738545, |
| "grad_norm": 1.0303993225097656, |
| "learning_rate": 0.0005922482460874258, |
| "loss": 4.9, |
| "step": 1300 |
| }, |
| { |
| "epoch": 0.14555256064690028, |
| "grad_norm": 1.0149179697036743, |
| "learning_rate": 0.0005919244468429573, |
| "loss": 4.8846, |
| "step": 1350 |
| }, |
| { |
| "epoch": 0.1509433962264151, |
| "grad_norm": 1.0036956071853638, |
| "learning_rate": 0.0005916006475984889, |
| "loss": 4.8728, |
| "step": 1400 |
| }, |
| { |
| "epoch": 0.15633423180592992, |
| "grad_norm": 0.8696756958961487, |
| "learning_rate": 0.0005912768483540205, |
| "loss": 4.8206, |
| "step": 1450 |
| }, |
| { |
| "epoch": 0.16172506738544473, |
| "grad_norm": 1.1622956991195679, |
| "learning_rate": 0.0005909530491095521, |
| "loss": 4.8359, |
| "step": 1500 |
| }, |
| { |
| "epoch": 0.16711590296495957, |
| "grad_norm": 1.1141048669815063, |
| "learning_rate": 0.0005906292498650836, |
| "loss": 4.7878, |
| "step": 1550 |
| }, |
| { |
| "epoch": 0.1725067385444744, |
| "grad_norm": 1.274352788925171, |
| "learning_rate": 0.0005903054506206151, |
| "loss": 4.7862, |
| "step": 1600 |
| }, |
| { |
| "epoch": 0.1778975741239892, |
| "grad_norm": 0.8880785703659058, |
| "learning_rate": 0.0005899816513761468, |
| "loss": 4.7445, |
| "step": 1650 |
| }, |
| { |
| "epoch": 0.18328840970350405, |
| "grad_norm": 0.9446322321891785, |
| "learning_rate": 0.0005896578521316783, |
| "loss": 4.7346, |
| "step": 1700 |
| }, |
| { |
| "epoch": 0.18867924528301888, |
| "grad_norm": 0.9463135600090027, |
| "learning_rate": 0.0005893340528872099, |
| "loss": 4.6952, |
| "step": 1750 |
| }, |
| { |
| "epoch": 0.1940700808625337, |
| "grad_norm": 0.8582187294960022, |
| "learning_rate": 0.0005890102536427414, |
| "loss": 4.7029, |
| "step": 1800 |
| }, |
| { |
| "epoch": 0.19946091644204852, |
| "grad_norm": 0.9058123826980591, |
| "learning_rate": 0.0005886864543982731, |
| "loss": 4.6735, |
| "step": 1850 |
| }, |
| { |
| "epoch": 0.20485175202156333, |
| "grad_norm": 0.9203746318817139, |
| "learning_rate": 0.0005883626551538046, |
| "loss": 4.6596, |
| "step": 1900 |
| }, |
| { |
| "epoch": 0.21024258760107817, |
| "grad_norm": 1.0241466760635376, |
| "learning_rate": 0.0005880388559093362, |
| "loss": 4.6282, |
| "step": 1950 |
| }, |
| { |
| "epoch": 0.215633423180593, |
| "grad_norm": 1.1103599071502686, |
| "learning_rate": 0.0005877150566648677, |
| "loss": 4.6194, |
| "step": 2000 |
| }, |
| { |
| "epoch": 0.215633423180593, |
| "eval_accuracy": 0.2670949835939572, |
| "eval_loss": 4.5360541343688965, |
| "eval_runtime": 152.732, |
| "eval_samples_per_second": 117.926, |
| "eval_steps_per_second": 7.372, |
| "step": 2000 |
| }, |
| { |
| "epoch": 0.2210242587601078, |
| "grad_norm": 0.8629128336906433, |
| "learning_rate": 0.0005873912574203993, |
| "loss": 4.5954, |
| "step": 2050 |
| }, |
| { |
| "epoch": 0.22641509433962265, |
| "grad_norm": 0.7517758011817932, |
| "learning_rate": 0.0005870674581759309, |
| "loss": 4.5774, |
| "step": 2100 |
| }, |
| { |
| "epoch": 0.23180592991913745, |
| "grad_norm": 1.1525517702102661, |
| "learning_rate": 0.0005867436589314624, |
| "loss": 4.5567, |
| "step": 2150 |
| }, |
| { |
| "epoch": 0.2371967654986523, |
| "grad_norm": 1.0665569305419922, |
| "learning_rate": 0.000586419859686994, |
| "loss": 4.5313, |
| "step": 2200 |
| }, |
| { |
| "epoch": 0.24258760107816713, |
| "grad_norm": 0.8165591359138489, |
| "learning_rate": 0.0005860960604425256, |
| "loss": 4.5156, |
| "step": 2250 |
| }, |
| { |
| "epoch": 0.24797843665768193, |
| "grad_norm": 1.0752588510513306, |
| "learning_rate": 0.0005857722611980571, |
| "loss": 4.526, |
| "step": 2300 |
| }, |
| { |
| "epoch": 0.25336927223719674, |
| "grad_norm": 0.9010254740715027, |
| "learning_rate": 0.0005854484619535887, |
| "loss": 4.4768, |
| "step": 2350 |
| }, |
| { |
| "epoch": 0.2587601078167116, |
| "grad_norm": 0.813308596611023, |
| "learning_rate": 0.0005851246627091202, |
| "loss": 4.486, |
| "step": 2400 |
| }, |
| { |
| "epoch": 0.2641509433962264, |
| "grad_norm": 0.997901976108551, |
| "learning_rate": 0.0005848008634646519, |
| "loss": 4.4602, |
| "step": 2450 |
| }, |
| { |
| "epoch": 0.2695417789757412, |
| "grad_norm": 1.0612961053848267, |
| "learning_rate": 0.0005844770642201834, |
| "loss": 4.4441, |
| "step": 2500 |
| }, |
| { |
| "epoch": 0.2749326145552561, |
| "grad_norm": 0.7074645757675171, |
| "learning_rate": 0.000584153264975715, |
| "loss": 4.4268, |
| "step": 2550 |
| }, |
| { |
| "epoch": 0.2803234501347709, |
| "grad_norm": 0.7893711924552917, |
| "learning_rate": 0.0005838294657312465, |
| "loss": 4.3983, |
| "step": 2600 |
| }, |
| { |
| "epoch": 0.2857142857142857, |
| "grad_norm": 0.8888121843338013, |
| "learning_rate": 0.0005835056664867782, |
| "loss": 4.4127, |
| "step": 2650 |
| }, |
| { |
| "epoch": 0.29110512129380056, |
| "grad_norm": 0.8462523221969604, |
| "learning_rate": 0.0005831818672423098, |
| "loss": 4.4129, |
| "step": 2700 |
| }, |
| { |
| "epoch": 0.29649595687331537, |
| "grad_norm": 0.8949580192565918, |
| "learning_rate": 0.0005828580679978413, |
| "loss": 4.3751, |
| "step": 2750 |
| }, |
| { |
| "epoch": 0.3018867924528302, |
| "grad_norm": 0.7590197324752808, |
| "learning_rate": 0.0005825342687533729, |
| "loss": 4.3503, |
| "step": 2800 |
| }, |
| { |
| "epoch": 0.30727762803234504, |
| "grad_norm": 0.6843862533569336, |
| "learning_rate": 0.0005822104695089044, |
| "loss": 4.3515, |
| "step": 2850 |
| }, |
| { |
| "epoch": 0.31266846361185985, |
| "grad_norm": 0.8599426746368408, |
| "learning_rate": 0.000581886670264436, |
| "loss": 4.342, |
| "step": 2900 |
| }, |
| { |
| "epoch": 0.31805929919137466, |
| "grad_norm": 0.893551766872406, |
| "learning_rate": 0.0005815628710199675, |
| "loss": 4.3359, |
| "step": 2950 |
| }, |
| { |
| "epoch": 0.32345013477088946, |
| "grad_norm": 0.8023635149002075, |
| "learning_rate": 0.0005812390717754992, |
| "loss": 4.3296, |
| "step": 3000 |
| }, |
| { |
| "epoch": 0.32345013477088946, |
| "eval_accuracy": 0.29539221573769714, |
| "eval_loss": 4.2696380615234375, |
| "eval_runtime": 152.7589, |
| "eval_samples_per_second": 117.905, |
| "eval_steps_per_second": 7.371, |
| "step": 3000 |
| }, |
| { |
| "epoch": 0.3288409703504043, |
| "grad_norm": 0.6987361907958984, |
| "learning_rate": 0.0005809152725310307, |
| "loss": 4.3208, |
| "step": 3050 |
| }, |
| { |
| "epoch": 0.33423180592991913, |
| "grad_norm": 0.7668080925941467, |
| "learning_rate": 0.0005805914732865623, |
| "loss": 4.3246, |
| "step": 3100 |
| }, |
| { |
| "epoch": 0.33962264150943394, |
| "grad_norm": 0.7270567417144775, |
| "learning_rate": 0.0005802676740420938, |
| "loss": 4.2823, |
| "step": 3150 |
| }, |
| { |
| "epoch": 0.3450134770889488, |
| "grad_norm": 0.6801213622093201, |
| "learning_rate": 0.0005799438747976255, |
| "loss": 4.2811, |
| "step": 3200 |
| }, |
| { |
| "epoch": 0.3504043126684636, |
| "grad_norm": 0.7790431976318359, |
| "learning_rate": 0.000579620075553157, |
| "loss": 4.28, |
| "step": 3250 |
| }, |
| { |
| "epoch": 0.3557951482479784, |
| "grad_norm": 0.8592586517333984, |
| "learning_rate": 0.0005792962763086886, |
| "loss": 4.2634, |
| "step": 3300 |
| }, |
| { |
| "epoch": 0.3611859838274933, |
| "grad_norm": 0.7063053846359253, |
| "learning_rate": 0.0005789724770642201, |
| "loss": 4.2784, |
| "step": 3350 |
| }, |
| { |
| "epoch": 0.3665768194070081, |
| "grad_norm": 0.7619624733924866, |
| "learning_rate": 0.0005786486778197517, |
| "loss": 4.2507, |
| "step": 3400 |
| }, |
| { |
| "epoch": 0.3719676549865229, |
| "grad_norm": 0.7230337858200073, |
| "learning_rate": 0.0005783248785752833, |
| "loss": 4.2259, |
| "step": 3450 |
| }, |
| { |
| "epoch": 0.37735849056603776, |
| "grad_norm": 0.7475656270980835, |
| "learning_rate": 0.0005780010793308148, |
| "loss": 4.2353, |
| "step": 3500 |
| }, |
| { |
| "epoch": 0.38274932614555257, |
| "grad_norm": 0.7857275009155273, |
| "learning_rate": 0.0005776772800863464, |
| "loss": 4.2317, |
| "step": 3550 |
| }, |
| { |
| "epoch": 0.3881401617250674, |
| "grad_norm": 0.6113690733909607, |
| "learning_rate": 0.000577353480841878, |
| "loss": 4.2344, |
| "step": 3600 |
| }, |
| { |
| "epoch": 0.3935309973045822, |
| "grad_norm": 0.8256933093070984, |
| "learning_rate": 0.0005770296815974095, |
| "loss": 4.2054, |
| "step": 3650 |
| }, |
| { |
| "epoch": 0.39892183288409705, |
| "grad_norm": 0.8064551949501038, |
| "learning_rate": 0.0005767058823529411, |
| "loss": 4.2078, |
| "step": 3700 |
| }, |
| { |
| "epoch": 0.40431266846361186, |
| "grad_norm": 0.6000850200653076, |
| "learning_rate": 0.0005763820831084726, |
| "loss": 4.2117, |
| "step": 3750 |
| }, |
| { |
| "epoch": 0.40970350404312667, |
| "grad_norm": 0.6357919573783875, |
| "learning_rate": 0.0005760582838640043, |
| "loss": 4.205, |
| "step": 3800 |
| }, |
| { |
| "epoch": 0.41509433962264153, |
| "grad_norm": 0.6840759515762329, |
| "learning_rate": 0.0005757344846195359, |
| "loss": 4.1844, |
| "step": 3850 |
| }, |
| { |
| "epoch": 0.42048517520215634, |
| "grad_norm": 0.7748379111289978, |
| "learning_rate": 0.0005754106853750674, |
| "loss": 4.1815, |
| "step": 3900 |
| }, |
| { |
| "epoch": 0.42587601078167114, |
| "grad_norm": 0.6725760698318481, |
| "learning_rate": 0.000575086886130599, |
| "loss": 4.1742, |
| "step": 3950 |
| }, |
| { |
| "epoch": 0.431266846361186, |
| "grad_norm": 0.8388188481330872, |
| "learning_rate": 0.0005747630868861306, |
| "loss": 4.1751, |
| "step": 4000 |
| }, |
| { |
| "epoch": 0.431266846361186, |
| "eval_accuracy": 0.31143709633414934, |
| "eval_loss": 4.099897384643555, |
| "eval_runtime": 152.799, |
| "eval_samples_per_second": 117.874, |
| "eval_steps_per_second": 7.369, |
| "step": 4000 |
| }, |
| { |
| "epoch": 0.4366576819407008, |
| "grad_norm": 0.5372797250747681, |
| "learning_rate": 0.0005744392876416622, |
| "loss": 4.1637, |
| "step": 4050 |
| }, |
| { |
| "epoch": 0.4420485175202156, |
| "grad_norm": 0.7316073775291443, |
| "learning_rate": 0.0005741154883971936, |
| "loss": 4.1648, |
| "step": 4100 |
| }, |
| { |
| "epoch": 0.4474393530997305, |
| "grad_norm": 0.6784662008285522, |
| "learning_rate": 0.0005737916891527253, |
| "loss": 4.1653, |
| "step": 4150 |
| }, |
| { |
| "epoch": 0.4528301886792453, |
| "grad_norm": 0.6223421692848206, |
| "learning_rate": 0.0005734678899082568, |
| "loss": 4.1481, |
| "step": 4200 |
| }, |
| { |
| "epoch": 0.4582210242587601, |
| "grad_norm": 0.6360011100769043, |
| "learning_rate": 0.0005731440906637884, |
| "loss": 4.1347, |
| "step": 4250 |
| }, |
| { |
| "epoch": 0.4636118598382749, |
| "grad_norm": 0.9023606181144714, |
| "learning_rate": 0.0005728202914193199, |
| "loss": 4.1318, |
| "step": 4300 |
| }, |
| { |
| "epoch": 0.46900269541778977, |
| "grad_norm": 0.5858056545257568, |
| "learning_rate": 0.0005724964921748516, |
| "loss": 4.1316, |
| "step": 4350 |
| }, |
| { |
| "epoch": 0.4743935309973046, |
| "grad_norm": 0.7229328751564026, |
| "learning_rate": 0.0005721726929303831, |
| "loss": 4.1268, |
| "step": 4400 |
| }, |
| { |
| "epoch": 0.4797843665768194, |
| "grad_norm": 0.8891165256500244, |
| "learning_rate": 0.0005718488936859147, |
| "loss": 4.1138, |
| "step": 4450 |
| }, |
| { |
| "epoch": 0.48517520215633425, |
| "grad_norm": 0.6901257038116455, |
| "learning_rate": 0.0005715250944414462, |
| "loss": 4.1243, |
| "step": 4500 |
| }, |
| { |
| "epoch": 0.49056603773584906, |
| "grad_norm": 0.6422321200370789, |
| "learning_rate": 0.0005712012951969778, |
| "loss": 4.1152, |
| "step": 4550 |
| }, |
| { |
| "epoch": 0.49595687331536387, |
| "grad_norm": 0.7133573293685913, |
| "learning_rate": 0.0005708774959525094, |
| "loss": 4.0984, |
| "step": 4600 |
| }, |
| { |
| "epoch": 0.5013477088948787, |
| "grad_norm": 0.625822126865387, |
| "learning_rate": 0.000570553696708041, |
| "loss": 4.0931, |
| "step": 4650 |
| }, |
| { |
| "epoch": 0.5067385444743935, |
| "grad_norm": 0.6901353001594543, |
| "learning_rate": 0.0005702298974635725, |
| "loss": 4.1064, |
| "step": 4700 |
| }, |
| { |
| "epoch": 0.5121293800539084, |
| "grad_norm": 0.6352577805519104, |
| "learning_rate": 0.0005699060982191041, |
| "loss": 4.0958, |
| "step": 4750 |
| }, |
| { |
| "epoch": 0.5175202156334232, |
| "grad_norm": 0.7718018293380737, |
| "learning_rate": 0.0005695822989746357, |
| "loss": 4.0929, |
| "step": 4800 |
| }, |
| { |
| "epoch": 0.522911051212938, |
| "grad_norm": 0.6640039682388306, |
| "learning_rate": 0.0005692584997301672, |
| "loss": 4.0793, |
| "step": 4850 |
| }, |
| { |
| "epoch": 0.5283018867924528, |
| "grad_norm": 0.5986649990081787, |
| "learning_rate": 0.0005689347004856988, |
| "loss": 4.0624, |
| "step": 4900 |
| }, |
| { |
| "epoch": 0.5336927223719676, |
| "grad_norm": 0.6509894132614136, |
| "learning_rate": 0.0005686109012412304, |
| "loss": 4.08, |
| "step": 4950 |
| }, |
| { |
| "epoch": 0.5390835579514824, |
| "grad_norm": 0.5881455540657043, |
| "learning_rate": 0.000568287101996762, |
| "loss": 4.0618, |
| "step": 5000 |
| }, |
| { |
| "epoch": 0.5390835579514824, |
| "eval_accuracy": 0.3202667732623931, |
| "eval_loss": 4.000813007354736, |
| "eval_runtime": 152.9398, |
| "eval_samples_per_second": 117.765, |
| "eval_steps_per_second": 7.362, |
| "step": 5000 |
| }, |
| { |
| "epoch": 0.5444743935309974, |
| "grad_norm": 0.6732229590415955, |
| "learning_rate": 0.0005679633027522935, |
| "loss": 4.068, |
| "step": 5050 |
| }, |
| { |
| "epoch": 0.5498652291105122, |
| "grad_norm": 0.6501848697662354, |
| "learning_rate": 0.000567639503507825, |
| "loss": 4.0611, |
| "step": 5100 |
| }, |
| { |
| "epoch": 0.555256064690027, |
| "grad_norm": 0.6546076536178589, |
| "learning_rate": 0.0005673157042633567, |
| "loss": 4.0601, |
| "step": 5150 |
| }, |
| { |
| "epoch": 0.5606469002695418, |
| "grad_norm": 0.6216645836830139, |
| "learning_rate": 0.0005669919050188883, |
| "loss": 4.0788, |
| "step": 5200 |
| }, |
| { |
| "epoch": 0.5660377358490566, |
| "grad_norm": 0.6688370704650879, |
| "learning_rate": 0.0005666681057744198, |
| "loss": 4.0529, |
| "step": 5250 |
| }, |
| { |
| "epoch": 0.5714285714285714, |
| "grad_norm": 0.7299574613571167, |
| "learning_rate": 0.0005663443065299514, |
| "loss": 4.0448, |
| "step": 5300 |
| }, |
| { |
| "epoch": 0.5768194070080862, |
| "grad_norm": 0.6612991094589233, |
| "learning_rate": 0.000566020507285483, |
| "loss": 4.0386, |
| "step": 5350 |
| }, |
| { |
| "epoch": 0.5822102425876011, |
| "grad_norm": 0.5907670855522156, |
| "learning_rate": 0.0005656967080410146, |
| "loss": 4.0478, |
| "step": 5400 |
| }, |
| { |
| "epoch": 0.5876010781671159, |
| "grad_norm": 0.539881706237793, |
| "learning_rate": 0.000565372908796546, |
| "loss": 4.0481, |
| "step": 5450 |
| }, |
| { |
| "epoch": 0.5929919137466307, |
| "grad_norm": 0.6923815608024597, |
| "learning_rate": 0.0005650491095520777, |
| "loss": 4.028, |
| "step": 5500 |
| }, |
| { |
| "epoch": 0.5983827493261455, |
| "grad_norm": 0.6604423522949219, |
| "learning_rate": 0.0005647253103076092, |
| "loss": 4.0191, |
| "step": 5550 |
| }, |
| { |
| "epoch": 0.6037735849056604, |
| "grad_norm": 0.6762202382087708, |
| "learning_rate": 0.0005644015110631408, |
| "loss": 4.038, |
| "step": 5600 |
| }, |
| { |
| "epoch": 0.6091644204851752, |
| "grad_norm": 0.6797250509262085, |
| "learning_rate": 0.0005640777118186723, |
| "loss": 4.015, |
| "step": 5650 |
| }, |
| { |
| "epoch": 0.6145552560646901, |
| "grad_norm": 0.5718669891357422, |
| "learning_rate": 0.000563753912574204, |
| "loss": 4.0142, |
| "step": 5700 |
| }, |
| { |
| "epoch": 0.6199460916442049, |
| "grad_norm": 0.6036492586135864, |
| "learning_rate": 0.0005634301133297355, |
| "loss": 3.9904, |
| "step": 5750 |
| }, |
| { |
| "epoch": 0.6253369272237197, |
| "grad_norm": 0.6295326352119446, |
| "learning_rate": 0.0005631063140852671, |
| "loss": 3.9973, |
| "step": 5800 |
| }, |
| { |
| "epoch": 0.6307277628032345, |
| "grad_norm": 0.6866003274917603, |
| "learning_rate": 0.0005627825148407986, |
| "loss": 4.0086, |
| "step": 5850 |
| }, |
| { |
| "epoch": 0.6361185983827493, |
| "grad_norm": 0.6472943425178528, |
| "learning_rate": 0.0005624587155963302, |
| "loss": 4.0048, |
| "step": 5900 |
| }, |
| { |
| "epoch": 0.6415094339622641, |
| "grad_norm": 0.6805757284164429, |
| "learning_rate": 0.0005621349163518618, |
| "loss": 3.9913, |
| "step": 5950 |
| }, |
| { |
| "epoch": 0.6469002695417789, |
| "grad_norm": 0.6941430568695068, |
| "learning_rate": 0.0005618111171073934, |
| "loss": 4.0077, |
| "step": 6000 |
| }, |
| { |
| "epoch": 0.6469002695417789, |
| "eval_accuracy": 0.3263130891634934, |
| "eval_loss": 3.9324727058410645, |
| "eval_runtime": 152.9066, |
| "eval_samples_per_second": 117.791, |
| "eval_steps_per_second": 7.364, |
| "step": 6000 |
| }, |
| { |
| "epoch": 0.6522911051212938, |
| "grad_norm": 0.6350868344306946, |
| "learning_rate": 0.0005614873178629249, |
| "loss": 3.9789, |
| "step": 6050 |
| }, |
| { |
| "epoch": 0.6576819407008087, |
| "grad_norm": 0.6860512495040894, |
| "learning_rate": 0.0005611635186184565, |
| "loss": 4.0102, |
| "step": 6100 |
| }, |
| { |
| "epoch": 0.6630727762803235, |
| "grad_norm": 0.771823525428772, |
| "learning_rate": 0.0005608397193739882, |
| "loss": 3.9907, |
| "step": 6150 |
| }, |
| { |
| "epoch": 0.6684636118598383, |
| "grad_norm": 0.6326267123222351, |
| "learning_rate": 0.0005605159201295196, |
| "loss": 3.9633, |
| "step": 6200 |
| }, |
| { |
| "epoch": 0.6738544474393531, |
| "grad_norm": 0.5878480672836304, |
| "learning_rate": 0.0005601921208850511, |
| "loss": 3.9821, |
| "step": 6250 |
| }, |
| { |
| "epoch": 0.6792452830188679, |
| "grad_norm": 0.6786486506462097, |
| "learning_rate": 0.0005598683216405828, |
| "loss": 3.9856, |
| "step": 6300 |
| }, |
| { |
| "epoch": 0.6846361185983828, |
| "grad_norm": 0.5655982494354248, |
| "learning_rate": 0.0005595445223961144, |
| "loss": 3.9732, |
| "step": 6350 |
| }, |
| { |
| "epoch": 0.6900269541778976, |
| "grad_norm": 0.6204545497894287, |
| "learning_rate": 0.0005592207231516459, |
| "loss": 3.9792, |
| "step": 6400 |
| }, |
| { |
| "epoch": 0.6954177897574124, |
| "grad_norm": 0.718676745891571, |
| "learning_rate": 0.0005588969239071775, |
| "loss": 3.9757, |
| "step": 6450 |
| }, |
| { |
| "epoch": 0.7008086253369272, |
| "grad_norm": 0.5987905263900757, |
| "learning_rate": 0.0005585731246627091, |
| "loss": 3.9713, |
| "step": 6500 |
| }, |
| { |
| "epoch": 0.706199460916442, |
| "grad_norm": 0.6304261684417725, |
| "learning_rate": 0.0005582493254182407, |
| "loss": 3.9699, |
| "step": 6550 |
| }, |
| { |
| "epoch": 0.7115902964959568, |
| "grad_norm": 0.5621398091316223, |
| "learning_rate": 0.0005579255261737722, |
| "loss": 3.9707, |
| "step": 6600 |
| }, |
| { |
| "epoch": 0.7169811320754716, |
| "grad_norm": 0.637505829334259, |
| "learning_rate": 0.0005576017269293038, |
| "loss": 3.9813, |
| "step": 6650 |
| }, |
| { |
| "epoch": 0.7223719676549866, |
| "grad_norm": 0.6425164341926575, |
| "learning_rate": 0.0005572779276848353, |
| "loss": 3.9482, |
| "step": 6700 |
| }, |
| { |
| "epoch": 0.7277628032345014, |
| "grad_norm": 0.624649703502655, |
| "learning_rate": 0.000556954128440367, |
| "loss": 3.9741, |
| "step": 6750 |
| }, |
| { |
| "epoch": 0.7331536388140162, |
| "grad_norm": 0.6176960468292236, |
| "learning_rate": 0.0005566303291958984, |
| "loss": 3.9436, |
| "step": 6800 |
| }, |
| { |
| "epoch": 0.738544474393531, |
| "grad_norm": 0.6119649410247803, |
| "learning_rate": 0.0005563065299514301, |
| "loss": 3.9442, |
| "step": 6850 |
| }, |
| { |
| "epoch": 0.7439353099730458, |
| "grad_norm": 0.5820748805999756, |
| "learning_rate": 0.0005559827307069616, |
| "loss": 3.9463, |
| "step": 6900 |
| }, |
| { |
| "epoch": 0.7493261455525606, |
| "grad_norm": 0.6512102484703064, |
| "learning_rate": 0.0005556589314624932, |
| "loss": 3.9498, |
| "step": 6950 |
| }, |
| { |
| "epoch": 0.7547169811320755, |
| "grad_norm": 0.548623263835907, |
| "learning_rate": 0.0005553351322180247, |
| "loss": 3.9306, |
| "step": 7000 |
| }, |
| { |
| "epoch": 0.7547169811320755, |
| "eval_accuracy": 0.3323316985782009, |
| "eval_loss": 3.8741683959960938, |
| "eval_runtime": 152.843, |
| "eval_samples_per_second": 117.84, |
| "eval_steps_per_second": 7.367, |
| "step": 7000 |
| }, |
| { |
| "epoch": 0.7601078167115903, |
| "grad_norm": 0.6566927433013916, |
| "learning_rate": 0.0005550113329735564, |
| "loss": 3.9435, |
| "step": 7050 |
| }, |
| { |
| "epoch": 0.7654986522911051, |
| "grad_norm": 0.8524190187454224, |
| "learning_rate": 0.0005546875337290879, |
| "loss": 3.9177, |
| "step": 7100 |
| }, |
| { |
| "epoch": 0.77088948787062, |
| "grad_norm": 0.6614950299263, |
| "learning_rate": 0.0005543637344846195, |
| "loss": 3.9448, |
| "step": 7150 |
| }, |
| { |
| "epoch": 0.7762803234501348, |
| "grad_norm": 0.5254852175712585, |
| "learning_rate": 0.000554039935240151, |
| "loss": 3.9164, |
| "step": 7200 |
| }, |
| { |
| "epoch": 0.7816711590296496, |
| "grad_norm": 0.6216305494308472, |
| "learning_rate": 0.0005537161359956826, |
| "loss": 3.909, |
| "step": 7250 |
| }, |
| { |
| "epoch": 0.7870619946091644, |
| "grad_norm": 0.6032219529151917, |
| "learning_rate": 0.0005533923367512143, |
| "loss": 3.9232, |
| "step": 7300 |
| }, |
| { |
| "epoch": 0.7924528301886793, |
| "grad_norm": 0.6744025349617004, |
| "learning_rate": 0.0005530685375067458, |
| "loss": 3.9224, |
| "step": 7350 |
| }, |
| { |
| "epoch": 0.7978436657681941, |
| "grad_norm": 0.568401038646698, |
| "learning_rate": 0.0005527447382622774, |
| "loss": 3.918, |
| "step": 7400 |
| }, |
| { |
| "epoch": 0.8032345013477089, |
| "grad_norm": 0.5871582627296448, |
| "learning_rate": 0.0005524209390178089, |
| "loss": 3.8955, |
| "step": 7450 |
| }, |
| { |
| "epoch": 0.8086253369272237, |
| "grad_norm": 0.641424834728241, |
| "learning_rate": 0.0005520971397733406, |
| "loss": 3.9128, |
| "step": 7500 |
| }, |
| { |
| "epoch": 0.8140161725067385, |
| "grad_norm": 0.60320645570755, |
| "learning_rate": 0.000551773340528872, |
| "loss": 3.908, |
| "step": 7550 |
| }, |
| { |
| "epoch": 0.8194070080862533, |
| "grad_norm": 0.6258028149604797, |
| "learning_rate": 0.0005514495412844036, |
| "loss": 3.9018, |
| "step": 7600 |
| }, |
| { |
| "epoch": 0.8247978436657682, |
| "grad_norm": 0.5553128123283386, |
| "learning_rate": 0.0005511257420399352, |
| "loss": 3.9058, |
| "step": 7650 |
| }, |
| { |
| "epoch": 0.8301886792452831, |
| "grad_norm": 0.6213424801826477, |
| "learning_rate": 0.0005508019427954668, |
| "loss": 3.9005, |
| "step": 7700 |
| }, |
| { |
| "epoch": 0.8355795148247979, |
| "grad_norm": 0.6496004462242126, |
| "learning_rate": 0.0005504781435509983, |
| "loss": 3.9025, |
| "step": 7750 |
| }, |
| { |
| "epoch": 0.8409703504043127, |
| "grad_norm": 0.6373844146728516, |
| "learning_rate": 0.0005501543443065299, |
| "loss": 3.8844, |
| "step": 7800 |
| }, |
| { |
| "epoch": 0.8463611859838275, |
| "grad_norm": 0.5957589745521545, |
| "learning_rate": 0.0005498305450620615, |
| "loss": 3.8902, |
| "step": 7850 |
| }, |
| { |
| "epoch": 0.8517520215633423, |
| "grad_norm": 0.6516701579093933, |
| "learning_rate": 0.0005495067458175931, |
| "loss": 3.8876, |
| "step": 7900 |
| }, |
| { |
| "epoch": 0.8571428571428571, |
| "grad_norm": 0.6374366283416748, |
| "learning_rate": 0.0005491829465731246, |
| "loss": 3.9173, |
| "step": 7950 |
| }, |
| { |
| "epoch": 0.862533692722372, |
| "grad_norm": 0.5943326354026794, |
| "learning_rate": 0.0005488591473286562, |
| "loss": 3.8636, |
| "step": 8000 |
| }, |
| { |
| "epoch": 0.862533692722372, |
| "eval_accuracy": 0.3369106572293666, |
| "eval_loss": 3.824968099594116, |
| "eval_runtime": 152.7242, |
| "eval_samples_per_second": 117.932, |
| "eval_steps_per_second": 7.373, |
| "step": 8000 |
| }, |
| { |
| "epoch": 0.8679245283018868, |
| "grad_norm": 0.6996535062789917, |
| "learning_rate": 0.0005485353480841877, |
| "loss": 3.906, |
| "step": 8050 |
| }, |
| { |
| "epoch": 0.8733153638814016, |
| "grad_norm": 0.6234637498855591, |
| "learning_rate": 0.0005482115488397194, |
| "loss": 3.8831, |
| "step": 8100 |
| }, |
| { |
| "epoch": 0.8787061994609164, |
| "grad_norm": 0.6020538806915283, |
| "learning_rate": 0.0005478877495952508, |
| "loss": 3.8864, |
| "step": 8150 |
| }, |
| { |
| "epoch": 0.8840970350404312, |
| "grad_norm": 0.5640770792961121, |
| "learning_rate": 0.0005475639503507825, |
| "loss": 3.8799, |
| "step": 8200 |
| }, |
| { |
| "epoch": 0.889487870619946, |
| "grad_norm": 0.6602328419685364, |
| "learning_rate": 0.000547240151106314, |
| "loss": 3.8788, |
| "step": 8250 |
| }, |
| { |
| "epoch": 0.894878706199461, |
| "grad_norm": 0.6336698532104492, |
| "learning_rate": 0.0005469163518618456, |
| "loss": 3.8969, |
| "step": 8300 |
| }, |
| { |
| "epoch": 0.9002695417789758, |
| "grad_norm": 0.562479555606842, |
| "learning_rate": 0.0005465925526173771, |
| "loss": 3.8686, |
| "step": 8350 |
| }, |
| { |
| "epoch": 0.9056603773584906, |
| "grad_norm": 0.5576388835906982, |
| "learning_rate": 0.0005462687533729087, |
| "loss": 3.8729, |
| "step": 8400 |
| }, |
| { |
| "epoch": 0.9110512129380054, |
| "grad_norm": 0.5111304521560669, |
| "learning_rate": 0.0005459449541284403, |
| "loss": 3.8679, |
| "step": 8450 |
| }, |
| { |
| "epoch": 0.9164420485175202, |
| "grad_norm": 0.6154835224151611, |
| "learning_rate": 0.0005456211548839719, |
| "loss": 3.8749, |
| "step": 8500 |
| }, |
| { |
| "epoch": 0.921832884097035, |
| "grad_norm": 0.5426989197731018, |
| "learning_rate": 0.0005452973556395034, |
| "loss": 3.8685, |
| "step": 8550 |
| }, |
| { |
| "epoch": 0.9272237196765498, |
| "grad_norm": 0.5612382888793945, |
| "learning_rate": 0.000544973556395035, |
| "loss": 3.8747, |
| "step": 8600 |
| }, |
| { |
| "epoch": 0.9326145552560647, |
| "grad_norm": 0.5568158626556396, |
| "learning_rate": 0.0005446497571505667, |
| "loss": 3.8547, |
| "step": 8650 |
| }, |
| { |
| "epoch": 0.9380053908355795, |
| "grad_norm": 0.6064712405204773, |
| "learning_rate": 0.0005443259579060982, |
| "loss": 3.8589, |
| "step": 8700 |
| }, |
| { |
| "epoch": 0.9433962264150944, |
| "grad_norm": 0.5729287266731262, |
| "learning_rate": 0.0005440021586616298, |
| "loss": 3.8548, |
| "step": 8750 |
| }, |
| { |
| "epoch": 0.9487870619946092, |
| "grad_norm": 0.5489872694015503, |
| "learning_rate": 0.0005436783594171613, |
| "loss": 3.8599, |
| "step": 8800 |
| }, |
| { |
| "epoch": 0.954177897574124, |
| "grad_norm": 0.6285766959190369, |
| "learning_rate": 0.0005433545601726929, |
| "loss": 3.8606, |
| "step": 8850 |
| }, |
| { |
| "epoch": 0.9595687331536388, |
| "grad_norm": 0.566647469997406, |
| "learning_rate": 0.0005430307609282244, |
| "loss": 3.8472, |
| "step": 8900 |
| }, |
| { |
| "epoch": 0.9649595687331537, |
| "grad_norm": 0.5826287269592285, |
| "learning_rate": 0.000542706961683756, |
| "loss": 3.8404, |
| "step": 8950 |
| }, |
| { |
| "epoch": 0.9703504043126685, |
| "grad_norm": 0.6150389313697815, |
| "learning_rate": 0.0005423831624392876, |
| "loss": 3.8653, |
| "step": 9000 |
| }, |
| { |
| "epoch": 0.9703504043126685, |
| "eval_accuracy": 0.3401243923451433, |
| "eval_loss": 3.788287878036499, |
| "eval_runtime": 153.2128, |
| "eval_samples_per_second": 117.555, |
| "eval_steps_per_second": 7.349, |
| "step": 9000 |
| }, |
| { |
| "epoch": 0.9757412398921833, |
| "grad_norm": 0.6907781958580017, |
| "learning_rate": 0.0005420593631948192, |
| "loss": 3.8789, |
| "step": 9050 |
| }, |
| { |
| "epoch": 0.9811320754716981, |
| "grad_norm": 0.5165227055549622, |
| "learning_rate": 0.0005417355639503507, |
| "loss": 3.8372, |
| "step": 9100 |
| }, |
| { |
| "epoch": 0.9865229110512129, |
| "grad_norm": 0.563310980796814, |
| "learning_rate": 0.0005414117647058823, |
| "loss": 3.848, |
| "step": 9150 |
| }, |
| { |
| "epoch": 0.9919137466307277, |
| "grad_norm": 0.5229910016059875, |
| "learning_rate": 0.0005410879654614139, |
| "loss": 3.8388, |
| "step": 9200 |
| }, |
| { |
| "epoch": 0.9973045822102425, |
| "grad_norm": 0.5532335638999939, |
| "learning_rate": 0.0005407641662169455, |
| "loss": 3.8559, |
| "step": 9250 |
| }, |
| { |
| "epoch": 1.0026954177897573, |
| "grad_norm": 0.5877416729927063, |
| "learning_rate": 0.000540440366972477, |
| "loss": 3.8053, |
| "step": 9300 |
| }, |
| { |
| "epoch": 1.0080862533692723, |
| "grad_norm": 0.5423709154129028, |
| "learning_rate": 0.0005401165677280086, |
| "loss": 3.7729, |
| "step": 9350 |
| }, |
| { |
| "epoch": 1.013477088948787, |
| "grad_norm": 0.6410120725631714, |
| "learning_rate": 0.0005397927684835401, |
| "loss": 3.7655, |
| "step": 9400 |
| }, |
| { |
| "epoch": 1.0188679245283019, |
| "grad_norm": 0.5875224471092224, |
| "learning_rate": 0.0005394689692390718, |
| "loss": 3.7643, |
| "step": 9450 |
| }, |
| { |
| "epoch": 1.0242587601078168, |
| "grad_norm": 0.5834726095199585, |
| "learning_rate": 0.0005391451699946032, |
| "loss": 3.7698, |
| "step": 9500 |
| }, |
| { |
| "epoch": 1.0296495956873315, |
| "grad_norm": 0.5419567227363586, |
| "learning_rate": 0.0005388213707501349, |
| "loss": 3.7799, |
| "step": 9550 |
| }, |
| { |
| "epoch": 1.0350404312668464, |
| "grad_norm": 0.6511868238449097, |
| "learning_rate": 0.0005384975715056664, |
| "loss": 3.7784, |
| "step": 9600 |
| }, |
| { |
| "epoch": 1.0404312668463611, |
| "grad_norm": 0.6302853226661682, |
| "learning_rate": 0.000538173772261198, |
| "loss": 3.7842, |
| "step": 9650 |
| }, |
| { |
| "epoch": 1.045822102425876, |
| "grad_norm": 0.531711220741272, |
| "learning_rate": 0.0005378499730167295, |
| "loss": 3.7661, |
| "step": 9700 |
| }, |
| { |
| "epoch": 1.0512129380053907, |
| "grad_norm": 0.5548348426818848, |
| "learning_rate": 0.0005375261737722611, |
| "loss": 3.7646, |
| "step": 9750 |
| }, |
| { |
| "epoch": 1.0566037735849056, |
| "grad_norm": 0.5238717794418335, |
| "learning_rate": 0.0005372023745277928, |
| "loss": 3.7914, |
| "step": 9800 |
| }, |
| { |
| "epoch": 1.0619946091644206, |
| "grad_norm": 0.5447802543640137, |
| "learning_rate": 0.0005368785752833243, |
| "loss": 3.7911, |
| "step": 9850 |
| }, |
| { |
| "epoch": 1.0673854447439353, |
| "grad_norm": 0.5512890815734863, |
| "learning_rate": 0.0005365547760388559, |
| "loss": 3.7828, |
| "step": 9900 |
| }, |
| { |
| "epoch": 1.0727762803234502, |
| "grad_norm": 0.5664125680923462, |
| "learning_rate": 0.0005362309767943874, |
| "loss": 3.7949, |
| "step": 9950 |
| }, |
| { |
| "epoch": 1.0781671159029649, |
| "grad_norm": 0.6107265949249268, |
| "learning_rate": 0.0005359071775499191, |
| "loss": 3.7721, |
| "step": 10000 |
| }, |
| { |
| "epoch": 1.0781671159029649, |
| "eval_accuracy": 0.34332498046149446, |
| "eval_loss": 3.7572708129882812, |
| "eval_runtime": 152.6822, |
| "eval_samples_per_second": 117.964, |
| "eval_steps_per_second": 7.375, |
| "step": 10000 |
| } |
| ], |
| "logging_steps": 50, |
| "max_steps": 92750, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 10, |
| "save_steps": 10000, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": false |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 8.36069179392e+16, |
| "train_batch_size": 32, |
| "trial_name": null, |
| "trial_params": null |
| } |