{
  "best_metric": 3.7489497661590576,
  "best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M__1208/checkpoint-10000",
  "epoch": 1.0781671159029649,
  "eval_steps": 1000,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005390835579514825,
      "grad_norm": 1.3872060775756836,
      "learning_rate": 0.0003,
      "loss": 8.7938,
      "step": 50
    },
    {
      "epoch": 0.01078167115902965,
      "grad_norm": 3.891347885131836,
      "learning_rate": 0.0006,
      "loss": 6.992,
      "step": 100
    },
    {
      "epoch": 0.016172506738544475,
      "grad_norm": 1.848716378211975,
      "learning_rate": 0.0005996762007555315,
      "loss": 6.5045,
      "step": 150
    },
    {
      "epoch": 0.0215633423180593,
      "grad_norm": 1.3053127527236938,
      "learning_rate": 0.000599352401511063,
      "loss": 6.2433,
      "step": 200
    },
    {
      "epoch": 0.026954177897574125,
      "grad_norm": 1.2040534019470215,
      "learning_rate": 0.0005990286022665946,
      "loss": 6.0719,
      "step": 250
    },
    {
      "epoch": 0.03234501347708895,
      "grad_norm": 1.105197548866272,
      "learning_rate": 0.0005987048030221263,
      "loss": 5.9606,
      "step": 300
    },
    {
      "epoch": 0.03773584905660377,
      "grad_norm": 1.502025842666626,
      "learning_rate": 0.0005983810037776578,
      "loss": 5.8818,
      "step": 350
    },
    {
      "epoch": 0.0431266846361186,
      "grad_norm": 2.493607759475708,
      "learning_rate": 0.0005980572045331894,
      "loss": 5.8101,
      "step": 400
    },
    {
      "epoch": 0.04851752021563342,
      "grad_norm": 1.3217182159423828,
      "learning_rate": 0.0005977334052887209,
      "loss": 5.7175,
      "step": 450
    },
    {
      "epoch": 0.05390835579514825,
      "grad_norm": 1.215240240097046,
      "learning_rate": 0.0005974096060442526,
      "loss": 5.6639,
      "step": 500
    },
    {
      "epoch": 0.05929919137466307,
      "grad_norm": 1.3821635246276855,
      "learning_rate": 0.0005970858067997841,
      "loss": 5.5744,
      "step": 550
    },
    {
      "epoch": 0.0646900269541779,
      "grad_norm": 1.378275752067566,
      "learning_rate": 0.0005967620075553157,
      "loss": 5.5287,
      "step": 600
    },
    {
      "epoch": 0.07008086253369272,
      "grad_norm": 1.1444066762924194,
      "learning_rate": 0.0005964382083108472,
      "loss": 5.4332,
      "step": 650
    },
    {
      "epoch": 0.07547169811320754,
      "grad_norm": 1.657812476158142,
      "learning_rate": 0.0005961144090663788,
      "loss": 5.4098,
      "step": 700
    },
    {
      "epoch": 0.08086253369272237,
      "grad_norm": 1.3864067792892456,
      "learning_rate": 0.0005957906098219104,
      "loss": 5.3168,
      "step": 750
    },
    {
      "epoch": 0.0862533692722372,
      "grad_norm": 1.0762931108474731,
      "learning_rate": 0.0005954668105774419,
      "loss": 5.2678,
      "step": 800
    },
    {
      "epoch": 0.09164420485175202,
      "grad_norm": 1.2499072551727295,
      "learning_rate": 0.0005951430113329735,
      "loss": 5.2107,
      "step": 850
    },
    {
      "epoch": 0.09703504043126684,
      "grad_norm": 0.9282752275466919,
      "learning_rate": 0.0005948192120885051,
      "loss": 5.1849,
      "step": 900
    },
    {
      "epoch": 0.10242587601078167,
      "grad_norm": 1.071282982826233,
      "learning_rate": 0.0005944954128440366,
      "loss": 5.1275,
      "step": 950
    },
    {
      "epoch": 0.1078167115902965,
      "grad_norm": 1.3316810131072998,
      "learning_rate": 0.0005941716135995682,
      "loss": 5.1151,
      "step": 1000
    },
    {
      "epoch": 0.1078167115902965,
      "eval_accuracy": 0.22672739349001877,
      "eval_loss": 5.027144432067871,
      "eval_runtime": 184.654,
      "eval_samples_per_second": 97.539,
      "eval_steps_per_second": 6.098,
      "step": 1000
    },
    {
      "epoch": 0.11320754716981132,
      "grad_norm": 1.1736186742782593,
      "learning_rate": 0.0005938478143550997,
      "loss": 5.0742,
      "step": 1050
    },
    {
      "epoch": 0.11859838274932614,
      "grad_norm": 1.217553973197937,
      "learning_rate": 0.0005935240151106314,
      "loss": 5.0002,
      "step": 1100
    },
    {
      "epoch": 0.12398921832884097,
      "grad_norm": 1.3116732835769653,
      "learning_rate": 0.0005932002158661629,
      "loss": 4.9959,
      "step": 1150
    },
    {
      "epoch": 0.1293800539083558,
      "grad_norm": 1.10372793674469,
      "learning_rate": 0.0005928764166216945,
      "loss": 4.9613,
      "step": 1200
    },
    {
      "epoch": 0.1347708894878706,
      "grad_norm": 1.0251359939575195,
      "learning_rate": 0.000592552617377226,
      "loss": 4.9081,
      "step": 1250
    },
    {
      "epoch": 0.14016172506738545,
      "grad_norm": 1.1212270259857178,
      "learning_rate": 0.0005922288181327577,
      "loss": 4.9098,
      "step": 1300
    },
    {
      "epoch": 0.14555256064690028,
      "grad_norm": 1.0358482599258423,
      "learning_rate": 0.0005919050188882893,
      "loss": 4.8835,
      "step": 1350
    },
    {
      "epoch": 0.1509433962264151,
      "grad_norm": 1.127200722694397,
      "learning_rate": 0.0005915812196438207,
      "loss": 4.85,
      "step": 1400
    },
    {
      "epoch": 0.15633423180592992,
      "grad_norm": 0.9541453123092651,
      "learning_rate": 0.0005912574203993524,
      "loss": 4.8199,
      "step": 1450
    },
    {
      "epoch": 0.16172506738544473,
      "grad_norm": 0.9905341863632202,
      "learning_rate": 0.0005909336211548839,
      "loss": 4.812,
      "step": 1500
    },
    {
      "epoch": 0.16711590296495957,
      "grad_norm": 0.9883430600166321,
      "learning_rate": 0.0005906098219104155,
      "loss": 4.7776,
      "step": 1550
    },
    {
      "epoch": 0.1725067385444744,
      "grad_norm": 1.14104425907135,
      "learning_rate": 0.000590286022665947,
      "loss": 4.7877,
      "step": 1600
    },
    {
      "epoch": 0.1778975741239892,
      "grad_norm": 0.915554404258728,
      "learning_rate": 0.0005899622234214787,
      "loss": 4.7362,
      "step": 1650
    },
    {
      "epoch": 0.18328840970350405,
      "grad_norm": 0.8896968364715576,
      "learning_rate": 0.0005896384241770102,
      "loss": 4.6944,
      "step": 1700
    },
    {
      "epoch": 0.18867924528301888,
      "grad_norm": 0.7295605540275574,
      "learning_rate": 0.0005893146249325418,
      "loss": 4.679,
      "step": 1750
    },
    {
      "epoch": 0.1940700808625337,
      "grad_norm": 0.8035542368888855,
      "learning_rate": 0.0005889908256880733,
      "loss": 4.669,
      "step": 1800
    },
    {
      "epoch": 0.19946091644204852,
      "grad_norm": 0.9095346927642822,
      "learning_rate": 0.0005886670264436049,
      "loss": 4.6768,
      "step": 1850
    },
    {
      "epoch": 0.20485175202156333,
      "grad_norm": 0.8672581315040588,
      "learning_rate": 0.0005883432271991365,
      "loss": 4.6219,
      "step": 1900
    },
    {
      "epoch": 0.21024258760107817,
      "grad_norm": 0.7349112033843994,
      "learning_rate": 0.0005880194279546681,
      "loss": 4.6077,
      "step": 1950
    },
    {
      "epoch": 0.215633423180593,
      "grad_norm": 0.9716205596923828,
      "learning_rate": 0.0005876956287101996,
      "loss": 4.5792,
      "step": 2000
    },
    {
      "epoch": 0.215633423180593,
      "eval_accuracy": 0.27099464439050674,
      "eval_loss": 4.504695415496826,
      "eval_runtime": 184.1142,
      "eval_samples_per_second": 97.825,
      "eval_steps_per_second": 6.116,
      "step": 2000
    },
    {
      "epoch": 0.2210242587601078,
      "grad_norm": 1.203096628189087,
      "learning_rate": 0.0005873718294657312,
      "loss": 4.5695,
      "step": 2050
    },
    {
      "epoch": 0.22641509433962265,
      "grad_norm": 0.9952515959739685,
      "learning_rate": 0.0005870480302212628,
      "loss": 4.5412,
      "step": 2100
    },
    {
      "epoch": 0.23180592991913745,
      "grad_norm": 0.8814882040023804,
      "learning_rate": 0.0005867242309767943,
      "loss": 4.5392,
      "step": 2150
    },
    {
      "epoch": 0.2371967654986523,
      "grad_norm": 1.0191090106964111,
      "learning_rate": 0.0005864004317323259,
      "loss": 4.5151,
      "step": 2200
    },
    {
      "epoch": 0.24258760107816713,
      "grad_norm": 0.8580870628356934,
      "learning_rate": 0.0005860766324878575,
      "loss": 4.5014,
      "step": 2250
    },
    {
      "epoch": 0.24797843665768193,
      "grad_norm": 0.8280946016311646,
      "learning_rate": 0.000585752833243389,
      "loss": 4.4755,
      "step": 2300
    },
    {
      "epoch": 0.25336927223719674,
      "grad_norm": 0.9512032866477966,
      "learning_rate": 0.0005854290339989206,
      "loss": 4.4624,
      "step": 2350
    },
    {
      "epoch": 0.2587601078167116,
      "grad_norm": 1.000420093536377,
      "learning_rate": 0.0005851052347544521,
      "loss": 4.4523,
      "step": 2400
    },
    {
      "epoch": 0.2641509433962264,
      "grad_norm": 0.9130716919898987,
      "learning_rate": 0.0005847814355099838,
      "loss": 4.4414,
      "step": 2450
    },
    {
      "epoch": 0.2695417789757412,
      "grad_norm": 0.8131120800971985,
      "learning_rate": 0.0005844576362655154,
      "loss": 4.4556,
      "step": 2500
    },
    {
      "epoch": 0.2749326145552561,
      "grad_norm": 0.9587961435317993,
      "learning_rate": 0.0005841338370210469,
      "loss": 4.4083,
      "step": 2550
    },
    {
      "epoch": 0.2803234501347709,
      "grad_norm": 0.846928060054779,
      "learning_rate": 0.0005838100377765785,
      "loss": 4.3825,
      "step": 2600
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.9020141363143921,
      "learning_rate": 0.0005834862385321101,
      "loss": 4.3869,
      "step": 2650
    },
    {
      "epoch": 0.29110512129380056,
      "grad_norm": 1.0460323095321655,
      "learning_rate": 0.0005831624392876417,
      "loss": 4.381,
      "step": 2700
    },
    {
      "epoch": 0.29649595687331537,
      "grad_norm": 0.8714868426322937,
      "learning_rate": 0.0005828386400431731,
      "loss": 4.3618,
      "step": 2750
    },
    {
      "epoch": 0.3018867924528302,
      "grad_norm": 0.7531731724739075,
      "learning_rate": 0.0005825148407987048,
      "loss": 4.361,
      "step": 2800
    },
    {
      "epoch": 0.30727762803234504,
      "grad_norm": 0.7624960541725159,
      "learning_rate": 0.0005821910415542363,
      "loss": 4.3642,
      "step": 2850
    },
    {
      "epoch": 0.31266846361185985,
      "grad_norm": 0.8426908850669861,
      "learning_rate": 0.0005818672423097679,
      "loss": 4.3367,
      "step": 2900
    },
    {
      "epoch": 0.31805929919137466,
      "grad_norm": 0.9197669625282288,
      "learning_rate": 0.0005815434430652994,
      "loss": 4.3496,
      "step": 2950
    },
    {
      "epoch": 0.32345013477088946,
      "grad_norm": 0.8955463171005249,
      "learning_rate": 0.0005812196438208311,
      "loss": 4.2988,
      "step": 3000
    },
    {
      "epoch": 0.32345013477088946,
      "eval_accuracy": 0.2984499253065723,
      "eval_loss": 4.238968372344971,
      "eval_runtime": 185.178,
      "eval_samples_per_second": 97.263,
      "eval_steps_per_second": 6.081,
      "step": 3000
    },
    {
      "epoch": 0.3288409703504043,
      "grad_norm": 0.7810043096542358,
      "learning_rate": 0.0005808958445763626,
      "loss": 4.3082,
      "step": 3050
    },
    {
      "epoch": 0.33423180592991913,
      "grad_norm": 0.7354643940925598,
      "learning_rate": 0.0005805720453318942,
      "loss": 4.2858,
      "step": 3100
    },
    {
      "epoch": 0.33962264150943394,
      "grad_norm": 0.7374374270439148,
      "learning_rate": 0.0005802482460874257,
      "loss": 4.2772,
      "step": 3150
    },
    {
      "epoch": 0.3450134770889488,
      "grad_norm": 0.7399438619613647,
      "learning_rate": 0.0005799244468429573,
      "loss": 4.2993,
      "step": 3200
    },
    {
      "epoch": 0.3504043126684636,
      "grad_norm": 0.7688404321670532,
      "learning_rate": 0.0005796006475984889,
      "loss": 4.2841,
      "step": 3250
    },
    {
      "epoch": 0.3557951482479784,
      "grad_norm": 0.7221906781196594,
      "learning_rate": 0.0005792768483540205,
      "loss": 4.2667,
      "step": 3300
    },
    {
      "epoch": 0.3611859838274933,
      "grad_norm": 0.7419180274009705,
      "learning_rate": 0.000578953049109552,
      "loss": 4.2709,
      "step": 3350
    },
    {
      "epoch": 0.3665768194070081,
      "grad_norm": 0.7906516790390015,
      "learning_rate": 0.0005786292498650836,
      "loss": 4.2394,
      "step": 3400
    },
    {
      "epoch": 0.3719676549865229,
      "grad_norm": 0.7784256339073181,
      "learning_rate": 0.0005783054506206152,
      "loss": 4.2519,
      "step": 3450
    },
    {
      "epoch": 0.37735849056603776,
      "grad_norm": 0.7829201221466064,
      "learning_rate": 0.0005779816513761467,
      "loss": 4.2314,
      "step": 3500
    },
    {
      "epoch": 0.38274932614555257,
      "grad_norm": 0.8310431241989136,
      "learning_rate": 0.0005776578521316782,
      "loss": 4.2097,
      "step": 3550
    },
    {
      "epoch": 0.3881401617250674,
      "grad_norm": 0.8430302739143372,
      "learning_rate": 0.0005773340528872099,
      "loss": 4.2306,
      "step": 3600
    },
    {
      "epoch": 0.3935309973045822,
      "grad_norm": 0.733945906162262,
      "learning_rate": 0.0005770102536427414,
      "loss": 4.2221,
      "step": 3650
    },
    {
      "epoch": 0.39892183288409705,
      "grad_norm": 0.6383342742919922,
      "learning_rate": 0.000576686454398273,
      "loss": 4.2028,
      "step": 3700
    },
    {
      "epoch": 0.40431266846361186,
      "grad_norm": 0.6672252416610718,
      "learning_rate": 0.0005763626551538045,
      "loss": 4.1801,
      "step": 3750
    },
    {
      "epoch": 0.40970350404312667,
      "grad_norm": 0.7545523643493652,
      "learning_rate": 0.0005760388559093362,
      "loss": 4.1881,
      "step": 3800
    },
    {
      "epoch": 0.41509433962264153,
      "grad_norm": 0.7340240478515625,
      "learning_rate": 0.0005757150566648678,
      "loss": 4.171,
      "step": 3850
    },
    {
      "epoch": 0.42048517520215634,
      "grad_norm": 0.7903727889060974,
      "learning_rate": 0.0005753912574203993,
      "loss": 4.1784,
      "step": 3900
    },
    {
      "epoch": 0.42587601078167114,
      "grad_norm": 0.8018081188201904,
      "learning_rate": 0.0005750674581759309,
      "loss": 4.1838,
      "step": 3950
    },
    {
      "epoch": 0.431266846361186,
      "grad_norm": 0.722425103187561,
      "learning_rate": 0.0005747436589314624,
      "loss": 4.1617,
      "step": 4000
    },
    {
      "epoch": 0.431266846361186,
      "eval_accuracy": 0.31149631215800827,
      "eval_loss": 4.093442440032959,
      "eval_runtime": 185.3481,
      "eval_samples_per_second": 97.174,
      "eval_steps_per_second": 6.075,
      "step": 4000
    },
    {
      "epoch": 0.4366576819407008,
      "grad_norm": 0.9414203763008118,
      "learning_rate": 0.0005744198596869941,
      "loss": 4.15,
      "step": 4050
    },
    {
      "epoch": 0.4420485175202156,
      "grad_norm": 0.7414451241493225,
      "learning_rate": 0.0005740960604425255,
      "loss": 4.1367,
      "step": 4100
    },
    {
      "epoch": 0.4474393530997305,
      "grad_norm": 0.693946361541748,
      "learning_rate": 0.0005737722611980572,
      "loss": 4.1503,
      "step": 4150
    },
    {
      "epoch": 0.4528301886792453,
      "grad_norm": 0.8669963479042053,
      "learning_rate": 0.0005734484619535887,
      "loss": 4.1342,
      "step": 4200
    },
    {
      "epoch": 0.4582210242587601,
      "grad_norm": 0.717522144317627,
      "learning_rate": 0.0005731246627091203,
      "loss": 4.1361,
      "step": 4250
    },
    {
      "epoch": 0.4636118598382749,
      "grad_norm": 0.5836500525474548,
      "learning_rate": 0.0005728008634646518,
      "loss": 4.1184,
      "step": 4300
    },
    {
      "epoch": 0.46900269541778977,
      "grad_norm": 0.6450395584106445,
      "learning_rate": 0.0005724770642201835,
      "loss": 4.1149,
      "step": 4350
    },
    {
      "epoch": 0.4743935309973046,
      "grad_norm": 0.6113871335983276,
      "learning_rate": 0.000572153264975715,
      "loss": 4.1102,
      "step": 4400
    },
    {
      "epoch": 0.4797843665768194,
      "grad_norm": 0.7142659425735474,
      "learning_rate": 0.0005718294657312466,
      "loss": 4.1382,
      "step": 4450
    },
    {
      "epoch": 0.48517520215633425,
      "grad_norm": 0.6929596066474915,
      "learning_rate": 0.0005715056664867781,
      "loss": 4.0957,
      "step": 4500
    },
    {
      "epoch": 0.49056603773584906,
      "grad_norm": 0.5346873998641968,
      "learning_rate": 0.0005711818672423097,
      "loss": 4.1046,
      "step": 4550
    },
    {
      "epoch": 0.49595687331536387,
      "grad_norm": 0.702147901058197,
      "learning_rate": 0.0005708580679978413,
      "loss": 4.1,
      "step": 4600
    },
    {
      "epoch": 0.5013477088948787,
      "grad_norm": 0.5867359042167664,
      "learning_rate": 0.0005705342687533729,
      "loss": 4.0953,
      "step": 4650
    },
    {
      "epoch": 0.5067385444743935,
      "grad_norm": 0.6331294178962708,
      "learning_rate": 0.0005702104695089044,
      "loss": 4.0979,
      "step": 4700
    },
    {
      "epoch": 0.5121293800539084,
      "grad_norm": 0.6199395656585693,
      "learning_rate": 0.000569886670264436,
      "loss": 4.0987,
      "step": 4750
    },
    {
      "epoch": 0.5175202156334232,
      "grad_norm": 0.6783130764961243,
      "learning_rate": 0.0005695628710199675,
      "loss": 4.0776,
      "step": 4800
    },
    {
      "epoch": 0.522911051212938,
      "grad_norm": 0.6518664956092834,
      "learning_rate": 0.0005692390717754991,
      "loss": 4.1048,
      "step": 4850
    },
    {
      "epoch": 0.5283018867924528,
      "grad_norm": 0.6022597551345825,
      "learning_rate": 0.0005689152725310306,
      "loss": 4.0778,
      "step": 4900
    },
    {
      "epoch": 0.5336927223719676,
      "grad_norm": 0.6618348360061646,
      "learning_rate": 0.0005685914732865623,
      "loss": 4.0617,
      "step": 4950
    },
    {
      "epoch": 0.5390835579514824,
      "grad_norm": 0.5986215472221375,
      "learning_rate": 0.0005682676740420939,
      "loss": 4.0655,
      "step": 5000
    },
    {
      "epoch": 0.5390835579514824,
      "eval_accuracy": 0.32159864036122304,
      "eval_loss": 3.9903128147125244,
      "eval_runtime": 185.2705,
      "eval_samples_per_second": 97.215,
      "eval_steps_per_second": 6.078,
      "step": 5000
    },
    {
      "epoch": 0.5444743935309974,
      "grad_norm": 0.5965196490287781,
      "learning_rate": 0.0005679438747976254,
      "loss": 4.0629,
      "step": 5050
    },
    {
      "epoch": 0.5498652291105122,
      "grad_norm": 0.618198812007904,
      "learning_rate": 0.000567620075553157,
      "loss": 4.048,
      "step": 5100
    },
    {
      "epoch": 0.555256064690027,
      "grad_norm": 0.6078091263771057,
      "learning_rate": 0.0005672962763086886,
      "loss": 4.0601,
      "step": 5150
    },
    {
      "epoch": 0.5606469002695418,
      "grad_norm": 0.5713509917259216,
      "learning_rate": 0.0005669724770642202,
      "loss": 4.054,
      "step": 5200
    },
    {
      "epoch": 0.5660377358490566,
      "grad_norm": 0.7706556916236877,
      "learning_rate": 0.0005666486778197517,
      "loss": 4.0499,
      "step": 5250
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.6719933748245239,
      "learning_rate": 0.0005663248785752833,
      "loss": 4.0337,
      "step": 5300
    },
    {
      "epoch": 0.5768194070080862,
      "grad_norm": 0.6685216426849365,
      "learning_rate": 0.0005660010793308148,
      "loss": 4.0071,
      "step": 5350
    },
    {
      "epoch": 0.5822102425876011,
      "grad_norm": 0.6304830312728882,
      "learning_rate": 0.0005656772800863465,
      "loss": 4.0476,
      "step": 5400
    },
    {
      "epoch": 0.5876010781671159,
      "grad_norm": 0.6301653385162354,
      "learning_rate": 0.0005653534808418779,
      "loss": 4.0282,
      "step": 5450
    },
    {
      "epoch": 0.5929919137466307,
      "grad_norm": 0.6767484545707703,
      "learning_rate": 0.0005650296815974096,
      "loss": 4.0349,
      "step": 5500
    },
    {
      "epoch": 0.5983827493261455,
      "grad_norm": 0.6710530519485474,
      "learning_rate": 0.0005647058823529411,
      "loss": 4.0179,
      "step": 5550
    },
    {
      "epoch": 0.6037735849056604,
      "grad_norm": 0.5611121654510498,
      "learning_rate": 0.0005643820831084727,
      "loss": 4.0202,
      "step": 5600
    },
    {
      "epoch": 0.6091644204851752,
      "grad_norm": 0.6187557578086853,
      "learning_rate": 0.0005640582838640042,
      "loss": 3.9935,
      "step": 5650
    },
    {
      "epoch": 0.6145552560646901,
      "grad_norm": 0.6690448522567749,
      "learning_rate": 0.0005637344846195358,
      "loss": 4.0082,
      "step": 5700
    },
    {
      "epoch": 0.6199460916442049,
      "grad_norm": 0.6771255135536194,
      "learning_rate": 0.0005634106853750674,
      "loss": 4.0026,
      "step": 5750
    },
    {
      "epoch": 0.6253369272237197,
      "grad_norm": 0.5823601484298706,
      "learning_rate": 0.000563086886130599,
      "loss": 4.014,
      "step": 5800
    },
    {
      "epoch": 0.6307277628032345,
      "grad_norm": 0.6157869100570679,
      "learning_rate": 0.0005627630868861305,
      "loss": 3.9952,
      "step": 5850
    },
    {
      "epoch": 0.6361185983827493,
      "grad_norm": 0.6894795894622803,
      "learning_rate": 0.0005624392876416621,
      "loss": 3.9947,
      "step": 5900
    },
    {
      "epoch": 0.6415094339622641,
      "grad_norm": 0.6236327290534973,
      "learning_rate": 0.0005621154883971937,
      "loss": 3.996,
      "step": 5950
    },
    {
      "epoch": 0.6469002695417789,
      "grad_norm": 0.6344878077507019,
      "learning_rate": 0.0005617916891527253,
      "loss": 3.9934,
      "step": 6000
    },
    {
      "epoch": 0.6469002695417789,
      "eval_accuracy": 0.3280596843351111,
      "eval_loss": 3.924257278442383,
      "eval_runtime": 185.4361,
      "eval_samples_per_second": 97.128,
      "eval_steps_per_second": 6.072,
      "step": 6000
    },
    {
      "epoch": 0.6522911051212938,
      "grad_norm": 0.5802189111709595,
      "learning_rate": 0.0005614743658931463,
      "loss": 3.9855,
      "step": 6050
    },
    {
      "epoch": 0.6576819407008087,
      "grad_norm": 0.6164999604225159,
      "learning_rate": 0.0005611505666486777,
      "loss": 4.0042,
      "step": 6100
    },
    {
      "epoch": 0.6630727762803235,
      "grad_norm": 0.6946608424186707,
      "learning_rate": 0.0005608267674042094,
      "loss": 3.989,
      "step": 6150
    },
    {
      "epoch": 0.6684636118598383,
      "grad_norm": 0.6158466935157776,
      "learning_rate": 0.0005605029681597409,
      "loss": 3.9871,
      "step": 6200
    },
    {
      "epoch": 0.6738544474393531,
      "grad_norm": 0.6288596391677856,
      "learning_rate": 0.0005601791689152725,
      "loss": 3.9773,
      "step": 6250
    },
    {
      "epoch": 0.6792452830188679,
      "grad_norm": 0.7042039036750793,
      "learning_rate": 0.000559855369670804,
      "loss": 3.9791,
      "step": 6300
    },
    {
      "epoch": 0.6846361185983828,
      "grad_norm": 0.5582059025764465,
      "learning_rate": 0.0005595315704263357,
      "loss": 3.9639,
      "step": 6350
    },
    {
      "epoch": 0.6900269541778976,
      "grad_norm": 0.6060189604759216,
      "learning_rate": 0.0005592077711818672,
      "loss": 3.9775,
      "step": 6400
    },
    {
      "epoch": 0.6954177897574124,
      "grad_norm": 0.7476151585578918,
      "learning_rate": 0.0005588839719373988,
      "loss": 3.9536,
      "step": 6450
    },
    {
      "epoch": 0.7008086253369272,
      "grad_norm": 0.6221930384635925,
      "learning_rate": 0.0005585601726929303,
      "loss": 3.9583,
      "step": 6500
    },
    {
      "epoch": 0.706199460916442,
      "grad_norm": 0.6130439639091492,
      "learning_rate": 0.0005582363734484619,
      "loss": 3.9417,
      "step": 6550
    },
    {
      "epoch": 0.7115902964959568,
      "grad_norm": 0.5701190233230591,
      "learning_rate": 0.0005579125742039935,
      "loss": 3.9488,
      "step": 6600
    },
    {
      "epoch": 0.7169811320754716,
      "grad_norm": 0.5955173969268799,
      "learning_rate": 0.0005575887749595251,
      "loss": 3.9511,
      "step": 6650
    },
    {
      "epoch": 0.7223719676549866,
      "grad_norm": 0.6578053832054138,
      "learning_rate": 0.0005572649757150566,
      "loss": 3.9526,
      "step": 6700
    },
    {
      "epoch": 0.7277628032345014,
      "grad_norm": 0.7318617105484009,
      "learning_rate": 0.0005569411764705882,
      "loss": 3.9511,
      "step": 6750
    },
    {
      "epoch": 0.7331536388140162,
      "grad_norm": 0.6341660618782043,
      "learning_rate": 0.0005566173772261198,
      "loss": 3.918,
      "step": 6800
    },
    {
      "epoch": 0.738544474393531,
      "grad_norm": 0.5913158059120178,
      "learning_rate": 0.0005562935779816513,
      "loss": 3.9336,
      "step": 6850
    },
    {
      "epoch": 0.7439353099730458,
      "grad_norm": 0.6217501759529114,
      "learning_rate": 0.0005559697787371828,
      "loss": 3.9485,
      "step": 6900
    },
    {
      "epoch": 0.7493261455525606,
      "grad_norm": 0.629990816116333,
      "learning_rate": 0.0005556459794927145,
      "loss": 3.9304,
      "step": 6950
    },
    {
      "epoch": 0.7547169811320755,
      "grad_norm": 0.598209798336029,
      "learning_rate": 0.000555322180248246,
      "loss": 3.9432,
      "step": 7000
    },
    {
      "epoch": 0.7547169811320755,
      "eval_accuracy": 0.33339541034990466,
      "eval_loss": 3.867119312286377,
      "eval_runtime": 185.4718,
      "eval_samples_per_second": 97.109,
      "eval_steps_per_second": 6.071,
      "step": 7000
    },
    {
      "epoch": 0.7601078167115903,
      "grad_norm": 0.7586105465888977,
      "learning_rate": 0.0005549983810037776,
      "loss": 3.911,
      "step": 7050
    },
    {
      "epoch": 0.7654986522911051,
      "grad_norm": 0.6012836694717407,
      "learning_rate": 0.0005546745817593091,
      "loss": 3.9126,
      "step": 7100
    },
    {
      "epoch": 0.77088948787062,
      "grad_norm": 0.6788042187690735,
      "learning_rate": 0.0005543507825148408,
      "loss": 3.9149,
      "step": 7150
    },
    {
      "epoch": 0.7762803234501348,
      "grad_norm": 0.60019451379776,
      "learning_rate": 0.0005540269832703723,
      "loss": 3.8948,
      "step": 7200
    },
    {
      "epoch": 0.7816711590296496,
      "grad_norm": 0.5468128323554993,
      "learning_rate": 0.0005537031840259039,
      "loss": 3.9228,
      "step": 7250
    },
    {
      "epoch": 0.7870619946091644,
      "grad_norm": 0.560189425945282,
      "learning_rate": 0.0005533793847814354,
      "loss": 3.8974,
      "step": 7300
    },
    {
      "epoch": 0.7924528301886793,
      "grad_norm": 0.6238011717796326,
      "learning_rate": 0.000553055585536967,
      "loss": 3.9169,
      "step": 7350
    },
    {
      "epoch": 0.7978436657681941,
      "grad_norm": 0.6309143304824829,
      "learning_rate": 0.0005527317862924987,
      "loss": 3.9167,
      "step": 7400
    },
    {
      "epoch": 0.8032345013477089,
      "grad_norm": 0.5579202771186829,
      "learning_rate": 0.0005524079870480301,
      "loss": 3.9102,
      "step": 7450
    },
    {
      "epoch": 0.8086253369272237,
      "grad_norm": 0.637144923210144,
      "learning_rate": 0.0005520841878035618,
      "loss": 3.9305,
      "step": 7500
    },
    {
      "epoch": 0.8140161725067385,
      "grad_norm": 0.560736358165741,
      "learning_rate": 0.0005517603885590933,
      "loss": 3.9012,
      "step": 7550
    },
    {
      "epoch": 0.8194070080862533,
      "grad_norm": 0.5624358057975769,
      "learning_rate": 0.0005514365893146249,
      "loss": 3.8973,
      "step": 7600
    },
    {
      "epoch": 0.8247978436657682,
      "grad_norm": 0.5344393253326416,
      "learning_rate": 0.0005511127900701564,
      "loss": 3.8894,
      "step": 7650
    },
    {
      "epoch": 0.8301886792452831,
      "grad_norm": 0.5487794280052185,
      "learning_rate": 0.000550788990825688,
      "loss": 3.9173,
      "step": 7700
    },
    {
      "epoch": 0.8355795148247979,
      "grad_norm": 0.5416852831840515,
      "learning_rate": 0.0005504651915812196,
      "loss": 3.9131,
      "step": 7750
    },
    {
      "epoch": 0.8409703504043127,
      "grad_norm": 0.6219443678855896,
      "learning_rate": 0.0005501413923367512,
      "loss": 3.8783,
      "step": 7800
    },
    {
      "epoch": 0.8463611859838275,
      "grad_norm": 0.5484825968742371,
      "learning_rate": 0.0005498175930922827,
      "loss": 3.8889,
      "step": 7850
    },
    {
      "epoch": 0.8517520215633423,
      "grad_norm": 0.5506182909011841,
      "learning_rate": 0.0005494937938478143,
      "loss": 3.8945,
      "step": 7900
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.5658506155014038,
      "learning_rate": 0.0005491699946033459,
      "loss": 3.883,
      "step": 7950
    },
    {
      "epoch": 0.862533692722372,
      "grad_norm": 0.5695346593856812,
      "learning_rate": 0.0005488461953588775,
      "loss": 3.8869,
      "step": 8000
    },
    {
      "epoch": 0.862533692722372,
      "eval_accuracy": 0.33750498852571176,
      "eval_loss": 3.818067789077759,
      "eval_runtime": 185.1439,
      "eval_samples_per_second": 97.281,
      "eval_steps_per_second": 6.082,
      "step": 8000
    },
    {
      "epoch": 0.8679245283018868,
      "grad_norm": 0.5362111926078796,
      "learning_rate": 0.000548522396114409,
      "loss": 3.8853,
      "step": 8050
    },
    {
      "epoch": 0.8733153638814016,
      "grad_norm": 0.6255955696105957,
      "learning_rate": 0.0005482050728548299,
      "loss": 3.8955,
      "step": 8100
    },
    {
      "epoch": 0.8787061994609164,
      "grad_norm": 0.4989166557788849,
      "learning_rate": 0.0005478812736103615,
      "loss": 3.8747,
      "step": 8150
    },
    {
      "epoch": 0.8840970350404312,
      "grad_norm": 0.5338106751441956,
      "learning_rate": 0.0005475574743658931,
      "loss": 3.8772,
      "step": 8200
    },
    {
      "epoch": 0.889487870619946,
      "grad_norm": 0.6587180495262146,
      "learning_rate": 0.0005472336751214246,
      "loss": 3.8723,
      "step": 8250
    },
    {
      "epoch": 0.894878706199461,
      "grad_norm": 0.6544439196586609,
      "learning_rate": 0.0005469098758769562,
      "loss": 3.856,
      "step": 8300
    },
    {
      "epoch": 0.9002695417789758,
      "grad_norm": 0.5896086692810059,
      "learning_rate": 0.0005465860766324878,
      "loss": 3.8439,
      "step": 8350
    },
    {
      "epoch": 0.9056603773584906,
      "grad_norm": 0.579795241355896,
      "learning_rate": 0.0005462622773880194,
      "loss": 3.892,
      "step": 8400
    },
    {
      "epoch": 0.9110512129380054,
      "grad_norm": 0.6916389465332031,
      "learning_rate": 0.000545938478143551,
      "loss": 3.8569,
      "step": 8450
    },
    {
      "epoch": 0.9164420485175202,
      "grad_norm": 0.5650646686553955,
      "learning_rate": 0.0005456146788990825,
      "loss": 3.8475,
      "step": 8500
    },
    {
      "epoch": 0.921832884097035,
      "grad_norm": 0.5492226481437683,
      "learning_rate": 0.000545290879654614,
      "loss": 3.86,
      "step": 8550
    },
    {
      "epoch": 0.9272237196765498,
      "grad_norm": 0.6170901656150818,
      "learning_rate": 0.0005449670804101457,
      "loss": 3.86,
      "step": 8600
    },
    {
      "epoch": 0.9326145552560647,
      "grad_norm": 0.605499804019928,
      "learning_rate": 0.0005446432811656773,
      "loss": 3.8616,
      "step": 8650
    },
    {
      "epoch": 0.9380053908355795,
      "grad_norm": 0.5699788331985474,
      "learning_rate": 0.0005443194819212088,
      "loss": 3.8398,
      "step": 8700
    },
    {
      "epoch": 0.9433962264150944,
      "grad_norm": 0.6459490656852722,
      "learning_rate": 0.0005439956826767404,
      "loss": 3.8561,
      "step": 8750
    },
    {
      "epoch": 0.9487870619946092,
      "grad_norm": 0.5878280401229858,
      "learning_rate": 0.000543671883432272,
      "loss": 3.8437,
      "step": 8800
    },
    {
      "epoch": 0.954177897574124,
      "grad_norm": 0.5819820165634155,
      "learning_rate": 0.0005433480841878035,
      "loss": 3.8392,
      "step": 8850
    },
    {
      "epoch": 0.9595687331536388,
      "grad_norm": 0.6361739635467529,
      "learning_rate": 0.000543024284943335,
      "loss": 3.8312,
      "step": 8900
    },
    {
      "epoch": 0.9649595687331537,
      "grad_norm": 0.6311874389648438,
      "learning_rate": 0.0005427004856988667,
      "loss": 3.8481,
      "step": 8950
    },
    {
      "epoch": 0.9703504043126685,
      "grad_norm": 0.6704027056694031,
      "learning_rate": 0.0005423766864543982,
      "loss": 3.848,
      "step": 9000
    },
    {
      "epoch": 0.9703504043126685,
      "eval_accuracy": 0.3414659295509887,
      "eval_loss": 3.779182195663452,
      "eval_runtime": 184.1503,
      "eval_samples_per_second": 97.806,
      "eval_steps_per_second": 6.115,
      "step": 9000
    },
    {
      "epoch": 0.9757412398921833,
      "grad_norm": 0.5311286449432373,
      "learning_rate": 0.0005420528872099298,
      "loss": 3.8298,
      "step": 9050
    },
    {
      "epoch": 0.9811320754716981,
      "grad_norm": 0.5362455248832703,
      "learning_rate": 0.0005417290879654613,
      "loss": 3.8289,
      "step": 9100
    },
    {
      "epoch": 0.9865229110512129,
      "grad_norm": 0.6195308566093445,
      "learning_rate": 0.000541405288720993,
      "loss": 3.8465,
      "step": 9150
    },
    {
      "epoch": 0.9919137466307277,
      "grad_norm": 0.5365903973579407,
      "learning_rate": 0.0005410814894765245,
      "loss": 3.8248,
      "step": 9200
    },
    {
      "epoch": 0.9973045822102425,
      "grad_norm": 0.652845025062561,
      "learning_rate": 0.0005407576902320561,
      "loss": 3.8518,
      "step": 9250
    },
    {
      "epoch": 1.0026954177897573,
      "grad_norm": 0.60700523853302,
      "learning_rate": 0.0005404338909875876,
      "loss": 3.7881,
      "step": 9300
    },
    {
      "epoch": 1.0080862533692723,
      "grad_norm": 0.6487429738044739,
      "learning_rate": 0.0005401100917431192,
      "loss": 3.7647,
      "step": 9350
    },
    {
      "epoch": 1.013477088948787,
      "grad_norm": 0.561632513999939,
      "learning_rate": 0.0005397862924986508,
      "loss": 3.7791,
      "step": 9400
    },
    {
      "epoch": 1.0188679245283019,
      "grad_norm": 0.5459903478622437,
      "learning_rate": 0.0005394624932541824,
      "loss": 3.7855,
      "step": 9450
    },
    {
      "epoch": 1.0242587601078168,
      "grad_norm": 0.5852130055427551,
      "learning_rate": 0.0005391386940097139,
      "loss": 3.7552,
      "step": 9500
    },
    {
      "epoch": 1.0296495956873315,
      "grad_norm": 0.6032885909080505,
      "learning_rate": 0.0005388148947652455,
      "loss": 3.7671,
      "step": 9550
    },
    {
      "epoch": 1.0350404312668464,
      "grad_norm": 0.6321649551391602,
      "learning_rate": 0.000538491095520777,
      "loss": 3.795,
      "step": 9600
    },
    {
      "epoch": 1.0404312668463611,
      "grad_norm": 0.615997314453125,
      "learning_rate": 0.0005381672962763086,
      "loss": 3.7544,
      "step": 9650
    },
    {
      "epoch": 1.045822102425876,
      "grad_norm": 0.6234314441680908,
      "learning_rate": 0.0005378434970318403,
      "loss": 3.7772,
      "step": 9700
    },
    {
      "epoch": 1.0512129380053907,
      "grad_norm": 0.638446569442749,
      "learning_rate": 0.0005375196977873718,
      "loss": 3.753,
      "step": 9750
    },
    {
      "epoch": 1.0566037735849056,
      "grad_norm": 0.6064075827598572,
      "learning_rate": 0.0005371958985429034,
      "loss": 3.765,
      "step": 9800
    },
    {
      "epoch": 1.0619946091644206,
      "grad_norm": 0.572862982749939,
      "learning_rate": 0.0005368720992984349,
      "loss": 3.7706,
      "step": 9850
    },
    {
      "epoch": 1.0673854447439353,
      "grad_norm": 0.6439206600189209,
      "learning_rate": 0.0005365483000539665,
      "loss": 3.7649,
      "step": 9900
    },
    {
      "epoch": 1.0727762803234502,
      "grad_norm": 0.5527343153953552,
      "learning_rate": 0.0005362245008094981,
      "loss": 3.7804,
      "step": 9950
    },
    {
      "epoch": 1.0781671159029649,
      "grad_norm": 0.5652170777320862,
      "learning_rate": 0.0005359007015650297,
      "loss": 3.7701,
      "step": 10000
    },
    {
      "epoch": 1.0781671159029649,
      "eval_accuracy": 0.3446935722364057,
      "eval_loss": 3.7489497661590576,
      "eval_runtime": 184.1084,
      "eval_samples_per_second": 97.828,
      "eval_steps_per_second": 6.116,
      "step": 10000
    }
  ],
  "logging_steps": 50,
  "max_steps": 92750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.36069179392e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}