| { | |
| "best_metric": 3.5728728771209717, | |
| "best_model_checkpoint": "/scratch/cl5625/exceptions/models/100M__634/checkpoint-20000", | |
| "epoch": 2.1563342318059298, | |
| "eval_steps": 1000, | |
| "global_step": 20000, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.005390835579514825, | |
| "grad_norm": 1.412644624710083, | |
| "learning_rate": 0.0003, | |
| "loss": 8.6259, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.01078167115902965, | |
| "grad_norm": 3.4297802448272705, | |
| "learning_rate": 0.0006, | |
| "loss": 6.925, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.016172506738544475, | |
| "grad_norm": 1.0797241926193237, | |
| "learning_rate": 0.0005996762007555315, | |
| "loss": 6.4904, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.0215633423180593, | |
| "grad_norm": 1.8681477308273315, | |
| "learning_rate": 0.000599352401511063, | |
| "loss": 6.2514, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.026954177897574125, | |
| "grad_norm": 1.0239183902740479, | |
| "learning_rate": 0.0005990286022665946, | |
| "loss": 6.0911, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.03234501347708895, | |
| "grad_norm": 1.5482831001281738, | |
| "learning_rate": 0.0005987048030221263, | |
| "loss": 5.9845, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.03773584905660377, | |
| "grad_norm": 1.6334092617034912, | |
| "learning_rate": 0.0005983810037776578, | |
| "loss": 5.875, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.0431266846361186, | |
| "grad_norm": 1.5844093561172485, | |
| "learning_rate": 0.0005980572045331894, | |
| "loss": 5.7818, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.04851752021563342, | |
| "grad_norm": 1.2689578533172607, | |
| "learning_rate": 0.0005977334052887209, | |
| "loss": 5.712, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.05390835579514825, | |
| "grad_norm": 1.3288975954055786, | |
| "learning_rate": 0.0005974096060442526, | |
| "loss": 5.6535, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.05929919137466307, | |
| "grad_norm": 2.041428804397583, | |
| "learning_rate": 0.0005970858067997841, | |
| "loss": 5.5725, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.0646900269541779, | |
| "grad_norm": 1.4345282316207886, | |
| "learning_rate": 0.0005967620075553157, | |
| "loss": 5.5081, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.07008086253369272, | |
| "grad_norm": 1.1454274654388428, | |
| "learning_rate": 0.0005964382083108472, | |
| "loss": 5.4223, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.07547169811320754, | |
| "grad_norm": 1.3169114589691162, | |
| "learning_rate": 0.0005961144090663788, | |
| "loss": 5.3711, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.08086253369272237, | |
| "grad_norm": 1.3919389247894287, | |
| "learning_rate": 0.0005957906098219104, | |
| "loss": 5.3003, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.0862533692722372, | |
| "grad_norm": 1.0709481239318848, | |
| "learning_rate": 0.0005954668105774419, | |
| "loss": 5.2637, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.09164420485175202, | |
| "grad_norm": 0.8944941759109497, | |
| "learning_rate": 0.0005951430113329735, | |
| "loss": 5.1967, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.09703504043126684, | |
| "grad_norm": 1.1984753608703613, | |
| "learning_rate": 0.0005948192120885051, | |
| "loss": 5.1709, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.10242587601078167, | |
| "grad_norm": 1.0900869369506836, | |
| "learning_rate": 0.0005944954128440366, | |
| "loss": 5.1332, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.1078167115902965, | |
| "grad_norm": 1.4634649753570557, | |
| "learning_rate": 0.0005941716135995682, | |
| "loss": 5.0778, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.1078167115902965, | |
| "eval_accuracy": 0.22697957684263617, | |
| "eval_loss": 5.024423599243164, | |
| "eval_runtime": 183.2168, | |
| "eval_samples_per_second": 98.304, | |
| "eval_steps_per_second": 6.146, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.11320754716981132, | |
| "grad_norm": 1.0724071264266968, | |
| "learning_rate": 0.0005938478143550997, | |
| "loss": 5.039, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.11859838274932614, | |
| "grad_norm": 0.7434335947036743, | |
| "learning_rate": 0.0005935240151106314, | |
| "loss": 5.0145, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.12398921832884097, | |
| "grad_norm": 1.1407990455627441, | |
| "learning_rate": 0.0005932002158661629, | |
| "loss": 4.9884, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.1293800539083558, | |
| "grad_norm": 1.3579697608947754, | |
| "learning_rate": 0.0005928764166216945, | |
| "loss": 4.9232, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.1347708894878706, | |
| "grad_norm": 1.170091986656189, | |
| "learning_rate": 0.000592552617377226, | |
| "loss": 4.9022, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.14016172506738545, | |
| "grad_norm": 1.056681513786316, | |
| "learning_rate": 0.0005922288181327577, | |
| "loss": 4.8796, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.14555256064690028, | |
| "grad_norm": 0.8261628746986389, | |
| "learning_rate": 0.0005919050188882893, | |
| "loss": 4.8425, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.1509433962264151, | |
| "grad_norm": 0.7253502011299133, | |
| "learning_rate": 0.0005915812196438207, | |
| "loss": 4.836, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.15633423180592992, | |
| "grad_norm": 1.0881081819534302, | |
| "learning_rate": 0.0005912574203993524, | |
| "loss": 4.8436, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 0.16172506738544473, | |
| "grad_norm": 0.8581550717353821, | |
| "learning_rate": 0.0005909336211548839, | |
| "loss": 4.8009, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.16711590296495957, | |
| "grad_norm": 0.9875918030738831, | |
| "learning_rate": 0.0005906098219104155, | |
| "loss": 4.7541, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 0.1725067385444744, | |
| "grad_norm": 0.8188138008117676, | |
| "learning_rate": 0.000590286022665947, | |
| "loss": 4.744, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.1778975741239892, | |
| "grad_norm": 0.8326888084411621, | |
| "learning_rate": 0.0005899622234214787, | |
| "loss": 4.7177, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 0.18328840970350405, | |
| "grad_norm": 1.0679171085357666, | |
| "learning_rate": 0.0005896384241770102, | |
| "loss": 4.6933, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.18867924528301888, | |
| "grad_norm": 0.8685047626495361, | |
| "learning_rate": 0.0005893146249325418, | |
| "loss": 4.7025, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 0.1940700808625337, | |
| "grad_norm": 0.8494108319282532, | |
| "learning_rate": 0.0005889908256880733, | |
| "loss": 4.6703, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.19946091644204852, | |
| "grad_norm": 1.175525426864624, | |
| "learning_rate": 0.0005886670264436049, | |
| "loss": 4.6366, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 0.20485175202156333, | |
| "grad_norm": 1.0531870126724243, | |
| "learning_rate": 0.0005883432271991365, | |
| "loss": 4.6263, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.21024258760107817, | |
| "grad_norm": 0.9467228651046753, | |
| "learning_rate": 0.0005880194279546681, | |
| "loss": 4.6254, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 0.215633423180593, | |
| "grad_norm": 0.9411425590515137, | |
| "learning_rate": 0.0005876956287101996, | |
| "loss": 4.5738, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.215633423180593, | |
| "eval_accuracy": 0.271918085284042, | |
| "eval_loss": 4.5008015632629395, | |
| "eval_runtime": 181.4309, | |
| "eval_samples_per_second": 99.272, | |
| "eval_steps_per_second": 6.206, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.2210242587601078, | |
| "grad_norm": 0.920215368270874, | |
| "learning_rate": 0.0005873718294657312, | |
| "loss": 4.5559, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 0.22641509433962265, | |
| "grad_norm": 0.7012114524841309, | |
| "learning_rate": 0.0005870480302212628, | |
| "loss": 4.5445, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.23180592991913745, | |
| "grad_norm": 0.9538075923919678, | |
| "learning_rate": 0.0005867242309767943, | |
| "loss": 4.5259, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 0.2371967654986523, | |
| "grad_norm": 0.7847324013710022, | |
| "learning_rate": 0.0005864004317323259, | |
| "loss": 4.4889, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.24258760107816713, | |
| "grad_norm": 1.0440484285354614, | |
| "learning_rate": 0.0005860766324878575, | |
| "loss": 4.504, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 0.24797843665768193, | |
| "grad_norm": 0.782093346118927, | |
| "learning_rate": 0.000585752833243389, | |
| "loss": 4.4768, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.25336927223719674, | |
| "grad_norm": 0.9644035696983337, | |
| "learning_rate": 0.0005854290339989206, | |
| "loss": 4.4522, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.2587601078167116, | |
| "grad_norm": 1.0412849187850952, | |
| "learning_rate": 0.0005851052347544521, | |
| "loss": 4.4489, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.2641509433962264, | |
| "grad_norm": 0.7987921237945557, | |
| "learning_rate": 0.0005847814355099838, | |
| "loss": 4.4304, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.2695417789757412, | |
| "grad_norm": 0.8646295070648193, | |
| "learning_rate": 0.0005844576362655154, | |
| "loss": 4.4196, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.2749326145552561, | |
| "grad_norm": 1.0314178466796875, | |
| "learning_rate": 0.0005841338370210469, | |
| "loss": 4.4017, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.2803234501347709, | |
| "grad_norm": 0.9287024140357971, | |
| "learning_rate": 0.0005838100377765785, | |
| "loss": 4.3729, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.2857142857142857, | |
| "grad_norm": 0.8492061495780945, | |
| "learning_rate": 0.0005834862385321101, | |
| "loss": 4.37, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.29110512129380056, | |
| "grad_norm": 0.7161704301834106, | |
| "learning_rate": 0.0005831624392876417, | |
| "loss": 4.3608, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.29649595687331537, | |
| "grad_norm": 1.0816575288772583, | |
| "learning_rate": 0.0005828386400431731, | |
| "loss": 4.3705, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.3018867924528302, | |
| "grad_norm": 0.8127713799476624, | |
| "learning_rate": 0.0005825148407987048, | |
| "loss": 4.3681, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.30727762803234504, | |
| "grad_norm": 0.9167231321334839, | |
| "learning_rate": 0.0005821910415542363, | |
| "loss": 4.33, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.31266846361185985, | |
| "grad_norm": 0.8297504782676697, | |
| "learning_rate": 0.0005818672423097679, | |
| "loss": 4.3137, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.31805929919137466, | |
| "grad_norm": 0.9186894297599792, | |
| "learning_rate": 0.0005815434430652994, | |
| "loss": 4.3441, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.32345013477088946, | |
| "grad_norm": 0.7359763979911804, | |
| "learning_rate": 0.0005812196438208311, | |
| "loss": 4.309, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.32345013477088946, | |
| "eval_accuracy": 0.29948495271589304, | |
| "eval_loss": 4.228354454040527, | |
| "eval_runtime": 183.1945, | |
| "eval_samples_per_second": 98.316, | |
| "eval_steps_per_second": 6.146, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.3288409703504043, | |
| "grad_norm": 0.7310630679130554, | |
| "learning_rate": 0.0005808958445763626, | |
| "loss": 4.3024, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.33423180592991913, | |
| "grad_norm": 0.8260939717292786, | |
| "learning_rate": 0.0005805720453318942, | |
| "loss": 4.2831, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.33962264150943394, | |
| "grad_norm": 0.7478753924369812, | |
| "learning_rate": 0.0005802482460874257, | |
| "loss": 4.2722, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.3450134770889488, | |
| "grad_norm": 0.8138642907142639, | |
| "learning_rate": 0.0005799244468429573, | |
| "loss": 4.2816, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.3504043126684636, | |
| "grad_norm": 0.7284408211708069, | |
| "learning_rate": 0.0005796006475984889, | |
| "loss": 4.2495, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.3557951482479784, | |
| "grad_norm": 0.6755571365356445, | |
| "learning_rate": 0.0005792768483540205, | |
| "loss": 4.2595, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.3611859838274933, | |
| "grad_norm": 0.9542193412780762, | |
| "learning_rate": 0.000578953049109552, | |
| "loss": 4.2538, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.3665768194070081, | |
| "grad_norm": 0.6383764743804932, | |
| "learning_rate": 0.0005786292498650836, | |
| "loss": 4.2319, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.3719676549865229, | |
| "grad_norm": 0.8126682639122009, | |
| "learning_rate": 0.0005783054506206152, | |
| "loss": 4.2272, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.37735849056603776, | |
| "grad_norm": 0.7132463455200195, | |
| "learning_rate": 0.0005779816513761467, | |
| "loss": 4.2186, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.38274932614555257, | |
| "grad_norm": 0.818424642086029, | |
| "learning_rate": 0.0005776578521316782, | |
| "loss": 4.1982, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.3881401617250674, | |
| "grad_norm": 0.6916500926017761, | |
| "learning_rate": 0.0005773340528872099, | |
| "loss": 4.2057, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.3935309973045822, | |
| "grad_norm": 0.6858584880828857, | |
| "learning_rate": 0.0005770102536427414, | |
| "loss": 4.2202, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.39892183288409705, | |
| "grad_norm": 0.7574723362922668, | |
| "learning_rate": 0.000576686454398273, | |
| "loss": 4.2073, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.40431266846361186, | |
| "grad_norm": 0.7107515931129456, | |
| "learning_rate": 0.0005763626551538045, | |
| "loss": 4.1939, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.40970350404312667, | |
| "grad_norm": 0.7424683570861816, | |
| "learning_rate": 0.0005760388559093362, | |
| "loss": 4.1827, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.41509433962264153, | |
| "grad_norm": 0.6541684865951538, | |
| "learning_rate": 0.0005757150566648678, | |
| "loss": 4.1782, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.42048517520215634, | |
| "grad_norm": 0.797566294670105, | |
| "learning_rate": 0.0005753912574203993, | |
| "loss": 4.17, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.42587601078167114, | |
| "grad_norm": 0.5999880433082581, | |
| "learning_rate": 0.0005750674581759309, | |
| "loss": 4.1651, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.431266846361186, | |
| "grad_norm": 0.8264360427856445, | |
| "learning_rate": 0.0005747436589314624, | |
| "loss": 4.1524, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.431266846361186, | |
| "eval_accuracy": 0.312240258480874, | |
| "eval_loss": 4.087605953216553, | |
| "eval_runtime": 183.2024, | |
| "eval_samples_per_second": 98.312, | |
| "eval_steps_per_second": 6.146, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.4366576819407008, | |
| "grad_norm": 0.7332233786582947, | |
| "learning_rate": 0.0005744198596869941, | |
| "loss": 4.1654, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 0.4420485175202156, | |
| "grad_norm": 0.5775137543678284, | |
| "learning_rate": 0.0005740960604425255, | |
| "loss": 4.158, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 0.4474393530997305, | |
| "grad_norm": 0.8009674549102783, | |
| "learning_rate": 0.0005737722611980572, | |
| "loss": 4.1392, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 0.4528301886792453, | |
| "grad_norm": 0.6772514581680298, | |
| "learning_rate": 0.0005734484619535887, | |
| "loss": 4.1186, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 0.4582210242587601, | |
| "grad_norm": 0.6068300604820251, | |
| "learning_rate": 0.0005731246627091203, | |
| "loss": 4.1518, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 0.4636118598382749, | |
| "grad_norm": 0.6603842973709106, | |
| "learning_rate": 0.0005728008634646518, | |
| "loss": 4.1228, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 0.46900269541778977, | |
| "grad_norm": 0.6870344877243042, | |
| "learning_rate": 0.0005724770642201835, | |
| "loss": 4.1306, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 0.4743935309973046, | |
| "grad_norm": 0.8701305985450745, | |
| "learning_rate": 0.000572153264975715, | |
| "loss": 4.1236, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 0.4797843665768194, | |
| "grad_norm": 0.646145224571228, | |
| "learning_rate": 0.0005718294657312466, | |
| "loss": 4.0979, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 0.48517520215633425, | |
| "grad_norm": 0.5847651362419128, | |
| "learning_rate": 0.0005715056664867781, | |
| "loss": 4.0943, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.49056603773584906, | |
| "grad_norm": 0.7485958933830261, | |
| "learning_rate": 0.0005711818672423097, | |
| "loss": 4.0872, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 0.49595687331536387, | |
| "grad_norm": 0.6113314628601074, | |
| "learning_rate": 0.0005708580679978413, | |
| "loss": 4.0971, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 0.5013477088948787, | |
| "grad_norm": 0.814666748046875, | |
| "learning_rate": 0.0005705342687533729, | |
| "loss": 4.088, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 0.5067385444743935, | |
| "grad_norm": 0.6813623905181885, | |
| "learning_rate": 0.0005702104695089044, | |
| "loss": 4.0697, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 0.5121293800539084, | |
| "grad_norm": 0.7049392461776733, | |
| "learning_rate": 0.000569886670264436, | |
| "loss": 4.083, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 0.5175202156334232, | |
| "grad_norm": 0.6689280867576599, | |
| "learning_rate": 0.0005695628710199675, | |
| "loss": 4.0704, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 0.522911051212938, | |
| "grad_norm": 0.5872611403465271, | |
| "learning_rate": 0.0005692390717754991, | |
| "loss": 4.0659, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 0.5283018867924528, | |
| "grad_norm": 0.7064571976661682, | |
| "learning_rate": 0.0005689152725310306, | |
| "loss": 4.0586, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 0.5336927223719676, | |
| "grad_norm": 0.7021653652191162, | |
| "learning_rate": 0.0005685914732865623, | |
| "loss": 4.0619, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 0.5390835579514824, | |
| "grad_norm": 0.6203348636627197, | |
| "learning_rate": 0.0005682676740420939, | |
| "loss": 4.0714, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.5390835579514824, | |
| "eval_accuracy": 0.32160700663358477, | |
| "eval_loss": 3.9901158809661865, | |
| "eval_runtime": 183.2916, | |
| "eval_samples_per_second": 98.264, | |
| "eval_steps_per_second": 6.143, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.5444743935309974, | |
| "grad_norm": 0.7498524188995361, | |
| "learning_rate": 0.0005679438747976254, | |
| "loss": 4.0504, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 0.5498652291105122, | |
| "grad_norm": 0.6415616869926453, | |
| "learning_rate": 0.000567620075553157, | |
| "loss": 4.045, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 0.555256064690027, | |
| "grad_norm": 0.68656986951828, | |
| "learning_rate": 0.0005672962763086886, | |
| "loss": 4.0469, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 0.5606469002695418, | |
| "grad_norm": 0.6866742968559265, | |
| "learning_rate": 0.0005669724770642202, | |
| "loss": 4.0368, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 0.5660377358490566, | |
| "grad_norm": 0.6404621601104736, | |
| "learning_rate": 0.0005666486778197517, | |
| "loss": 4.0323, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 0.5714285714285714, | |
| "grad_norm": 0.6272525191307068, | |
| "learning_rate": 0.0005663248785752833, | |
| "loss": 4.0431, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 0.5768194070080862, | |
| "grad_norm": 0.6219531297683716, | |
| "learning_rate": 0.0005660010793308148, | |
| "loss": 4.0283, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 0.5822102425876011, | |
| "grad_norm": 0.5969902276992798, | |
| "learning_rate": 0.0005656772800863465, | |
| "loss": 4.0455, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 0.5876010781671159, | |
| "grad_norm": 0.7162837982177734, | |
| "learning_rate": 0.0005653534808418779, | |
| "loss": 4.0187, | |
| "step": 5450 | |
| }, | |
| { | |
| "epoch": 0.5929919137466307, | |
| "grad_norm": 0.5840233564376831, | |
| "learning_rate": 0.0005650296815974096, | |
| "loss": 4.0264, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 0.5983827493261455, | |
| "grad_norm": 0.5966022610664368, | |
| "learning_rate": 0.0005647058823529411, | |
| "loss": 4.0154, | |
| "step": 5550 | |
| }, | |
| { | |
| "epoch": 0.6037735849056604, | |
| "grad_norm": 0.5551064610481262, | |
| "learning_rate": 0.0005643820831084727, | |
| "loss": 4.0069, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 0.6091644204851752, | |
| "grad_norm": 0.7473669052124023, | |
| "learning_rate": 0.0005640582838640042, | |
| "loss": 4.0204, | |
| "step": 5650 | |
| }, | |
| { | |
| "epoch": 0.6145552560646901, | |
| "grad_norm": 0.6089016795158386, | |
| "learning_rate": 0.0005637344846195358, | |
| "loss": 4.0132, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 0.6199460916442049, | |
| "grad_norm": 0.5742376446723938, | |
| "learning_rate": 0.0005634106853750674, | |
| "loss": 4.02, | |
| "step": 5750 | |
| }, | |
| { | |
| "epoch": 0.6253369272237197, | |
| "grad_norm": 0.857323169708252, | |
| "learning_rate": 0.000563086886130599, | |
| "loss": 3.9884, | |
| "step": 5800 | |
| }, | |
| { | |
| "epoch": 0.6307277628032345, | |
| "grad_norm": 0.6711622476577759, | |
| "learning_rate": 0.0005627630868861305, | |
| "loss": 3.9889, | |
| "step": 5850 | |
| }, | |
| { | |
| "epoch": 0.6361185983827493, | |
| "grad_norm": 0.6008835434913635, | |
| "learning_rate": 0.0005624392876416621, | |
| "loss": 3.9763, | |
| "step": 5900 | |
| }, | |
| { | |
| "epoch": 0.6415094339622641, | |
| "grad_norm": 0.6592042446136475, | |
| "learning_rate": 0.0005621154883971937, | |
| "loss": 3.9816, | |
| "step": 5950 | |
| }, | |
| { | |
| "epoch": 0.6469002695417789, | |
| "grad_norm": 0.6811545491218567, | |
| "learning_rate": 0.0005617916891527253, | |
| "loss": 3.9631, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.6469002695417789, | |
| "eval_accuracy": 0.328109556010618, | |
| "eval_loss": 3.9180023670196533, | |
| "eval_runtime": 183.5563, | |
| "eval_samples_per_second": 98.122, | |
| "eval_steps_per_second": 6.134, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.6522911051212938, | |
| "grad_norm": 0.838979959487915, | |
| "learning_rate": 0.0005614678899082568, | |
| "loss": 3.9977, | |
| "step": 6050 | |
| }, | |
| { | |
| "epoch": 0.6576819407008087, | |
| "grad_norm": 0.6069373488426208, | |
| "learning_rate": 0.0005611440906637884, | |
| "loss": 3.9844, | |
| "step": 6100 | |
| }, | |
| { | |
| "epoch": 0.6630727762803235, | |
| "grad_norm": 0.6294558048248291, | |
| "learning_rate": 0.00056082029141932, | |
| "loss": 3.9769, | |
| "step": 6150 | |
| }, | |
| { | |
| "epoch": 0.6684636118598383, | |
| "grad_norm": 0.6527539491653442, | |
| "learning_rate": 0.0005604964921748515, | |
| "loss": 3.9875, | |
| "step": 6200 | |
| }, | |
| { | |
| "epoch": 0.6738544474393531, | |
| "grad_norm": 0.6204699873924255, | |
| "learning_rate": 0.000560172692930383, | |
| "loss": 3.949, | |
| "step": 6250 | |
| }, | |
| { | |
| "epoch": 0.6792452830188679, | |
| "grad_norm": 0.6665420532226562, | |
| "learning_rate": 0.0005598488936859147, | |
| "loss": 3.96, | |
| "step": 6300 | |
| }, | |
| { | |
| "epoch": 0.6846361185983828, | |
| "grad_norm": 0.6489077806472778, | |
| "learning_rate": 0.0005595250944414463, | |
| "loss": 3.9534, | |
| "step": 6350 | |
| }, | |
| { | |
| "epoch": 0.6900269541778976, | |
| "grad_norm": 0.6623448729515076, | |
| "learning_rate": 0.0005592012951969778, | |
| "loss": 3.9454, | |
| "step": 6400 | |
| }, | |
| { | |
| "epoch": 0.6954177897574124, | |
| "grad_norm": 0.7035852670669556, | |
| "learning_rate": 0.0005588774959525094, | |
| "loss": 3.9722, | |
| "step": 6450 | |
| }, | |
| { | |
| "epoch": 0.7008086253369272, | |
| "grad_norm": 0.5686920881271362, | |
| "learning_rate": 0.000558553696708041, | |
| "loss": 3.9525, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 0.706199460916442, | |
| "grad_norm": 0.5780633091926575, | |
| "learning_rate": 0.0005582298974635726, | |
| "loss": 3.9525, | |
| "step": 6550 | |
| }, | |
| { | |
| "epoch": 0.7115902964959568, | |
| "grad_norm": 0.6254565715789795, | |
| "learning_rate": 0.0005579060982191041, | |
| "loss": 3.9494, | |
| "step": 6600 | |
| }, | |
| { | |
| "epoch": 0.7169811320754716, | |
| "grad_norm": 0.6464234590530396, | |
| "learning_rate": 0.0005575822989746357, | |
| "loss": 3.9432, | |
| "step": 6650 | |
| }, | |
| { | |
| "epoch": 0.7223719676549866, | |
| "grad_norm": 0.5895872712135315, | |
| "learning_rate": 0.0005572584997301672, | |
| "loss": 3.9378, | |
| "step": 6700 | |
| }, | |
| { | |
| "epoch": 0.7277628032345014, | |
| "grad_norm": 0.6661088466644287, | |
| "learning_rate": 0.0005569347004856989, | |
| "loss": 3.9405, | |
| "step": 6750 | |
| }, | |
| { | |
| "epoch": 0.7331536388140162, | |
| "grad_norm": 0.5486308932304382, | |
| "learning_rate": 0.0005566109012412303, | |
| "loss": 3.9499, | |
| "step": 6800 | |
| }, | |
| { | |
| "epoch": 0.738544474393531, | |
| "grad_norm": 0.5206322073936462, | |
| "learning_rate": 0.000556287101996762, | |
| "loss": 3.9308, | |
| "step": 6850 | |
| }, | |
| { | |
| "epoch": 0.7439353099730458, | |
| "grad_norm": 0.60262531042099, | |
| "learning_rate": 0.0005559633027522935, | |
| "loss": 3.9279, | |
| "step": 6900 | |
| }, | |
| { | |
| "epoch": 0.7493261455525606, | |
| "grad_norm": 0.6978471875190735, | |
| "learning_rate": 0.0005556395035078251, | |
| "loss": 3.9152, | |
| "step": 6950 | |
| }, | |
| { | |
| "epoch": 0.7547169811320755, | |
| "grad_norm": 0.5400425791740417, | |
| "learning_rate": 0.0005553157042633566, | |
| "loss": 3.9285, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 0.7547169811320755, | |
| "eval_accuracy": 0.33344778104183126, | |
| "eval_loss": 3.8615846633911133, | |
| "eval_runtime": 182.602, | |
| "eval_samples_per_second": 98.635, | |
| "eval_steps_per_second": 6.166, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 0.7601078167115903, | |
| "grad_norm": 0.5189043283462524, | |
| "learning_rate": 0.0005549919050188882, | |
| "loss": 3.9302, | |
| "step": 7050 | |
| }, | |
| { | |
| "epoch": 0.7654986522911051, | |
| "grad_norm": 0.5751085877418518, | |
| "learning_rate": 0.0005546681057744198, | |
| "loss": 3.9332, | |
| "step": 7100 | |
| }, | |
| { | |
| "epoch": 0.77088948787062, | |
| "grad_norm": 0.6791032552719116, | |
| "learning_rate": 0.0005543443065299514, | |
| "loss": 3.9073, | |
| "step": 7150 | |
| }, | |
| { | |
| "epoch": 0.7762803234501348, | |
| "grad_norm": 0.7200894951820374, | |
| "learning_rate": 0.000554020507285483, | |
| "loss": 3.9193, | |
| "step": 7200 | |
| }, | |
| { | |
| "epoch": 0.7816711590296496, | |
| "grad_norm": 0.6101612448692322, | |
| "learning_rate": 0.0005536967080410145, | |
| "loss": 3.919, | |
| "step": 7250 | |
| }, | |
| { | |
| "epoch": 0.7870619946091644, | |
| "grad_norm": 0.5997413396835327, | |
| "learning_rate": 0.0005533729087965462, | |
| "loss": 3.9079, | |
| "step": 7300 | |
| }, | |
| { | |
| "epoch": 0.7924528301886793, | |
| "grad_norm": 0.5748898386955261, | |
| "learning_rate": 0.0005530491095520777, | |
| "loss": 3.9162, | |
| "step": 7350 | |
| }, | |
| { | |
| "epoch": 0.7978436657681941, | |
| "grad_norm": 0.6359212398529053, | |
| "learning_rate": 0.0005527253103076093, | |
| "loss": 3.8807, | |
| "step": 7400 | |
| }, | |
| { | |
| "epoch": 0.8032345013477089, | |
| "grad_norm": 0.6088876724243164, | |
| "learning_rate": 0.0005524015110631408, | |
| "loss": 3.8902, | |
| "step": 7450 | |
| }, | |
| { | |
| "epoch": 0.8086253369272237, | |
| "grad_norm": 0.6242630481719971, | |
| "learning_rate": 0.0005520777118186724, | |
| "loss": 3.9035, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 0.8140161725067385, | |
| "grad_norm": 0.622336208820343, | |
| "learning_rate": 0.0005517539125742039, | |
| "loss": 3.8986, | |
| "step": 7550 | |
| }, | |
| { | |
| "epoch": 0.8194070080862533, | |
| "grad_norm": 0.5424439311027527, | |
| "learning_rate": 0.0005514301133297355, | |
| "loss": 3.9166, | |
| "step": 7600 | |
| }, | |
| { | |
| "epoch": 0.8247978436657682, | |
| "grad_norm": 0.5808700323104858, | |
| "learning_rate": 0.0005511063140852671, | |
| "loss": 3.9005, | |
| "step": 7650 | |
| }, | |
| { | |
| "epoch": 0.8301886792452831, | |
| "grad_norm": 0.5555144548416138, | |
| "learning_rate": 0.0005507825148407987, | |
| "loss": 3.8722, | |
| "step": 7700 | |
| }, | |
| { | |
| "epoch": 0.8355795148247979, | |
| "grad_norm": 0.7033872604370117, | |
| "learning_rate": 0.0005504587155963302, | |
| "loss": 3.8852, | |
| "step": 7750 | |
| }, | |
| { | |
| "epoch": 0.8409703504043127, | |
| "grad_norm": 0.5577759742736816, | |
| "learning_rate": 0.0005501349163518618, | |
| "loss": 3.9008, | |
| "step": 7800 | |
| }, | |
| { | |
| "epoch": 0.8463611859838275, | |
| "grad_norm": Infinity, | |
| "learning_rate": 0.0005498175930922827, | |
| "loss": 3.888, | |
| "step": 7850 | |
| }, | |
| { | |
| "epoch": 0.8517520215633423, | |
| "grad_norm": 0.6767401099205017, | |
| "learning_rate": 0.0005494937938478143, | |
| "loss": 3.8863, | |
| "step": 7900 | |
| }, | |
| { | |
| "epoch": 0.8571428571428571, | |
| "grad_norm": 0.5402886271476746, | |
| "learning_rate": 0.0005491699946033459, | |
| "loss": 3.8885, | |
| "step": 7950 | |
| }, | |
| { | |
| "epoch": 0.862533692722372, | |
| "grad_norm": 0.5724102258682251, | |
| "learning_rate": 0.0005488461953588775, | |
| "loss": 3.8676, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 0.862533692722372, | |
| "eval_accuracy": 0.3375052058314874, | |
| "eval_loss": 3.816683292388916, | |
| "eval_runtime": 181.6977, | |
| "eval_samples_per_second": 99.126, | |
| "eval_steps_per_second": 6.197, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 0.8679245283018868, | |
| "grad_norm": 0.5436832308769226, | |
| "learning_rate": 0.000548522396114409, | |
| "loss": 3.8838, | |
| "step": 8050 | |
| }, | |
| { | |
| "epoch": 0.8733153638814016, | |
| "grad_norm": 0.6710783243179321, | |
| "learning_rate": 0.0005481985968699406, | |
| "loss": 3.8623, | |
| "step": 8100 | |
| }, | |
| { | |
| "epoch": 0.8787061994609164, | |
| "grad_norm": 0.5671558380126953, | |
| "learning_rate": 0.0005478747976254721, | |
| "loss": 3.8746, | |
| "step": 8150 | |
| }, | |
| { | |
| "epoch": 0.8840970350404312, | |
| "grad_norm": 0.6092495322227478, | |
| "learning_rate": 0.0005475509983810037, | |
| "loss": 3.8635, | |
| "step": 8200 | |
| }, | |
| { | |
| "epoch": 0.889487870619946, | |
| "grad_norm": 0.6280654072761536, | |
| "learning_rate": 0.0005472271991365352, | |
| "loss": 3.8772, | |
| "step": 8250 | |
| }, | |
| { | |
| "epoch": 0.894878706199461, | |
| "grad_norm": 0.5402874946594238, | |
| "learning_rate": 0.0005469033998920669, | |
| "loss": 3.8525, | |
| "step": 8300 | |
| }, | |
| { | |
| "epoch": 0.9002695417789758, | |
| "grad_norm": 0.5597426295280457, | |
| "learning_rate": 0.0005465796006475984, | |
| "loss": 3.8685, | |
| "step": 8350 | |
| }, | |
| { | |
| "epoch": 0.9056603773584906, | |
| "grad_norm": 0.5542333126068115, | |
| "learning_rate": 0.00054625580140313, | |
| "loss": 3.8677, | |
| "step": 8400 | |
| }, | |
| { | |
| "epoch": 0.9110512129380054, | |
| "grad_norm": 0.5573087930679321, | |
| "learning_rate": 0.0005459320021586615, | |
| "loss": 3.8402, | |
| "step": 8450 | |
| }, | |
| { | |
| "epoch": 0.9164420485175202, | |
| "grad_norm": 0.5278663039207458, | |
| "learning_rate": 0.0005456082029141932, | |
| "loss": 3.8521, | |
| "step": 8500 | |
| }, | |
| { | |
| "epoch": 0.921832884097035, | |
| "grad_norm": 0.6010989546775818, | |
| "learning_rate": 0.0005452844036697248, | |
| "loss": 3.8536, | |
| "step": 8550 | |
| }, | |
| { | |
| "epoch": 0.9272237196765498, | |
| "grad_norm": 0.6041896939277649, | |
| "learning_rate": 0.0005449606044252563, | |
| "loss": 3.8816, | |
| "step": 8600 | |
| }, | |
| { | |
| "epoch": 0.9326145552560647, | |
| "grad_norm": 0.6230564117431641, | |
| "learning_rate": 0.0005446368051807879, | |
| "loss": 3.8547, | |
| "step": 8650 | |
| }, | |
| { | |
| "epoch": 0.9380053908355795, | |
| "grad_norm": 0.6061603426933289, | |
| "learning_rate": 0.0005443130059363194, | |
| "loss": 3.8612, | |
| "step": 8700 | |
| }, | |
| { | |
| "epoch": 0.9433962264150944, | |
| "grad_norm": 0.532725989818573, | |
| "learning_rate": 0.0005439892066918511, | |
| "loss": 3.8453, | |
| "step": 8750 | |
| }, | |
| { | |
| "epoch": 0.9487870619946092, | |
| "grad_norm": 0.6650099158287048, | |
| "learning_rate": 0.0005436654074473825, | |
| "loss": 3.845, | |
| "step": 8800 | |
| }, | |
| { | |
| "epoch": 0.954177897574124, | |
| "grad_norm": 0.5258676409721375, | |
| "learning_rate": 0.0005433416082029142, | |
| "loss": 3.8474, | |
| "step": 8850 | |
| }, | |
| { | |
| "epoch": 0.9595687331536388, | |
| "grad_norm": 0.5918166637420654, | |
| "learning_rate": 0.0005430178089584457, | |
| "loss": 3.8476, | |
| "step": 8900 | |
| }, | |
| { | |
| "epoch": 0.9649595687331537, | |
| "grad_norm": 0.7267642617225647, | |
| "learning_rate": 0.0005426940097139773, | |
| "loss": 3.8489, | |
| "step": 8950 | |
| }, | |
| { | |
| "epoch": 0.9703504043126685, | |
| "grad_norm": 0.639377772808075, | |
| "learning_rate": 0.0005423702104695088, | |
| "loss": 3.8648, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 0.9703504043126685, | |
| "eval_accuracy": 0.3408996306996996, | |
| "eval_loss": 3.779827356338501, | |
| "eval_runtime": 181.5426, | |
| "eval_samples_per_second": 99.211, | |
| "eval_steps_per_second": 6.202, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 0.9757412398921833, | |
| "grad_norm": 0.6780883073806763, | |
| "learning_rate": 0.0005420464112250404, | |
| "loss": 3.8448, | |
| "step": 9050 | |
| }, | |
| { | |
| "epoch": 0.9811320754716981, | |
| "grad_norm": 0.5915326476097107, | |
| "learning_rate": 0.000541722611980572, | |
| "loss": 3.8335, | |
| "step": 9100 | |
| }, | |
| { | |
| "epoch": 0.9865229110512129, | |
| "grad_norm": 0.5880796313285828, | |
| "learning_rate": 0.0005413988127361036, | |
| "loss": 3.8434, | |
| "step": 9150 | |
| }, | |
| { | |
| "epoch": 0.9919137466307277, | |
| "grad_norm": 0.551231861114502, | |
| "learning_rate": 0.0005410750134916351, | |
| "loss": 3.8469, | |
| "step": 9200 | |
| }, | |
| { | |
| "epoch": 0.9973045822102425, | |
| "grad_norm": 0.542789101600647, | |
| "learning_rate": 0.0005407512142471667, | |
| "loss": 3.8382, | |
| "step": 9250 | |
| }, | |
| { | |
| "epoch": 1.0026954177897573, | |
| "grad_norm": 0.5782141089439392, | |
| "learning_rate": 0.0005404274150026983, | |
| "loss": 3.8143, | |
| "step": 9300 | |
| }, | |
| { | |
| "epoch": 1.0080862533692723, | |
| "grad_norm": 0.585417628288269, | |
| "learning_rate": 0.0005401036157582299, | |
| "loss": 3.782, | |
| "step": 9350 | |
| }, | |
| { | |
| "epoch": 1.013477088948787, | |
| "grad_norm": 0.5786333680152893, | |
| "learning_rate": 0.0005397798165137614, | |
| "loss": 3.7919, | |
| "step": 9400 | |
| }, | |
| { | |
| "epoch": 1.0188679245283019, | |
| "grad_norm": 0.5752071738243103, | |
| "learning_rate": 0.000539456017269293, | |
| "loss": 3.7809, | |
| "step": 9450 | |
| }, | |
| { | |
| "epoch": 1.0242587601078168, | |
| "grad_norm": 0.5453478693962097, | |
| "learning_rate": 0.0005391322180248245, | |
| "loss": 3.7775, | |
| "step": 9500 | |
| }, | |
| { | |
| "epoch": 1.0296495956873315, | |
| "grad_norm": 0.5964511632919312, | |
| "learning_rate": 0.0005388084187803561, | |
| "loss": 3.7881, | |
| "step": 9550 | |
| }, | |
| { | |
| "epoch": 1.0350404312668464, | |
| "grad_norm": 0.6188067197799683, | |
| "learning_rate": 0.0005384846195358876, | |
| "loss": 3.7672, | |
| "step": 9600 | |
| }, | |
| { | |
| "epoch": 1.0404312668463611, | |
| "grad_norm": 0.5592512488365173, | |
| "learning_rate": 0.0005381608202914193, | |
| "loss": 3.7615, | |
| "step": 9650 | |
| }, | |
| { | |
| "epoch": 1.045822102425876, | |
| "grad_norm": 0.5626137256622314, | |
| "learning_rate": 0.0005378370210469509, | |
| "loss": 3.7723, | |
| "step": 9700 | |
| }, | |
| { | |
| "epoch": 1.0512129380053907, | |
| "grad_norm": 0.6109785437583923, | |
| "learning_rate": 0.0005375132218024824, | |
| "loss": 3.7644, | |
| "step": 9750 | |
| }, | |
| { | |
| "epoch": 1.0566037735849056, | |
| "grad_norm": 0.6150305271148682, | |
| "learning_rate": 0.000537189422558014, | |
| "loss": 3.7717, | |
| "step": 9800 | |
| }, | |
| { | |
| "epoch": 1.0619946091644206, | |
| "grad_norm": 0.5786086320877075, | |
| "learning_rate": 0.0005368656233135455, | |
| "loss": 3.7537, | |
| "step": 9850 | |
| }, | |
| { | |
| "epoch": 1.0673854447439353, | |
| "grad_norm": 0.5935583710670471, | |
| "learning_rate": 0.0005365418240690772, | |
| "loss": 3.7646, | |
| "step": 9900 | |
| }, | |
| { | |
| "epoch": 1.0727762803234502, | |
| "grad_norm": 0.5563629269599915, | |
| "learning_rate": 0.0005362180248246087, | |
| "loss": 3.7625, | |
| "step": 9950 | |
| }, | |
| { | |
| "epoch": 1.0781671159029649, | |
| "grad_norm": 0.5583340525627136, | |
| "learning_rate": 0.0005358942255801403, | |
| "loss": 3.7661, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 1.0781671159029649, | |
| "eval_accuracy": 0.3447591985806456, | |
| "eval_loss": 3.7474043369293213, | |
| "eval_runtime": 179.3333, | |
| "eval_samples_per_second": 100.433, | |
| "eval_steps_per_second": 6.279, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 1.0835579514824798, | |
| "grad_norm": 0.5278469324111938, | |
| "learning_rate": 0.0005355704263356718, | |
| "loss": 3.7572, | |
| "step": 10050 | |
| }, | |
| { | |
| "epoch": 1.0889487870619945, | |
| "grad_norm": 0.599793553352356, | |
| "learning_rate": 0.0005352466270912035, | |
| "loss": 3.7482, | |
| "step": 10100 | |
| }, | |
| { | |
| "epoch": 1.0943396226415094, | |
| "grad_norm": 0.594103991985321, | |
| "learning_rate": 0.000534922827846735, | |
| "loss": 3.7811, | |
| "step": 10150 | |
| }, | |
| { | |
| "epoch": 1.0997304582210243, | |
| "grad_norm": 0.539659321308136, | |
| "learning_rate": 0.0005345990286022666, | |
| "loss": 3.7808, | |
| "step": 10200 | |
| }, | |
| { | |
| "epoch": 1.105121293800539, | |
| "grad_norm": 0.5949695110321045, | |
| "learning_rate": 0.0005342752293577981, | |
| "loss": 3.7483, | |
| "step": 10250 | |
| }, | |
| { | |
| "epoch": 1.110512129380054, | |
| "grad_norm": 0.6354559659957886, | |
| "learning_rate": 0.0005339514301133297, | |
| "loss": 3.7743, | |
| "step": 10300 | |
| }, | |
| { | |
| "epoch": 1.1159029649595686, | |
| "grad_norm": 0.5751685500144958, | |
| "learning_rate": 0.0005336276308688612, | |
| "loss": 3.7727, | |
| "step": 10350 | |
| }, | |
| { | |
| "epoch": 1.1212938005390836, | |
| "grad_norm": 0.6152482032775879, | |
| "learning_rate": 0.0005333038316243928, | |
| "loss": 3.7604, | |
| "step": 10400 | |
| }, | |
| { | |
| "epoch": 1.1266846361185983, | |
| "grad_norm": 0.6259979009628296, | |
| "learning_rate": 0.0005329800323799244, | |
| "loss": 3.7242, | |
| "step": 10450 | |
| }, | |
| { | |
| "epoch": 1.1320754716981132, | |
| "grad_norm": 0.5735751390457153, | |
| "learning_rate": 0.000532656233135456, | |
| "loss": 3.75, | |
| "step": 10500 | |
| }, | |
| { | |
| "epoch": 1.137466307277628, | |
| "grad_norm": 0.5335408449172974, | |
| "learning_rate": 0.0005323324338909875, | |
| "loss": 3.7497, | |
| "step": 10550 | |
| }, | |
| { | |
| "epoch": 1.1428571428571428, | |
| "grad_norm": 0.5535331964492798, | |
| "learning_rate": 0.0005320086346465191, | |
| "loss": 3.757, | |
| "step": 10600 | |
| }, | |
| { | |
| "epoch": 1.1482479784366577, | |
| "grad_norm": 0.5518361926078796, | |
| "learning_rate": 0.0005316848354020507, | |
| "loss": 3.7635, | |
| "step": 10650 | |
| }, | |
| { | |
| "epoch": 1.1536388140161726, | |
| "grad_norm": 0.6065233945846558, | |
| "learning_rate": 0.0005313610361575823, | |
| "loss": 3.7482, | |
| "step": 10700 | |
| }, | |
| { | |
| "epoch": 1.1590296495956873, | |
| "grad_norm": 0.5510832667350769, | |
| "learning_rate": 0.0005310372369131138, | |
| "loss": 3.7464, | |
| "step": 10750 | |
| }, | |
| { | |
| "epoch": 1.1644204851752022, | |
| "grad_norm": 0.5745865702629089, | |
| "learning_rate": 0.0005307134376686454, | |
| "loss": 3.7237, | |
| "step": 10800 | |
| }, | |
| { | |
| "epoch": 1.169811320754717, | |
| "grad_norm": 0.6756680011749268, | |
| "learning_rate": 0.000530389638424177, | |
| "loss": 3.7627, | |
| "step": 10850 | |
| }, | |
| { | |
| "epoch": 1.1752021563342319, | |
| "grad_norm": 0.6508898138999939, | |
| "learning_rate": 0.0005300658391797085, | |
| "loss": 3.7616, | |
| "step": 10900 | |
| }, | |
| { | |
| "epoch": 1.1805929919137466, | |
| "grad_norm": 0.6372632384300232, | |
| "learning_rate": 0.00052974203993524, | |
| "loss": 3.7205, | |
| "step": 10950 | |
| }, | |
| { | |
| "epoch": 1.1859838274932615, | |
| "grad_norm": 0.584235668182373, | |
| "learning_rate": 0.0005294182406907717, | |
| "loss": 3.7415, | |
| "step": 11000 | |
| }, | |
| { | |
| "epoch": 1.1859838274932615, | |
| "eval_accuracy": 0.3469530090385078, | |
| "eval_loss": 3.7233991622924805, | |
| "eval_runtime": 179.2943, | |
| "eval_samples_per_second": 100.455, | |
| "eval_steps_per_second": 6.28, | |
| "step": 11000 | |
| }, | |
| { | |
| "epoch": 1.1913746630727764, | |
| "grad_norm": 0.5607488751411438, | |
| "learning_rate": 0.0005290944414463033, | |
| "loss": 3.7504, | |
| "step": 11050 | |
| }, | |
| { | |
| "epoch": 1.196765498652291, | |
| "grad_norm": 0.6091164946556091, | |
| "learning_rate": 0.0005287706422018348, | |
| "loss": 3.744, | |
| "step": 11100 | |
| }, | |
| { | |
| "epoch": 1.202156334231806, | |
| "grad_norm": 0.6206035614013672, | |
| "learning_rate": 0.0005284468429573664, | |
| "loss": 3.7378, | |
| "step": 11150 | |
| }, | |
| { | |
| "epoch": 1.2075471698113207, | |
| "grad_norm": 0.6528010964393616, | |
| "learning_rate": 0.0005281230437128979, | |
| "loss": 3.7509, | |
| "step": 11200 | |
| }, | |
| { | |
| "epoch": 1.2129380053908356, | |
| "grad_norm": 0.520330011844635, | |
| "learning_rate": 0.0005278057204533189, | |
| "loss": 3.7608, | |
| "step": 11250 | |
| }, | |
| { | |
| "epoch": 1.2183288409703503, | |
| "grad_norm": 0.6227236390113831, | |
| "learning_rate": 0.0005274819212088505, | |
| "loss": 3.7286, | |
| "step": 11300 | |
| }, | |
| { | |
| "epoch": 1.2237196765498652, | |
| "grad_norm": 0.5682984590530396, | |
| "learning_rate": 0.0005271581219643821, | |
| "loss": 3.7505, | |
| "step": 11350 | |
| }, | |
| { | |
| "epoch": 1.2291105121293802, | |
| "grad_norm": 0.6123554706573486, | |
| "learning_rate": 0.0005268343227199136, | |
| "loss": 3.7508, | |
| "step": 11400 | |
| }, | |
| { | |
| "epoch": 1.2345013477088949, | |
| "grad_norm": 0.507030189037323, | |
| "learning_rate": 0.0005265105234754452, | |
| "loss": 3.7553, | |
| "step": 11450 | |
| }, | |
| { | |
| "epoch": 1.2398921832884098, | |
| "grad_norm": 0.5759048461914062, | |
| "learning_rate": 0.0005261867242309767, | |
| "loss": 3.7192, | |
| "step": 11500 | |
| }, | |
| { | |
| "epoch": 1.2452830188679245, | |
| "grad_norm": 0.5370573997497559, | |
| "learning_rate": 0.0005258629249865083, | |
| "loss": 3.7409, | |
| "step": 11550 | |
| }, | |
| { | |
| "epoch": 1.2506738544474394, | |
| "grad_norm": 0.5597508549690247, | |
| "learning_rate": 0.0005255391257420398, | |
| "loss": 3.739, | |
| "step": 11600 | |
| }, | |
| { | |
| "epoch": 1.256064690026954, | |
| "grad_norm": 0.6163648366928101, | |
| "learning_rate": 0.0005252153264975715, | |
| "loss": 3.7364, | |
| "step": 11650 | |
| }, | |
| { | |
| "epoch": 1.261455525606469, | |
| "grad_norm": 0.561890721321106, | |
| "learning_rate": 0.000524891527253103, | |
| "loss": 3.7371, | |
| "step": 11700 | |
| }, | |
| { | |
| "epoch": 1.266846361185984, | |
| "grad_norm": 0.6164953708648682, | |
| "learning_rate": 0.0005245677280086346, | |
| "loss": 3.7437, | |
| "step": 11750 | |
| }, | |
| { | |
| "epoch": 1.2722371967654986, | |
| "grad_norm": 0.5438469648361206, | |
| "learning_rate": 0.0005242439287641661, | |
| "loss": 3.7317, | |
| "step": 11800 | |
| }, | |
| { | |
| "epoch": 1.2776280323450135, | |
| "grad_norm": 0.5996424555778503, | |
| "learning_rate": 0.0005239201295196978, | |
| "loss": 3.7139, | |
| "step": 11850 | |
| }, | |
| { | |
| "epoch": 1.2830188679245282, | |
| "grad_norm": 0.5443153381347656, | |
| "learning_rate": 0.0005235963302752293, | |
| "loss": 3.7189, | |
| "step": 11900 | |
| }, | |
| { | |
| "epoch": 1.2884097035040432, | |
| "grad_norm": 0.5056083798408508, | |
| "learning_rate": 0.0005232725310307609, | |
| "loss": 3.7286, | |
| "step": 11950 | |
| }, | |
| { | |
| "epoch": 1.2938005390835579, | |
| "grad_norm": 0.5346047878265381, | |
| "learning_rate": 0.0005229487317862924, | |
| "loss": 3.7241, | |
| "step": 12000 | |
| }, | |
| { | |
| "epoch": 1.2938005390835579, | |
| "eval_accuracy": 0.3486271327339533, | |
| "eval_loss": 3.701392889022827, | |
| "eval_runtime": 179.3961, | |
| "eval_samples_per_second": 100.398, | |
| "eval_steps_per_second": 6.277, | |
| "step": 12000 | |
| }, | |
| { | |
| "epoch": 1.2991913746630728, | |
| "grad_norm": 0.561946451663971, | |
| "learning_rate": 0.000522624932541824, | |
| "loss": 3.7065, | |
| "step": 12050 | |
| }, | |
| { | |
| "epoch": 1.3045822102425877, | |
| "grad_norm": 0.6094053387641907, | |
| "learning_rate": 0.0005223011332973557, | |
| "loss": 3.7432, | |
| "step": 12100 | |
| }, | |
| { | |
| "epoch": 1.3099730458221024, | |
| "grad_norm": 0.548875093460083, | |
| "learning_rate": 0.0005219773340528872, | |
| "loss": 3.7251, | |
| "step": 12150 | |
| }, | |
| { | |
| "epoch": 1.3153638814016173, | |
| "grad_norm": 0.5728870630264282, | |
| "learning_rate": 0.0005216535348084188, | |
| "loss": 3.7107, | |
| "step": 12200 | |
| }, | |
| { | |
| "epoch": 1.320754716981132, | |
| "grad_norm": 0.6108008623123169, | |
| "learning_rate": 0.0005213297355639503, | |
| "loss": 3.7369, | |
| "step": 12250 | |
| }, | |
| { | |
| "epoch": 1.326145552560647, | |
| "grad_norm": 0.5879302024841309, | |
| "learning_rate": 0.0005210059363194819, | |
| "loss": 3.7043, | |
| "step": 12300 | |
| }, | |
| { | |
| "epoch": 1.3315363881401616, | |
| "grad_norm": 0.550482988357544, | |
| "learning_rate": 0.0005206821370750134, | |
| "loss": 3.7288, | |
| "step": 12350 | |
| }, | |
| { | |
| "epoch": 1.3369272237196765, | |
| "grad_norm": 0.5876588821411133, | |
| "learning_rate": 0.000520358337830545, | |
| "loss": 3.7, | |
| "step": 12400 | |
| }, | |
| { | |
| "epoch": 1.3423180592991915, | |
| "grad_norm": 0.5548244118690491, | |
| "learning_rate": 0.0005200345385860766, | |
| "loss": 3.7202, | |
| "step": 12450 | |
| }, | |
| { | |
| "epoch": 1.3477088948787062, | |
| "grad_norm": 0.6892051696777344, | |
| "learning_rate": 0.0005197107393416082, | |
| "loss": 3.7287, | |
| "step": 12500 | |
| }, | |
| { | |
| "epoch": 1.353099730458221, | |
| "grad_norm": 0.5671435594558716, | |
| "learning_rate": 0.0005193869400971397, | |
| "loss": 3.7361, | |
| "step": 12550 | |
| }, | |
| { | |
| "epoch": 1.3584905660377358, | |
| "grad_norm": 0.5692296624183655, | |
| "learning_rate": 0.0005190631408526713, | |
| "loss": 3.7137, | |
| "step": 12600 | |
| }, | |
| { | |
| "epoch": 1.3638814016172507, | |
| "grad_norm": 0.5560773611068726, | |
| "learning_rate": 0.0005187393416082029, | |
| "loss": 3.7092, | |
| "step": 12650 | |
| }, | |
| { | |
| "epoch": 1.3692722371967654, | |
| "grad_norm": 0.5214106440544128, | |
| "learning_rate": 0.0005184155423637345, | |
| "loss": 3.7076, | |
| "step": 12700 | |
| }, | |
| { | |
| "epoch": 1.3746630727762803, | |
| "grad_norm": 0.5432198643684387, | |
| "learning_rate": 0.000518091743119266, | |
| "loss": 3.7224, | |
| "step": 12750 | |
| }, | |
| { | |
| "epoch": 1.3800539083557952, | |
| "grad_norm": 0.5707585215568542, | |
| "learning_rate": 0.0005177679438747976, | |
| "loss": 3.7136, | |
| "step": 12800 | |
| }, | |
| { | |
| "epoch": 1.38544474393531, | |
| "grad_norm": 0.583604633808136, | |
| "learning_rate": 0.0005174441446303291, | |
| "loss": 3.7356, | |
| "step": 12850 | |
| }, | |
| { | |
| "epoch": 1.3908355795148248, | |
| "grad_norm": 0.567251443862915, | |
| "learning_rate": 0.0005171203453858607, | |
| "loss": 3.7341, | |
| "step": 12900 | |
| }, | |
| { | |
| "epoch": 1.3962264150943398, | |
| "grad_norm": 0.5615735054016113, | |
| "learning_rate": 0.0005167965461413922, | |
| "loss": 3.7331, | |
| "step": 12950 | |
| }, | |
| { | |
| "epoch": 1.4016172506738545, | |
| "grad_norm": 0.6066398024559021, | |
| "learning_rate": 0.0005164727468969239, | |
| "loss": 3.715, | |
| "step": 13000 | |
| }, | |
| { | |
| "epoch": 1.4016172506738545, | |
| "eval_accuracy": 0.3512098118773035, | |
| "eval_loss": 3.67993426322937, | |
| "eval_runtime": 181.5275, | |
| "eval_samples_per_second": 99.219, | |
| "eval_steps_per_second": 6.203, | |
| "step": 13000 | |
| }, | |
| { | |
| "epoch": 1.4070080862533692, | |
| "grad_norm": 0.6397793292999268, | |
| "learning_rate": 0.0005161489476524554, | |
| "loss": 3.6967, | |
| "step": 13050 | |
| }, | |
| { | |
| "epoch": 1.412398921832884, | |
| "grad_norm": 0.6502065062522888, | |
| "learning_rate": 0.000515825148407987, | |
| "loss": 3.7131, | |
| "step": 13100 | |
| }, | |
| { | |
| "epoch": 1.417789757412399, | |
| "grad_norm": 0.5458613038063049, | |
| "learning_rate": 0.0005155013491635185, | |
| "loss": 3.7147, | |
| "step": 13150 | |
| }, | |
| { | |
| "epoch": 1.4231805929919137, | |
| "grad_norm": 0.5654726028442383, | |
| "learning_rate": 0.0005151775499190501, | |
| "loss": 3.7392, | |
| "step": 13200 | |
| }, | |
| { | |
| "epoch": 1.4285714285714286, | |
| "grad_norm": 0.5382741689682007, | |
| "learning_rate": 0.0005148537506745818, | |
| "loss": 3.6966, | |
| "step": 13250 | |
| }, | |
| { | |
| "epoch": 1.4339622641509435, | |
| "grad_norm": 0.5260798931121826, | |
| "learning_rate": 0.0005145299514301133, | |
| "loss": 3.703, | |
| "step": 13300 | |
| }, | |
| { | |
| "epoch": 1.4393530997304582, | |
| "grad_norm": 0.5853448510169983, | |
| "learning_rate": 0.0005142061521856449, | |
| "loss": 3.684, | |
| "step": 13350 | |
| }, | |
| { | |
| "epoch": 1.444743935309973, | |
| "grad_norm": 0.538550615310669, | |
| "learning_rate": 0.0005138823529411764, | |
| "loss": 3.7219, | |
| "step": 13400 | |
| }, | |
| { | |
| "epoch": 1.4501347708894878, | |
| "grad_norm": 0.596662700176239, | |
| "learning_rate": 0.0005135585536967081, | |
| "loss": 3.7194, | |
| "step": 13450 | |
| }, | |
| { | |
| "epoch": 1.4555256064690028, | |
| "grad_norm": 0.6186038851737976, | |
| "learning_rate": 0.0005132347544522396, | |
| "loss": 3.7033, | |
| "step": 13500 | |
| }, | |
| { | |
| "epoch": 1.4609164420485174, | |
| "grad_norm": 0.542559802532196, | |
| "learning_rate": 0.0005129109552077712, | |
| "loss": 3.7194, | |
| "step": 13550 | |
| }, | |
| { | |
| "epoch": 1.4663072776280324, | |
| "grad_norm": 0.5352370738983154, | |
| "learning_rate": 0.0005125871559633027, | |
| "loss": 3.7046, | |
| "step": 13600 | |
| }, | |
| { | |
| "epoch": 1.4716981132075473, | |
| "grad_norm": 0.5471335649490356, | |
| "learning_rate": 0.0005122633567188343, | |
| "loss": 3.7091, | |
| "step": 13650 | |
| }, | |
| { | |
| "epoch": 1.477088948787062, | |
| "grad_norm": 0.6399145126342773, | |
| "learning_rate": 0.0005119395574743658, | |
| "loss": 3.7208, | |
| "step": 13700 | |
| }, | |
| { | |
| "epoch": 1.482479784366577, | |
| "grad_norm": 0.5687584280967712, | |
| "learning_rate": 0.0005116222342147868, | |
| "loss": 3.7, | |
| "step": 13750 | |
| }, | |
| { | |
| "epoch": 1.4878706199460916, | |
| "grad_norm": 0.5827769041061401, | |
| "learning_rate": 0.0005112984349703183, | |
| "loss": 3.7004, | |
| "step": 13800 | |
| }, | |
| { | |
| "epoch": 1.4932614555256065, | |
| "grad_norm": 0.5126985907554626, | |
| "learning_rate": 0.00051097463572585, | |
| "loss": 3.6955, | |
| "step": 13850 | |
| }, | |
| { | |
| "epoch": 1.4986522911051212, | |
| "grad_norm": 0.6048992872238159, | |
| "learning_rate": 0.0005106508364813815, | |
| "loss": 3.6853, | |
| "step": 13900 | |
| }, | |
| { | |
| "epoch": 1.5040431266846361, | |
| "grad_norm": 0.5209577679634094, | |
| "learning_rate": 0.0005103270372369131, | |
| "loss": 3.7061, | |
| "step": 13950 | |
| }, | |
| { | |
| "epoch": 1.509433962264151, | |
| "grad_norm": 0.558080792427063, | |
| "learning_rate": 0.0005100032379924446, | |
| "loss": 3.6982, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 1.509433962264151, | |
| "eval_accuracy": 0.35338667248466665, | |
| "eval_loss": 3.6556079387664795, | |
| "eval_runtime": 181.0746, | |
| "eval_samples_per_second": 99.467, | |
| "eval_steps_per_second": 6.218, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 1.5148247978436657, | |
| "grad_norm": 0.5223732590675354, | |
| "learning_rate": 0.0005096794387479762, | |
| "loss": 3.6743, | |
| "step": 14050 | |
| }, | |
| { | |
| "epoch": 1.5202156334231804, | |
| "grad_norm": 0.5266443490982056, | |
| "learning_rate": 0.0005093556395035078, | |
| "loss": 3.7049, | |
| "step": 14100 | |
| }, | |
| { | |
| "epoch": 1.5256064690026954, | |
| "grad_norm": 0.5410740971565247, | |
| "learning_rate": 0.0005090318402590394, | |
| "loss": 3.6871, | |
| "step": 14150 | |
| }, | |
| { | |
| "epoch": 1.5309973045822103, | |
| "grad_norm": 0.5198672413825989, | |
| "learning_rate": 0.0005087080410145709, | |
| "loss": 3.6848, | |
| "step": 14200 | |
| }, | |
| { | |
| "epoch": 1.536388140161725, | |
| "grad_norm": 0.5388085246086121, | |
| "learning_rate": 0.0005083842417701025, | |
| "loss": 3.6925, | |
| "step": 14250 | |
| }, | |
| { | |
| "epoch": 1.54177897574124, | |
| "grad_norm": 0.5680968761444092, | |
| "learning_rate": 0.000508060442525634, | |
| "loss": 3.7015, | |
| "step": 14300 | |
| }, | |
| { | |
| "epoch": 1.5471698113207548, | |
| "grad_norm": 0.529816746711731, | |
| "learning_rate": 0.0005077366432811656, | |
| "loss": 3.7089, | |
| "step": 14350 | |
| }, | |
| { | |
| "epoch": 1.5525606469002695, | |
| "grad_norm": 0.588886022567749, | |
| "learning_rate": 0.0005074128440366971, | |
| "loss": 3.6898, | |
| "step": 14400 | |
| }, | |
| { | |
| "epoch": 1.5579514824797842, | |
| "grad_norm": 0.53922438621521, | |
| "learning_rate": 0.0005070890447922288, | |
| "loss": 3.7101, | |
| "step": 14450 | |
| }, | |
| { | |
| "epoch": 1.5633423180592994, | |
| "grad_norm": 0.5846419930458069, | |
| "learning_rate": 0.0005067652455477604, | |
| "loss": 3.6752, | |
| "step": 14500 | |
| }, | |
| { | |
| "epoch": 1.568733153638814, | |
| "grad_norm": 0.5448037981987, | |
| "learning_rate": 0.0005064414463032919, | |
| "loss": 3.6868, | |
| "step": 14550 | |
| }, | |
| { | |
| "epoch": 1.5741239892183287, | |
| "grad_norm": 0.5100300908088684, | |
| "learning_rate": 0.0005061176470588235, | |
| "loss": 3.6773, | |
| "step": 14600 | |
| }, | |
| { | |
| "epoch": 1.5795148247978437, | |
| "grad_norm": 0.5662325620651245, | |
| "learning_rate": 0.0005057938478143551, | |
| "loss": 3.693, | |
| "step": 14650 | |
| }, | |
| { | |
| "epoch": 1.5849056603773586, | |
| "grad_norm": 0.5295085906982422, | |
| "learning_rate": 0.0005054700485698867, | |
| "loss": 3.6999, | |
| "step": 14700 | |
| }, | |
| { | |
| "epoch": 1.5902964959568733, | |
| "grad_norm": 0.6152496337890625, | |
| "learning_rate": 0.0005051462493254182, | |
| "loss": 3.6639, | |
| "step": 14750 | |
| }, | |
| { | |
| "epoch": 1.595687331536388, | |
| "grad_norm": 0.5873838067054749, | |
| "learning_rate": 0.0005048224500809498, | |
| "loss": 3.6879, | |
| "step": 14800 | |
| }, | |
| { | |
| "epoch": 1.6010781671159031, | |
| "grad_norm": 0.5197713375091553, | |
| "learning_rate": 0.0005044986508364813, | |
| "loss": 3.6784, | |
| "step": 14850 | |
| }, | |
| { | |
| "epoch": 1.6064690026954178, | |
| "grad_norm": 0.5193688869476318, | |
| "learning_rate": 0.0005041748515920129, | |
| "loss": 3.6756, | |
| "step": 14900 | |
| }, | |
| { | |
| "epoch": 1.6118598382749325, | |
| "grad_norm": 0.5456000566482544, | |
| "learning_rate": 0.0005038510523475444, | |
| "loss": 3.6745, | |
| "step": 14950 | |
| }, | |
| { | |
| "epoch": 1.6172506738544474, | |
| "grad_norm": 0.565951943397522, | |
| "learning_rate": 0.0005035272531030761, | |
| "loss": 3.6829, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 1.6172506738544474, | |
| "eval_accuracy": 0.3553477484568302, | |
| "eval_loss": 3.6376187801361084, | |
| "eval_runtime": 181.6027, | |
| "eval_samples_per_second": 99.178, | |
| "eval_steps_per_second": 6.2, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 1.6226415094339623, | |
| "grad_norm": 0.5930358171463013, | |
| "learning_rate": 0.0005032034538586076, | |
| "loss": 3.6899, | |
| "step": 15050 | |
| }, | |
| { | |
| "epoch": 1.628032345013477, | |
| "grad_norm": 0.5067706108093262, | |
| "learning_rate": 0.0005028796546141392, | |
| "loss": 3.6659, | |
| "step": 15100 | |
| }, | |
| { | |
| "epoch": 1.633423180592992, | |
| "grad_norm": 0.5201679468154907, | |
| "learning_rate": 0.0005025558553696707, | |
| "loss": 3.6864, | |
| "step": 15150 | |
| }, | |
| { | |
| "epoch": 1.6388140161725069, | |
| "grad_norm": 0.5754110217094421, | |
| "learning_rate": 0.0005022320561252023, | |
| "loss": 3.6606, | |
| "step": 15200 | |
| }, | |
| { | |
| "epoch": 1.6442048517520216, | |
| "grad_norm": 0.592934250831604, | |
| "learning_rate": 0.0005019082568807339, | |
| "loss": 3.7035, | |
| "step": 15250 | |
| }, | |
| { | |
| "epoch": 1.6495956873315363, | |
| "grad_norm": 0.5429046154022217, | |
| "learning_rate": 0.0005015844576362655, | |
| "loss": 3.687, | |
| "step": 15300 | |
| }, | |
| { | |
| "epoch": 1.6549865229110512, | |
| "grad_norm": 0.5521410703659058, | |
| "learning_rate": 0.000501260658391797, | |
| "loss": 3.6814, | |
| "step": 15350 | |
| }, | |
| { | |
| "epoch": 1.6603773584905661, | |
| "grad_norm": 0.5523940324783325, | |
| "learning_rate": 0.0005009368591473286, | |
| "loss": 3.671, | |
| "step": 15400 | |
| }, | |
| { | |
| "epoch": 1.6657681940700808, | |
| "grad_norm": 0.5145202875137329, | |
| "learning_rate": 0.0005006130599028602, | |
| "loss": 3.6823, | |
| "step": 15450 | |
| }, | |
| { | |
| "epoch": 1.6711590296495957, | |
| "grad_norm": 0.541299045085907, | |
| "learning_rate": 0.0005002892606583918, | |
| "loss": 3.6941, | |
| "step": 15500 | |
| }, | |
| { | |
| "epoch": 1.6765498652291106, | |
| "grad_norm": 0.578633189201355, | |
| "learning_rate": 0.0004999654614139233, | |
| "loss": 3.6671, | |
| "step": 15550 | |
| }, | |
| { | |
| "epoch": 1.6819407008086253, | |
| "grad_norm": 0.5630112290382385, | |
| "learning_rate": 0.0004996416621694549, | |
| "loss": 3.6661, | |
| "step": 15600 | |
| }, | |
| { | |
| "epoch": 1.68733153638814, | |
| "grad_norm": 0.546072244644165, | |
| "learning_rate": 0.0004993178629249864, | |
| "loss": 3.663, | |
| "step": 15650 | |
| }, | |
| { | |
| "epoch": 1.692722371967655, | |
| "grad_norm": 0.6002097725868225, | |
| "learning_rate": 0.000498994063680518, | |
| "loss": 3.6783, | |
| "step": 15700 | |
| }, | |
| { | |
| "epoch": 1.6981132075471699, | |
| "grad_norm": 0.6249855756759644, | |
| "learning_rate": 0.000498676740420939, | |
| "loss": 3.6534, | |
| "step": 15750 | |
| }, | |
| { | |
| "epoch": 1.7035040431266846, | |
| "grad_norm": 0.5264080166816711, | |
| "learning_rate": 0.0004983529411764705, | |
| "loss": 3.6742, | |
| "step": 15800 | |
| }, | |
| { | |
| "epoch": 1.7088948787061995, | |
| "grad_norm": 0.5488578081130981, | |
| "learning_rate": 0.0004980291419320022, | |
| "loss": 3.6645, | |
| "step": 15850 | |
| }, | |
| { | |
| "epoch": 1.7142857142857144, | |
| "grad_norm": 0.5747875571250916, | |
| "learning_rate": 0.0004977053426875337, | |
| "loss": 3.6661, | |
| "step": 15900 | |
| }, | |
| { | |
| "epoch": 1.719676549865229, | |
| "grad_norm": 0.5004916191101074, | |
| "learning_rate": 0.0004973815434430653, | |
| "loss": 3.6556, | |
| "step": 15950 | |
| }, | |
| { | |
| "epoch": 1.7250673854447438, | |
| "grad_norm": 0.5628464221954346, | |
| "learning_rate": 0.0004970577441985968, | |
| "loss": 3.6554, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 1.7250673854447438, | |
| "eval_accuracy": 0.3569113721653684, | |
| "eval_loss": 3.6237761974334717, | |
| "eval_runtime": 181.1991, | |
| "eval_samples_per_second": 99.399, | |
| "eval_steps_per_second": 6.214, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 1.7304582210242587, | |
| "grad_norm": 0.5369266271591187, | |
| "learning_rate": 0.0004967339449541284, | |
| "loss": 3.6758, | |
| "step": 16050 | |
| }, | |
| { | |
| "epoch": 1.7358490566037736, | |
| "grad_norm": 0.575958788394928, | |
| "learning_rate": 0.00049641014570966, | |
| "loss": 3.6766, | |
| "step": 16100 | |
| }, | |
| { | |
| "epoch": 1.7412398921832883, | |
| "grad_norm": 0.6171749234199524, | |
| "learning_rate": 0.0004960863464651916, | |
| "loss": 3.658, | |
| "step": 16150 | |
| }, | |
| { | |
| "epoch": 1.7466307277628033, | |
| "grad_norm": 0.5248631238937378, | |
| "learning_rate": 0.0004957625472207231, | |
| "loss": 3.6652, | |
| "step": 16200 | |
| }, | |
| { | |
| "epoch": 1.7520215633423182, | |
| "grad_norm": 0.5663668513298035, | |
| "learning_rate": 0.0004954387479762547, | |
| "loss": 3.6645, | |
| "step": 16250 | |
| }, | |
| { | |
| "epoch": 1.7574123989218329, | |
| "grad_norm": 0.5405056476593018, | |
| "learning_rate": 0.0004951149487317862, | |
| "loss": 3.6633, | |
| "step": 16300 | |
| }, | |
| { | |
| "epoch": 1.7628032345013476, | |
| "grad_norm": 0.5469680428504944, | |
| "learning_rate": 0.0004947911494873178, | |
| "loss": 3.6642, | |
| "step": 16350 | |
| }, | |
| { | |
| "epoch": 1.7681940700808625, | |
| "grad_norm": 0.5379207134246826, | |
| "learning_rate": 0.0004944673502428493, | |
| "loss": 3.6666, | |
| "step": 16400 | |
| }, | |
| { | |
| "epoch": 1.7735849056603774, | |
| "grad_norm": 0.5439175963401794, | |
| "learning_rate": 0.000494143550998381, | |
| "loss": 3.6703, | |
| "step": 16450 | |
| }, | |
| { | |
| "epoch": 1.778975741239892, | |
| "grad_norm": 0.5585680603981018, | |
| "learning_rate": 0.0004938197517539125, | |
| "loss": 3.666, | |
| "step": 16500 | |
| }, | |
| { | |
| "epoch": 1.784366576819407, | |
| "grad_norm": 0.5458288788795471, | |
| "learning_rate": 0.0004934959525094441, | |
| "loss": 3.6453, | |
| "step": 16550 | |
| }, | |
| { | |
| "epoch": 1.789757412398922, | |
| "grad_norm": 0.5485697388648987, | |
| "learning_rate": 0.0004931721532649756, | |
| "loss": 3.6609, | |
| "step": 16600 | |
| }, | |
| { | |
| "epoch": 1.7951482479784366, | |
| "grad_norm": 0.5749149918556213, | |
| "learning_rate": 0.0004928483540205073, | |
| "loss": 3.6645, | |
| "step": 16650 | |
| }, | |
| { | |
| "epoch": 1.8005390835579513, | |
| "grad_norm": 0.5598737001419067, | |
| "learning_rate": 0.0004925245547760388, | |
| "loss": 3.6582, | |
| "step": 16700 | |
| }, | |
| { | |
| "epoch": 1.8059299191374663, | |
| "grad_norm": 0.5720735788345337, | |
| "learning_rate": 0.0004922007555315704, | |
| "loss": 3.6479, | |
| "step": 16750 | |
| }, | |
| { | |
| "epoch": 1.8113207547169812, | |
| "grad_norm": 0.544169008731842, | |
| "learning_rate": 0.000491876956287102, | |
| "loss": 3.6539, | |
| "step": 16800 | |
| }, | |
| { | |
| "epoch": 1.8167115902964959, | |
| "grad_norm": 0.587104082107544, | |
| "learning_rate": 0.0004915531570426335, | |
| "loss": 3.6497, | |
| "step": 16850 | |
| }, | |
| { | |
| "epoch": 1.8221024258760108, | |
| "grad_norm": 0.556251585483551, | |
| "learning_rate": 0.0004912293577981652, | |
| "loss": 3.6587, | |
| "step": 16900 | |
| }, | |
| { | |
| "epoch": 1.8274932614555257, | |
| "grad_norm": 0.5477184057235718, | |
| "learning_rate": 0.0004909055585536966, | |
| "loss": 3.6421, | |
| "step": 16950 | |
| }, | |
| { | |
| "epoch": 1.8328840970350404, | |
| "grad_norm": 0.5440017580986023, | |
| "learning_rate": 0.0004905817593092283, | |
| "loss": 3.6345, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 1.8328840970350404, | |
| "eval_accuracy": 0.3582564949165117, | |
| "eval_loss": 3.606245994567871, | |
| "eval_runtime": 181.6324, | |
| "eval_samples_per_second": 99.162, | |
| "eval_steps_per_second": 6.199, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 1.838274932614555, | |
| "grad_norm": 0.5022566318511963, | |
| "learning_rate": 0.0004902579600647598, | |
| "loss": 3.6459, | |
| "step": 17050 | |
| }, | |
| { | |
| "epoch": 1.8436657681940702, | |
| "grad_norm": 0.572801947593689, | |
| "learning_rate": 0.0004899341608202914, | |
| "loss": 3.646, | |
| "step": 17100 | |
| }, | |
| { | |
| "epoch": 1.849056603773585, | |
| "grad_norm": 0.5857950448989868, | |
| "learning_rate": 0.0004896103615758229, | |
| "loss": 3.6343, | |
| "step": 17150 | |
| }, | |
| { | |
| "epoch": 1.8544474393530996, | |
| "grad_norm": 0.529326856136322, | |
| "learning_rate": 0.0004892865623313546, | |
| "loss": 3.6621, | |
| "step": 17200 | |
| }, | |
| { | |
| "epoch": 1.8598382749326146, | |
| "grad_norm": 0.5415377616882324, | |
| "learning_rate": 0.0004889627630868861, | |
| "loss": 3.6692, | |
| "step": 17250 | |
| }, | |
| { | |
| "epoch": 1.8652291105121295, | |
| "grad_norm": 0.5492255091667175, | |
| "learning_rate": 0.0004886389638424177, | |
| "loss": 3.6534, | |
| "step": 17300 | |
| }, | |
| { | |
| "epoch": 1.8706199460916442, | |
| "grad_norm": 0.5427126884460449, | |
| "learning_rate": 0.0004883151645979492, | |
| "loss": 3.6425, | |
| "step": 17350 | |
| }, | |
| { | |
| "epoch": 1.8760107816711589, | |
| "grad_norm": 0.561755359172821, | |
| "learning_rate": 0.00048799136535348077, | |
| "loss": 3.6583, | |
| "step": 17400 | |
| }, | |
| { | |
| "epoch": 1.881401617250674, | |
| "grad_norm": 0.5410698056221008, | |
| "learning_rate": 0.0004876675661090124, | |
| "loss": 3.6306, | |
| "step": 17450 | |
| }, | |
| { | |
| "epoch": 1.8867924528301887, | |
| "grad_norm": 0.542617678642273, | |
| "learning_rate": 0.0004873437668645439, | |
| "loss": 3.6534, | |
| "step": 17500 | |
| }, | |
| { | |
| "epoch": 1.8921832884097034, | |
| "grad_norm": 0.5104948878288269, | |
| "learning_rate": 0.00048701996762007553, | |
| "loss": 3.6543, | |
| "step": 17550 | |
| }, | |
| { | |
| "epoch": 1.8975741239892183, | |
| "grad_norm": 0.6261042356491089, | |
| "learning_rate": 0.0004866961683756071, | |
| "loss": 3.6425, | |
| "step": 17600 | |
| }, | |
| { | |
| "epoch": 1.9029649595687332, | |
| "grad_norm": 0.5958898067474365, | |
| "learning_rate": 0.0004863723691311387, | |
| "loss": 3.6418, | |
| "step": 17650 | |
| }, | |
| { | |
| "epoch": 1.908355795148248, | |
| "grad_norm": 0.5822218656539917, | |
| "learning_rate": 0.00048604856988667024, | |
| "loss": 3.6586, | |
| "step": 17700 | |
| }, | |
| { | |
| "epoch": 1.9137466307277629, | |
| "grad_norm": 0.5634023547172546, | |
| "learning_rate": 0.0004857247706422018, | |
| "loss": 3.6335, | |
| "step": 17750 | |
| }, | |
| { | |
| "epoch": 1.9191374663072778, | |
| "grad_norm": 0.5490560531616211, | |
| "learning_rate": 0.00048540744738262274, | |
| "loss": 3.6368, | |
| "step": 17800 | |
| }, | |
| { | |
| "epoch": 1.9245283018867925, | |
| "grad_norm": 0.5121088027954102, | |
| "learning_rate": 0.00048508364813815434, | |
| "loss": 3.6266, | |
| "step": 17850 | |
| }, | |
| { | |
| "epoch": 1.9299191374663072, | |
| "grad_norm": 0.5225231647491455, | |
| "learning_rate": 0.00048475984889368584, | |
| "loss": 3.6324, | |
| "step": 17900 | |
| }, | |
| { | |
| "epoch": 1.935309973045822, | |
| "grad_norm": 0.5291708111763, | |
| "learning_rate": 0.0004844360496492175, | |
| "loss": 3.6424, | |
| "step": 17950 | |
| }, | |
| { | |
| "epoch": 1.940700808625337, | |
| "grad_norm": 0.5437564849853516, | |
| "learning_rate": 0.000484112250404749, | |
| "loss": 3.6469, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 1.940700808625337, | |
| "eval_accuracy": 0.3598998698447057, | |
| "eval_loss": 3.5913586616516113, | |
| "eval_runtime": 181.4123, | |
| "eval_samples_per_second": 99.282, | |
| "eval_steps_per_second": 6.207, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 1.9460916442048517, | |
| "grad_norm": 0.5686632394790649, | |
| "learning_rate": 0.00048378845116028055, | |
| "loss": 3.6235, | |
| "step": 18050 | |
| }, | |
| { | |
| "epoch": 1.9514824797843666, | |
| "grad_norm": 0.5239707231521606, | |
| "learning_rate": 0.00048346465191581215, | |
| "loss": 3.6487, | |
| "step": 18100 | |
| }, | |
| { | |
| "epoch": 1.9568733153638815, | |
| "grad_norm": 0.5632966756820679, | |
| "learning_rate": 0.0004831408526713437, | |
| "loss": 3.642, | |
| "step": 18150 | |
| }, | |
| { | |
| "epoch": 1.9622641509433962, | |
| "grad_norm": 0.5901452898979187, | |
| "learning_rate": 0.0004828170534268753, | |
| "loss": 3.6363, | |
| "step": 18200 | |
| }, | |
| { | |
| "epoch": 1.967654986522911, | |
| "grad_norm": 0.5481223464012146, | |
| "learning_rate": 0.00048249325418240686, | |
| "loss": 3.6498, | |
| "step": 18250 | |
| }, | |
| { | |
| "epoch": 1.9730458221024259, | |
| "grad_norm": 0.5510320067405701, | |
| "learning_rate": 0.00048216945493793846, | |
| "loss": 3.6471, | |
| "step": 18300 | |
| }, | |
| { | |
| "epoch": 1.9784366576819408, | |
| "grad_norm": 0.5957186222076416, | |
| "learning_rate": 0.00048184565569347, | |
| "loss": 3.6295, | |
| "step": 18350 | |
| }, | |
| { | |
| "epoch": 1.9838274932614555, | |
| "grad_norm": 0.5391411781311035, | |
| "learning_rate": 0.0004815218564490016, | |
| "loss": 3.6625, | |
| "step": 18400 | |
| }, | |
| { | |
| "epoch": 1.9892183288409704, | |
| "grad_norm": 0.5556368231773376, | |
| "learning_rate": 0.00048119805720453317, | |
| "loss": 3.6166, | |
| "step": 18450 | |
| }, | |
| { | |
| "epoch": 1.9946091644204853, | |
| "grad_norm": 0.6190194487571716, | |
| "learning_rate": 0.0004808742579600647, | |
| "loss": 3.6231, | |
| "step": 18500 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 1.197678804397583, | |
| "learning_rate": 0.0004805504587155963, | |
| "loss": 3.6412, | |
| "step": 18550 | |
| }, | |
| { | |
| "epoch": 2.0053908355795147, | |
| "grad_norm": 0.5590324401855469, | |
| "learning_rate": 0.0004802266594711278, | |
| "loss": 3.5499, | |
| "step": 18600 | |
| }, | |
| { | |
| "epoch": 2.01078167115903, | |
| "grad_norm": 0.5855118632316589, | |
| "learning_rate": 0.0004799028602266594, | |
| "loss": 3.5515, | |
| "step": 18650 | |
| }, | |
| { | |
| "epoch": 2.0161725067385445, | |
| "grad_norm": 0.5683425664901733, | |
| "learning_rate": 0.000479579060982191, | |
| "loss": 3.5434, | |
| "step": 18700 | |
| }, | |
| { | |
| "epoch": 2.0215633423180592, | |
| "grad_norm": 0.5408341884613037, | |
| "learning_rate": 0.0004792552617377226, | |
| "loss": 3.5613, | |
| "step": 18750 | |
| }, | |
| { | |
| "epoch": 2.026954177897574, | |
| "grad_norm": 0.5905522108078003, | |
| "learning_rate": 0.00047893146249325413, | |
| "loss": 3.5492, | |
| "step": 18800 | |
| }, | |
| { | |
| "epoch": 2.032345013477089, | |
| "grad_norm": 0.5483571887016296, | |
| "learning_rate": 0.0004786076632487857, | |
| "loss": 3.5309, | |
| "step": 18850 | |
| }, | |
| { | |
| "epoch": 2.0377358490566038, | |
| "grad_norm": 0.5604623556137085, | |
| "learning_rate": 0.0004782838640043173, | |
| "loss": 3.5583, | |
| "step": 18900 | |
| }, | |
| { | |
| "epoch": 2.0431266846361185, | |
| "grad_norm": 0.5344943404197693, | |
| "learning_rate": 0.00047796006475984883, | |
| "loss": 3.5482, | |
| "step": 18950 | |
| }, | |
| { | |
| "epoch": 2.0485175202156336, | |
| "grad_norm": 0.5930701494216919, | |
| "learning_rate": 0.00047763626551538044, | |
| "loss": 3.5639, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 2.0485175202156336, | |
| "eval_accuracy": 0.36086101329031256, | |
| "eval_loss": 3.584812641143799, | |
| "eval_runtime": 181.1798, | |
| "eval_samples_per_second": 99.41, | |
| "eval_steps_per_second": 6.215, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 2.0539083557951483, | |
| "grad_norm": 0.597104012966156, | |
| "learning_rate": 0.000477312466270912, | |
| "loss": 3.5241, | |
| "step": 19050 | |
| }, | |
| { | |
| "epoch": 2.059299191374663, | |
| "grad_norm": 0.5604715943336487, | |
| "learning_rate": 0.0004769886670264436, | |
| "loss": 3.5646, | |
| "step": 19100 | |
| }, | |
| { | |
| "epoch": 2.0646900269541777, | |
| "grad_norm": 0.5276287198066711, | |
| "learning_rate": 0.00047666486778197515, | |
| "loss": 3.549, | |
| "step": 19150 | |
| }, | |
| { | |
| "epoch": 2.070080862533693, | |
| "grad_norm": 0.5561823844909668, | |
| "learning_rate": 0.00047634106853750675, | |
| "loss": 3.5711, | |
| "step": 19200 | |
| }, | |
| { | |
| "epoch": 2.0754716981132075, | |
| "grad_norm": 0.6093335151672363, | |
| "learning_rate": 0.00047601726929303825, | |
| "loss": 3.5684, | |
| "step": 19250 | |
| }, | |
| { | |
| "epoch": 2.0808625336927222, | |
| "grad_norm": 0.588876485824585, | |
| "learning_rate": 0.0004756934700485698, | |
| "loss": 3.5646, | |
| "step": 19300 | |
| }, | |
| { | |
| "epoch": 2.0862533692722374, | |
| "grad_norm": 0.5568276643753052, | |
| "learning_rate": 0.0004753696708041014, | |
| "loss": 3.5551, | |
| "step": 19350 | |
| }, | |
| { | |
| "epoch": 2.091644204851752, | |
| "grad_norm": 0.5360884666442871, | |
| "learning_rate": 0.00047504587155963295, | |
| "loss": 3.5605, | |
| "step": 19400 | |
| }, | |
| { | |
| "epoch": 2.0970350404312668, | |
| "grad_norm": 0.5348809361457825, | |
| "learning_rate": 0.00047472207231516456, | |
| "loss": 3.5486, | |
| "step": 19450 | |
| }, | |
| { | |
| "epoch": 2.1024258760107815, | |
| "grad_norm": 0.6352576017379761, | |
| "learning_rate": 0.0004743982730706961, | |
| "loss": 3.5433, | |
| "step": 19500 | |
| }, | |
| { | |
| "epoch": 2.1078167115902966, | |
| "grad_norm": 0.570564329624176, | |
| "learning_rate": 0.0004740744738262277, | |
| "loss": 3.5608, | |
| "step": 19550 | |
| }, | |
| { | |
| "epoch": 2.1132075471698113, | |
| "grad_norm": 0.5465729832649231, | |
| "learning_rate": 0.00047375067458175926, | |
| "loss": 3.5578, | |
| "step": 19600 | |
| }, | |
| { | |
| "epoch": 2.118598382749326, | |
| "grad_norm": 0.5874576568603516, | |
| "learning_rate": 0.00047342687533729087, | |
| "loss": 3.5579, | |
| "step": 19650 | |
| }, | |
| { | |
| "epoch": 2.123989218328841, | |
| "grad_norm": 0.59136563539505, | |
| "learning_rate": 0.0004731030760928224, | |
| "loss": 3.5486, | |
| "step": 19700 | |
| }, | |
| { | |
| "epoch": 2.129380053908356, | |
| "grad_norm": 0.6302415728569031, | |
| "learning_rate": 0.00047277927684835397, | |
| "loss": 3.5422, | |
| "step": 19750 | |
| }, | |
| { | |
| "epoch": 2.1347708894878705, | |
| "grad_norm": 0.544438898563385, | |
| "learning_rate": 0.0004724619535887749, | |
| "loss": 3.5742, | |
| "step": 19800 | |
| }, | |
| { | |
| "epoch": 2.1401617250673857, | |
| "grad_norm": 0.6034201383590698, | |
| "learning_rate": 0.0004721381543443065, | |
| "loss": 3.5728, | |
| "step": 19850 | |
| }, | |
| { | |
| "epoch": 2.1455525606469004, | |
| "grad_norm": 0.6693742871284485, | |
| "learning_rate": 0.000471814355099838, | |
| "loss": 3.5634, | |
| "step": 19900 | |
| }, | |
| { | |
| "epoch": 2.150943396226415, | |
| "grad_norm": 0.5606766939163208, | |
| "learning_rate": 0.0004714905558553697, | |
| "loss": 3.5611, | |
| "step": 19950 | |
| }, | |
| { | |
| "epoch": 2.1563342318059298, | |
| "grad_norm": 0.600206196308136, | |
| "learning_rate": 0.0004711667566109012, | |
| "loss": 3.5606, | |
| "step": 20000 | |
| }, | |
| { | |
| "epoch": 2.1563342318059298, | |
| "eval_accuracy": 0.36217603919153124, | |
| "eval_loss": 3.5728728771209717, | |
| "eval_runtime": 181.6212, | |
| "eval_samples_per_second": 99.168, | |
| "eval_steps_per_second": 6.2, | |
| "step": 20000 | |
| } | |
| ], | |
| "logging_steps": 50, | |
| "max_steps": 92750, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 10, | |
| "save_steps": 10000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.672138358784e+17, | |
| "train_batch_size": 32, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |