{
  "best_metric": 0.03486299887299538,
  "best_model_checkpoint": "saves/psy-course/Llama3-OpenBioLLM-8B/train/fold4/checkpoint-1300",
  "epoch": 4.999522946283752,
  "eval_steps": 50,
  "global_step": 3275,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015265718919950386,
      "grad_norm": 5.458767414093018,
      "learning_rate": 3.0487804878048782e-06,
      "loss": 1.4201,
      "step": 10
    },
    {
      "epoch": 0.030531437839900772,
      "grad_norm": 5.880963325500488,
      "learning_rate": 6.0975609756097564e-06,
      "loss": 1.3361,
      "step": 20
    },
    {
      "epoch": 0.04579715675985116,
      "grad_norm": 4.0639777183532715,
      "learning_rate": 9.146341463414634e-06,
      "loss": 1.1539,
      "step": 30
    },
    {
      "epoch": 0.061062875679801544,
      "grad_norm": 3.002682685852051,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.8971,
      "step": 40
    },
    {
      "epoch": 0.07632859459975193,
      "grad_norm": 1.940627098083496,
      "learning_rate": 1.524390243902439e-05,
      "loss": 0.4799,
      "step": 50
    },
    {
      "epoch": 0.07632859459975193,
      "eval_loss": 0.2933095097541809,
      "eval_runtime": 159.4102,
      "eval_samples_per_second": 7.308,
      "eval_steps_per_second": 7.308,
      "step": 50
    },
    {
      "epoch": 0.09159431351970232,
      "grad_norm": 1.761772632598877,
      "learning_rate": 1.8292682926829268e-05,
      "loss": 0.2509,
      "step": 60
    },
    {
      "epoch": 0.10686003243965271,
      "grad_norm": 1.450770616531372,
      "learning_rate": 2.134146341463415e-05,
      "loss": 0.2104,
      "step": 70
    },
    {
      "epoch": 0.12212575135960309,
      "grad_norm": 1.4452862739562988,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 0.1552,
      "step": 80
    },
    {
      "epoch": 0.13739147027955348,
      "grad_norm": 1.2762833833694458,
      "learning_rate": 2.7439024390243906e-05,
      "loss": 0.1008,
      "step": 90
    },
    {
      "epoch": 0.15265718919950386,
      "grad_norm": 0.8838859796524048,
      "learning_rate": 3.048780487804878e-05,
      "loss": 0.1053,
      "step": 100
    },
    {
      "epoch": 0.15265718919950386,
      "eval_loss": 0.07661888748407364,
      "eval_runtime": 159.8002,
      "eval_samples_per_second": 7.29,
      "eval_steps_per_second": 7.29,
      "step": 100
    },
    {
      "epoch": 0.16792290811945426,
      "grad_norm": 0.9274832606315613,
      "learning_rate": 3.353658536585366e-05,
      "loss": 0.0829,
      "step": 110
    },
    {
      "epoch": 0.18318862703940464,
      "grad_norm": 0.9330371618270874,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 0.0803,
      "step": 120
    },
    {
      "epoch": 0.19845434595935502,
      "grad_norm": 1.1532729864120483,
      "learning_rate": 3.9634146341463416e-05,
      "loss": 0.0772,
      "step": 130
    },
    {
      "epoch": 0.21372006487930542,
      "grad_norm": 0.9599048495292664,
      "learning_rate": 4.26829268292683e-05,
      "loss": 0.0777,
      "step": 140
    },
    {
      "epoch": 0.2289857837992558,
      "grad_norm": 1.0932921171188354,
      "learning_rate": 4.573170731707318e-05,
      "loss": 0.0594,
      "step": 150
    },
    {
      "epoch": 0.2289857837992558,
      "eval_loss": 0.072237528860569,
      "eval_runtime": 159.5845,
      "eval_samples_per_second": 7.3,
      "eval_steps_per_second": 7.3,
      "step": 150
    },
    {
      "epoch": 0.24425150271920618,
      "grad_norm": 0.9019838571548462,
      "learning_rate": 4.878048780487805e-05,
      "loss": 0.0845,
      "step": 160
    },
    {
      "epoch": 0.2595172216391566,
      "grad_norm": 1.1281777620315552,
      "learning_rate": 5.182926829268293e-05,
      "loss": 0.0777,
      "step": 170
    },
    {
      "epoch": 0.27478294055910696,
      "grad_norm": 0.8680335283279419,
      "learning_rate": 5.487804878048781e-05,
      "loss": 0.0886,
      "step": 180
    },
    {
      "epoch": 0.29004865947905734,
      "grad_norm": 0.7780885696411133,
      "learning_rate": 5.792682926829268e-05,
      "loss": 0.0648,
      "step": 190
    },
    {
      "epoch": 0.3053143783990077,
      "grad_norm": 0.784793496131897,
      "learning_rate": 6.097560975609756e-05,
      "loss": 0.0586,
      "step": 200
    },
    {
      "epoch": 0.3053143783990077,
      "eval_loss": 0.053653329610824585,
      "eval_runtime": 159.5655,
      "eval_samples_per_second": 7.301,
      "eval_steps_per_second": 7.301,
      "step": 200
    },
    {
      "epoch": 0.3205800973189581,
      "grad_norm": 0.8396239876747131,
      "learning_rate": 6.402439024390244e-05,
      "loss": 0.0569,
      "step": 210
    },
    {
      "epoch": 0.3358458162389085,
      "grad_norm": 1.3665101528167725,
      "learning_rate": 6.707317073170732e-05,
      "loss": 0.0523,
      "step": 220
    },
    {
      "epoch": 0.3511115351588589,
      "grad_norm": 0.4989739954471588,
      "learning_rate": 7.012195121951219e-05,
      "loss": 0.045,
      "step": 230
    },
    {
      "epoch": 0.3663772540788093,
      "grad_norm": 0.5166968703269958,
      "learning_rate": 7.317073170731707e-05,
      "loss": 0.0667,
      "step": 240
    },
    {
      "epoch": 0.38164297299875966,
      "grad_norm": 0.7004739046096802,
      "learning_rate": 7.621951219512195e-05,
      "loss": 0.066,
      "step": 250
    },
    {
      "epoch": 0.38164297299875966,
      "eval_loss": 0.0550156868994236,
      "eval_runtime": 159.9016,
      "eval_samples_per_second": 7.286,
      "eval_steps_per_second": 7.286,
      "step": 250
    },
    {
      "epoch": 0.39690869191871003,
      "grad_norm": 1.4189491271972656,
      "learning_rate": 7.926829268292683e-05,
      "loss": 0.0635,
      "step": 260
    },
    {
      "epoch": 0.4121744108386604,
      "grad_norm": 0.7976926565170288,
      "learning_rate": 8.231707317073171e-05,
      "loss": 0.0653,
      "step": 270
    },
    {
      "epoch": 0.42744012975861084,
      "grad_norm": 0.3455027937889099,
      "learning_rate": 8.53658536585366e-05,
      "loss": 0.0568,
      "step": 280
    },
    {
      "epoch": 0.4427058486785612,
      "grad_norm": 0.564358115196228,
      "learning_rate": 8.841463414634147e-05,
      "loss": 0.0629,
      "step": 290
    },
    {
      "epoch": 0.4579715675985116,
      "grad_norm": 0.3834143877029419,
      "learning_rate": 9.146341463414635e-05,
      "loss": 0.0553,
      "step": 300
    },
    {
      "epoch": 0.4579715675985116,
      "eval_loss": 0.055096790194511414,
      "eval_runtime": 159.7991,
      "eval_samples_per_second": 7.29,
      "eval_steps_per_second": 7.29,
      "step": 300
    },
    {
      "epoch": 0.473237286518462,
      "grad_norm": 0.8542683720588684,
      "learning_rate": 9.451219512195122e-05,
      "loss": 0.067,
      "step": 310
    },
    {
      "epoch": 0.48850300543841235,
      "grad_norm": 0.41747626662254333,
      "learning_rate": 9.75609756097561e-05,
      "loss": 0.05,
      "step": 320
    },
    {
      "epoch": 0.5037687243583627,
      "grad_norm": 0.5380941033363342,
      "learning_rate": 9.999988635788465e-05,
      "loss": 0.0552,
      "step": 330
    },
    {
      "epoch": 0.5190344432783132,
      "grad_norm": 0.7595194578170776,
      "learning_rate": 9.999590893808788e-05,
      "loss": 0.0437,
      "step": 340
    },
    {
      "epoch": 0.5343001621982635,
      "grad_norm": 0.8340535759925842,
      "learning_rate": 9.998624992909386e-05,
      "loss": 0.0468,
      "step": 350
    },
    {
      "epoch": 0.5343001621982635,
      "eval_loss": 0.04763290658593178,
      "eval_runtime": 159.9408,
      "eval_samples_per_second": 7.284,
      "eval_steps_per_second": 7.284,
      "step": 350
    },
    {
      "epoch": 0.5495658811182139,
      "grad_norm": 0.32083097100257874,
      "learning_rate": 9.997091042856284e-05,
      "loss": 0.0503,
      "step": 360
    },
    {
      "epoch": 0.5648316000381643,
      "grad_norm": 0.26985791325569153,
      "learning_rate": 9.994989217969224e-05,
      "loss": 0.0576,
      "step": 370
    },
    {
      "epoch": 0.5800973189581147,
      "grad_norm": 1.5231789350509644,
      "learning_rate": 9.992319757101863e-05,
      "loss": 0.0706,
      "step": 380
    },
    {
      "epoch": 0.5953630378780651,
      "grad_norm": 0.46429556608200073,
      "learning_rate": 9.98908296361462e-05,
      "loss": 0.0597,
      "step": 390
    },
    {
      "epoch": 0.6106287567980154,
      "grad_norm": 0.5127663016319275,
      "learning_rate": 9.98527920534021e-05,
      "loss": 0.0515,
      "step": 400
    },
    {
      "epoch": 0.6106287567980154,
      "eval_loss": 0.04808073118329048,
      "eval_runtime": 159.9539,
      "eval_samples_per_second": 7.283,
      "eval_steps_per_second": 7.283,
      "step": 400
    },
    {
      "epoch": 0.6258944757179659,
      "grad_norm": 0.6281822323799133,
      "learning_rate": 9.980908914541844e-05,
      "loss": 0.0517,
      "step": 410
    },
    {
      "epoch": 0.6411601946379162,
      "grad_norm": 0.4508628249168396,
      "learning_rate": 9.975972587864095e-05,
      "loss": 0.0514,
      "step": 420
    },
    {
      "epoch": 0.6564259135578666,
      "grad_norm": 0.8544013500213623,
      "learning_rate": 9.970470786276467e-05,
      "loss": 0.041,
      "step": 430
    },
    {
      "epoch": 0.671691632477817,
      "grad_norm": 0.3640694320201874,
      "learning_rate": 9.964404135009648e-05,
      "loss": 0.0507,
      "step": 440
    },
    {
      "epoch": 0.6869573513977674,
      "grad_norm": 0.203624427318573,
      "learning_rate": 9.957773323484454e-05,
      "loss": 0.0562,
      "step": 450
    },
    {
      "epoch": 0.6869573513977674,
      "eval_loss": 0.04656098783016205,
      "eval_runtime": 159.9758,
      "eval_samples_per_second": 7.282,
      "eval_steps_per_second": 7.282,
      "step": 450
    },
    {
      "epoch": 0.7022230703177178,
      "grad_norm": 0.3792983591556549,
      "learning_rate": 9.950579105233483e-05,
      "loss": 0.0496,
      "step": 460
    },
    {
      "epoch": 0.7174887892376681,
      "grad_norm": 0.3796481490135193,
      "learning_rate": 9.94282229781548e-05,
      "loss": 0.0425,
      "step": 470
    },
    {
      "epoch": 0.7327545081576186,
      "grad_norm": 0.7942231297492981,
      "learning_rate": 9.934503782722438e-05,
      "loss": 0.0541,
      "step": 480
    },
    {
      "epoch": 0.748020227077569,
      "grad_norm": 0.2960570156574249,
      "learning_rate": 9.925624505279411e-05,
      "loss": 0.0369,
      "step": 490
    },
    {
      "epoch": 0.7632859459975193,
      "grad_norm": 0.2349403351545334,
      "learning_rate": 9.916185474537098e-05,
      "loss": 0.0347,
      "step": 500
    },
    {
      "epoch": 0.7632859459975193,
      "eval_loss": 0.04013669118285179,
      "eval_runtime": 159.8253,
      "eval_samples_per_second": 7.289,
      "eval_steps_per_second": 7.289,
      "step": 500
    },
    {
      "epoch": 0.7785516649174697,
      "grad_norm": 0.6597937345504761,
      "learning_rate": 9.906187763157168e-05,
      "loss": 0.0491,
      "step": 510
    },
    {
      "epoch": 0.7938173838374201,
      "grad_norm": 0.38688021898269653,
      "learning_rate": 9.895632507290362e-05,
      "loss": 0.0526,
      "step": 520
    },
    {
      "epoch": 0.8090831027573705,
      "grad_norm": 0.3897739052772522,
      "learning_rate": 9.884520906447379e-05,
      "loss": 0.0397,
      "step": 530
    },
    {
      "epoch": 0.8243488216773208,
      "grad_norm": 0.5511626601219177,
      "learning_rate": 9.872854223362562e-05,
      "loss": 0.0539,
      "step": 540
    },
    {
      "epoch": 0.8396145405972713,
      "grad_norm": 0.5939027667045593,
      "learning_rate": 9.860633783850406e-05,
      "loss": 0.0417,
      "step": 550
    },
    {
      "epoch": 0.8396145405972713,
      "eval_loss": 0.04075213149189949,
      "eval_runtime": 159.8385,
      "eval_samples_per_second": 7.289,
      "eval_steps_per_second": 7.289,
      "step": 550
    },
    {
      "epoch": 0.8548802595172217,
      "grad_norm": 0.25341716408729553,
      "learning_rate": 9.847860976654879e-05,
      "loss": 0.0475,
      "step": 560
    },
    {
      "epoch": 0.870145978437172,
      "grad_norm": 0.2727987468242645,
      "learning_rate": 9.834537253291616e-05,
      "loss": 0.0445,
      "step": 570
    },
    {
      "epoch": 0.8854116973571224,
      "grad_norm": 1.1958130598068237,
      "learning_rate": 9.820664127882957e-05,
      "loss": 0.0583,
      "step": 580
    },
    {
      "epoch": 0.9006774162770728,
      "grad_norm": 0.5200381875038147,
      "learning_rate": 9.806243176985888e-05,
      "loss": 0.0517,
      "step": 590
    },
    {
      "epoch": 0.9159431351970232,
      "grad_norm": 0.4853935241699219,
      "learning_rate": 9.791276039412875e-05,
      "loss": 0.0466,
      "step": 600
    },
    {
      "epoch": 0.9159431351970232,
      "eval_loss": 0.03929273039102554,
      "eval_runtime": 159.8836,
      "eval_samples_per_second": 7.287,
      "eval_steps_per_second": 7.287,
      "step": 600
    },
    {
      "epoch": 0.9312088541169735,
      "grad_norm": 0.5579703450202942,
      "learning_rate": 9.775764416045628e-05,
      "loss": 0.0625,
      "step": 610
    },
    {
      "epoch": 0.946474573036924,
      "grad_norm": 0.3649356961250305,
      "learning_rate": 9.759710069641814e-05,
      "loss": 0.0485,
      "step": 620
    },
    {
      "epoch": 0.9617402919568744,
      "grad_norm": 0.3876245319843292,
      "learning_rate": 9.743114824634734e-05,
      "loss": 0.0321,
      "step": 630
    },
    {
      "epoch": 0.9770060108768247,
      "grad_norm": 0.4524376392364502,
      "learning_rate": 9.725980566925989e-05,
      "loss": 0.0453,
      "step": 640
    },
    {
      "epoch": 0.9922717297967751,
      "grad_norm": 0.27509111166000366,
      "learning_rate": 9.708309243671165e-05,
      "loss": 0.0346,
      "step": 650
    },
    {
      "epoch": 0.9922717297967751,
      "eval_loss": 0.040449682623147964,
      "eval_runtime": 159.9169,
      "eval_samples_per_second": 7.285,
      "eval_steps_per_second": 7.285,
      "step": 650
    },
    {
      "epoch": 1.0075374487167255,
      "grad_norm": 0.21305875480175018,
      "learning_rate": 9.690102863058563e-05,
      "loss": 0.0335,
      "step": 660
    },
    {
      "epoch": 1.0228031676366758,
      "grad_norm": 0.3463568091392517,
      "learning_rate": 9.67136349408098e-05,
      "loss": 0.0349,
      "step": 670
    },
    {
      "epoch": 1.0380688865566263,
      "grad_norm": 0.25874170660972595,
      "learning_rate": 9.652093266300583e-05,
      "loss": 0.0403,
      "step": 680
    },
    {
      "epoch": 1.0533346054765766,
      "grad_norm": 0.1967257261276245,
      "learning_rate": 9.632294369606916e-05,
      "loss": 0.0254,
      "step": 690
    },
    {
      "epoch": 1.068600324396527,
      "grad_norm": 0.3825061619281769,
      "learning_rate": 9.61196905396802e-05,
      "loss": 0.035,
      "step": 700
    },
    {
      "epoch": 1.068600324396527,
      "eval_loss": 0.03885221853852272,
      "eval_runtime": 159.557,
      "eval_samples_per_second": 7.301,
      "eval_steps_per_second": 7.301,
      "step": 700
    },
    {
      "epoch": 1.0838660433164775,
      "grad_norm": 0.32184284925460815,
      "learning_rate": 9.591119629174764e-05,
      "loss": 0.0431,
      "step": 710
    },
    {
      "epoch": 1.0991317622364278,
      "grad_norm": 0.2665984332561493,
      "learning_rate": 9.569748464578343e-05,
      "loss": 0.0271,
      "step": 720
    },
    {
      "epoch": 1.1143974811563782,
      "grad_norm": 0.35844355821609497,
      "learning_rate": 9.54785798882103e-05,
      "loss": 0.029,
      "step": 730
    },
    {
      "epoch": 1.1296632000763287,
      "grad_norm": 0.2451615184545517,
      "learning_rate": 9.525450689560181e-05,
      "loss": 0.0301,
      "step": 740
    },
    {
      "epoch": 1.144928918996279,
      "grad_norm": 0.29228684306144714,
      "learning_rate": 9.502529113185532e-05,
      "loss": 0.0408,
      "step": 750
    },
    {
      "epoch": 1.144928918996279,
      "eval_loss": 0.038966357707977295,
      "eval_runtime": 158.592,
      "eval_samples_per_second": 7.346,
      "eval_steps_per_second": 7.346,
      "step": 750
    },
    {
      "epoch": 1.1601946379162293,
      "grad_norm": 0.25255903601646423,
      "learning_rate": 9.479095864529828e-05,
      "loss": 0.0323,
      "step": 760
    },
    {
      "epoch": 1.1754603568361797,
      "grad_norm": 0.245975524187088,
      "learning_rate": 9.455153606572806e-05,
      "loss": 0.0296,
      "step": 770
    },
    {
      "epoch": 1.1907260757561302,
      "grad_norm": 0.271707683801651,
      "learning_rate": 9.430705060138569e-05,
      "loss": 0.0337,
      "step": 780
    },
    {
      "epoch": 1.2059917946760805,
      "grad_norm": 0.6799024939537048,
      "learning_rate": 9.405753003586395e-05,
      "loss": 0.0281,
      "step": 790
    },
    {
      "epoch": 1.2212575135960309,
      "grad_norm": 0.2844121754169464,
      "learning_rate": 9.38030027249499e-05,
      "loss": 0.0324,
      "step": 800
    },
    {
      "epoch": 1.2212575135960309,
      "eval_loss": 0.04265734553337097,
      "eval_runtime": 157.6777,
      "eval_samples_per_second": 7.388,
      "eval_steps_per_second": 7.388,
      "step": 800
    },
    {
      "epoch": 1.2365232325159814,
      "grad_norm": 0.3621199131011963,
      "learning_rate": 9.354349759340263e-05,
      "loss": 0.0382,
      "step": 810
    },
    {
      "epoch": 1.2517889514359317,
      "grad_norm": 0.4844062030315399,
      "learning_rate": 9.327904413166615e-05,
      "loss": 0.0353,
      "step": 820
    },
    {
      "epoch": 1.267054670355882,
      "grad_norm": 0.4185052216053009,
      "learning_rate": 9.300967239251798e-05,
      "loss": 0.0352,
      "step": 830
    },
    {
      "epoch": 1.2823203892758324,
      "grad_norm": 0.21575963497161865,
      "learning_rate": 9.27354129876541e-05,
      "loss": 0.0344,
      "step": 840
    },
    {
      "epoch": 1.297586108195783,
      "grad_norm": 0.18729855120182037,
      "learning_rate": 9.245629708421008e-05,
      "loss": 0.0331,
      "step": 850
    },
    {
      "epoch": 1.297586108195783,
      "eval_loss": 0.038167908787727356,
      "eval_runtime": 157.202,
      "eval_samples_per_second": 7.411,
      "eval_steps_per_second": 7.411,
      "step": 850
    },
    {
      "epoch": 1.3128518271157332,
      "grad_norm": 0.505947470664978,
      "learning_rate": 9.217235640121926e-05,
      "loss": 0.0336,
      "step": 860
    },
    {
      "epoch": 1.3281175460356835,
      "grad_norm": 0.6576479077339172,
      "learning_rate": 9.188362320600812e-05,
      "loss": 0.0418,
      "step": 870
    },
    {
      "epoch": 1.343383264955634,
      "grad_norm": 0.45256391167640686,
      "learning_rate": 9.159013031052943e-05,
      "loss": 0.0349,
      "step": 880
    },
    {
      "epoch": 1.3586489838755844,
      "grad_norm": 0.4001959264278412,
      "learning_rate": 9.129191106763346e-05,
      "loss": 0.03,
      "step": 890
    },
    {
      "epoch": 1.3739147027955347,
      "grad_norm": 0.2914159297943115,
      "learning_rate": 9.098899936727771e-05,
      "loss": 0.0335,
      "step": 900
    },
    {
      "epoch": 1.3739147027955347,
      "eval_loss": 0.04329466447234154,
      "eval_runtime": 156.5773,
      "eval_samples_per_second": 7.44,
      "eval_steps_per_second": 7.44,
      "step": 900
    },
    {
      "epoch": 1.3891804217154853,
      "grad_norm": 0.26559117436408997,
      "learning_rate": 9.068142963267558e-05,
      "loss": 0.0246,
      "step": 910
    },
    {
      "epoch": 1.4044461406354356,
      "grad_norm": 0.3729347586631775,
      "learning_rate": 9.036923681638463e-05,
      "loss": 0.0331,
      "step": 920
    },
    {
      "epoch": 1.419711859555386,
      "grad_norm": 0.2612379491329193,
      "learning_rate": 9.00524563963343e-05,
      "loss": 0.0242,
      "step": 930
    },
    {
      "epoch": 1.4349775784753362,
      "grad_norm": 0.4376416802406311,
      "learning_rate": 8.973112437179436e-05,
      "loss": 0.0375,
      "step": 940
    },
    {
      "epoch": 1.4502432973952868,
      "grad_norm": 0.2591637074947357,
      "learning_rate": 8.940527725928383e-05,
      "loss": 0.034,
      "step": 950
    },
    {
      "epoch": 1.4502432973952868,
      "eval_loss": 0.037408314645290375,
      "eval_runtime": 156.3423,
      "eval_samples_per_second": 7.452,
      "eval_steps_per_second": 7.452,
      "step": 950
    },
    {
      "epoch": 1.4655090163152371,
      "grad_norm": 0.3151344060897827,
      "learning_rate": 8.90749520884212e-05,
      "loss": 0.0311,
      "step": 960
    },
    {
      "epoch": 1.4807747352351874,
      "grad_norm": 0.30333495140075684,
      "learning_rate": 8.874018639771637e-05,
      "loss": 0.0323,
      "step": 970
    },
    {
      "epoch": 1.4960404541551378,
      "grad_norm": 0.318057119846344,
      "learning_rate": 8.840101823030471e-05,
      "loss": 0.0293,
      "step": 980
    },
    {
      "epoch": 1.5113061730750883,
      "grad_norm": 0.4195692241191864,
      "learning_rate": 8.805748612962382e-05,
      "loss": 0.0396,
      "step": 990
    },
    {
      "epoch": 1.5265718919950386,
      "grad_norm": 0.1193198561668396,
      "learning_rate": 8.77096291350334e-05,
      "loss": 0.0253,
      "step": 1000
    },
    {
      "epoch": 1.5265718919950386,
      "eval_loss": 0.03985153138637543,
      "eval_runtime": 156.2778,
      "eval_samples_per_second": 7.455,
      "eval_steps_per_second": 7.455,
      "step": 1000
    },
    {
      "epoch": 1.541837610914989,
      "grad_norm": 0.2724255323410034,
      "learning_rate": 8.735748677737874e-05,
      "loss": 0.0356,
      "step": 1010
    },
    {
      "epoch": 1.5571033298349395,
      "grad_norm": 0.6145180463790894,
      "learning_rate": 8.700109907449845e-05,
      "loss": 0.0266,
      "step": 1020
    },
    {
      "epoch": 1.5723690487548898,
      "grad_norm": 0.26061272621154785,
      "learning_rate": 8.66405065266768e-05,
      "loss": 0.0277,
      "step": 1030
    },
    {
      "epoch": 1.5876347676748401,
      "grad_norm": 0.22841082513332367,
      "learning_rate": 8.627575011204115e-05,
      "loss": 0.0293,
      "step": 1040
    },
    {
      "epoch": 1.6029004865947907,
      "grad_norm": 0.3250572681427002,
      "learning_rate": 8.590687128190516e-05,
      "loss": 0.0299,
      "step": 1050
    },
    {
      "epoch": 1.6029004865947907,
      "eval_loss": 0.03824828937649727,
      "eval_runtime": 156.1357,
      "eval_samples_per_second": 7.461,
      "eval_steps_per_second": 7.461,
      "step": 1050
    },
    {
      "epoch": 1.618166205514741,
      "grad_norm": 0.09818235039710999,
      "learning_rate": 8.553391195605833e-05,
      "loss": 0.0271,
      "step": 1060
    },
    {
      "epoch": 1.6334319244346913,
      "grad_norm": 0.46138861775398254,
      "learning_rate": 8.515691451800205e-05,
      "loss": 0.04,
      "step": 1070
    },
    {
      "epoch": 1.6486976433546419,
      "grad_norm": 0.34611156582832336,
      "learning_rate": 8.477592181013316e-05,
      "loss": 0.0268,
      "step": 1080
    },
    {
      "epoch": 1.663963362274592,
      "grad_norm": 0.3302578032016754,
      "learning_rate": 8.439097712887531e-05,
      "loss": 0.03,
      "step": 1090
    },
    {
      "epoch": 1.6792290811945425,
      "grad_norm": 0.395153284072876,
      "learning_rate": 8.400212421975865e-05,
      "loss": 0.0534,
      "step": 1100
    },
    {
      "epoch": 1.6792290811945425,
      "eval_loss": 0.04240264743566513,
      "eval_runtime": 156.1164,
      "eval_samples_per_second": 7.462,
      "eval_steps_per_second": 7.462,
      "step": 1100
    },
    {
      "epoch": 1.694494800114493,
      "grad_norm": 0.2963167726993561,
      "learning_rate": 8.360940727244859e-05,
      "loss": 0.0436,
      "step": 1110
    },
    {
      "epoch": 1.7097605190344431,
      "grad_norm": 0.2605683207511902,
      "learning_rate": 8.321287091572403e-05,
      "loss": 0.0272,
      "step": 1120
    },
    {
      "epoch": 1.7250262379543937,
      "grad_norm": 0.43958792090415955,
      "learning_rate": 8.281256021240566e-05,
      "loss": 0.0317,
      "step": 1130
    },
    {
      "epoch": 1.740291956874344,
      "grad_norm": 0.20269373059272766,
      "learning_rate": 8.240852065423506e-05,
      "loss": 0.0376,
      "step": 1140
    },
    {
      "epoch": 1.7555576757942943,
      "grad_norm": 0.19015586376190186,
      "learning_rate": 8.20007981567048e-05,
      "loss": 0.0318,
      "step": 1150
    },
    {
      "epoch": 1.7555576757942943,
      "eval_loss": 0.04022592678666115,
      "eval_runtime": 156.0262,
      "eval_samples_per_second": 7.467,
      "eval_steps_per_second": 7.467,
      "step": 1150
    },
    {
      "epoch": 1.7708233947142449,
      "grad_norm": 0.2430041879415512,
      "learning_rate": 8.158943905384082e-05,
      "loss": 0.0394,
      "step": 1160
    },
    {
      "epoch": 1.7860891136341952,
      "grad_norm": 0.1872541755437851,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.031,
      "step": 1170
    },
    {
      "epoch": 1.8013548325541455,
      "grad_norm": 0.24640363454818726,
      "learning_rate": 8.075599842924139e-05,
      "loss": 0.0253,
      "step": 1180
    },
    {
      "epoch": 1.816620551474096,
      "grad_norm": 0.23295819759368896,
      "learning_rate": 8.033401162060049e-05,
      "loss": 0.0362,
      "step": 1190
    },
    {
      "epoch": 1.8318862703940464,
      "grad_norm": 0.5127443671226501,
      "learning_rate": 7.990857762205157e-05,
      "loss": 0.0484,
      "step": 1200
    },
    {
      "epoch": 1.8318862703940464,
      "eval_loss": 0.03849531337618828,
      "eval_runtime": 155.9091,
      "eval_samples_per_second": 7.472,
      "eval_steps_per_second": 7.472,
      "step": 1200
    },
    {
      "epoch": 1.8471519893139967,
      "grad_norm": 0.3558383285999298,
      "learning_rate": 7.947974478037468e-05,
      "loss": 0.0371,
      "step": 1210
    },
    {
      "epoch": 1.8624177082339473,
      "grad_norm": 0.42320486903190613,
      "learning_rate": 7.904756182859797e-05,
      "loss": 0.0297,
      "step": 1220
    },
    {
      "epoch": 1.8776834271538976,
      "grad_norm": 0.16989709436893463,
      "learning_rate": 7.861207788045984e-05,
      "loss": 0.0313,
      "step": 1230
    },
    {
      "epoch": 1.892949146073848,
      "grad_norm": 0.37773561477661133,
      "learning_rate": 7.817334242482738e-05,
      "loss": 0.0328,
      "step": 1240
    },
    {
      "epoch": 1.9082148649937984,
      "grad_norm": 0.1707279086112976,
      "learning_rate": 7.773140532007262e-05,
      "loss": 0.0263,
      "step": 1250
    },
    {
      "epoch": 1.9082148649937984,
      "eval_loss": 0.03550805523991585,
      "eval_runtime": 155.9432,
      "eval_samples_per_second": 7.471,
      "eval_steps_per_second": 7.471,
      "step": 1250
    },
    {
      "epoch": 1.9234805839137485,
      "grad_norm": 0.32426440715789795,
      "learning_rate": 7.728631678840638e-05,
      "loss": 0.0293,
      "step": 1260
    },
    {
      "epoch": 1.938746302833699,
      "grad_norm": 0.23525658249855042,
      "learning_rate": 7.683812741017112e-05,
      "loss": 0.0321,
      "step": 1270
    },
    {
      "epoch": 1.9540120217536494,
      "grad_norm": 0.5105435848236084,
      "learning_rate": 7.638688811809274e-05,
      "loss": 0.0379,
      "step": 1280
    },
    {
      "epoch": 1.9692777406735997,
      "grad_norm": 0.27154791355133057,
      "learning_rate": 7.593265019149275e-05,
      "loss": 0.0279,
      "step": 1290
    },
    {
      "epoch": 1.9845434595935503,
      "grad_norm": 0.41763678193092346,
      "learning_rate": 7.547546525046073e-05,
      "loss": 0.0329,
      "step": 1300
    },
    {
      "epoch": 1.9845434595935503,
      "eval_loss": 0.03486299887299538,
      "eval_runtime": 155.963,
      "eval_samples_per_second": 7.47,
      "eval_steps_per_second": 7.47,
      "step": 1300
    },
    {
      "epoch": 1.9998091785135006,
      "grad_norm": 0.12300687283277512,
      "learning_rate": 7.501538524998812e-05,
      "loss": 0.0365,
      "step": 1310
    },
    {
      "epoch": 2.015074897433451,
      "grad_norm": 0.19598349928855896,
      "learning_rate": 7.455246247406406e-05,
      "loss": 0.0137,
      "step": 1320
    },
    {
      "epoch": 2.0303406163534015,
      "grad_norm": 0.3010258376598358,
      "learning_rate": 7.408674952973382e-05,
      "loss": 0.023,
      "step": 1330
    },
    {
      "epoch": 2.0456063352733516,
      "grad_norm": 0.2672453820705414,
      "learning_rate": 7.361829934112036e-05,
      "loss": 0.0213,
      "step": 1340
    },
    {
      "epoch": 2.060872054193302,
      "grad_norm": 0.21076306700706482,
      "learning_rate": 7.314716514341006e-05,
      "loss": 0.0171,
      "step": 1350
    },
    {
      "epoch": 2.060872054193302,
      "eval_loss": 0.03650743141770363,
      "eval_runtime": 155.864,
      "eval_samples_per_second": 7.474,
      "eval_steps_per_second": 7.474,
      "step": 1350
    },
    {
      "epoch": 2.0761377731132526,
      "grad_norm": 0.2325100302696228,
      "learning_rate": 7.267340047680305e-05,
      "loss": 0.0169,
      "step": 1360
    },
    {
      "epoch": 2.0914034920332027,
      "grad_norm": 0.2855936884880066,
      "learning_rate": 7.21970591804287e-05,
      "loss": 0.0169,
      "step": 1370
    },
    {
      "epoch": 2.1066692109531533,
      "grad_norm": 0.5615317821502686,
      "learning_rate": 7.171819538622747e-05,
      "loss": 0.0227,
      "step": 1380
    },
    {
      "epoch": 2.121934929873104,
      "grad_norm": 0.19267569482326508,
      "learning_rate": 7.123686351279914e-05,
      "loss": 0.0178,
      "step": 1390
    },
    {
      "epoch": 2.137200648793054,
      "grad_norm": 0.2372904121875763,
      "learning_rate": 7.07531182592187e-05,
      "loss": 0.0204,
      "step": 1400
    },
    {
      "epoch": 2.137200648793054,
      "eval_loss": 0.03742413595318794,
      "eval_runtime": 155.8584,
      "eval_samples_per_second": 7.475,
      "eval_steps_per_second": 7.475,
      "step": 1400
    },
    {
      "epoch": 2.1524663677130045,
      "grad_norm": 0.4747006893157959,
      "learning_rate": 7.026701459882026e-05,
      "loss": 0.0294,
      "step": 1410
    },
    {
      "epoch": 2.167732086632955,
      "grad_norm": 0.28667911887168884,
      "learning_rate": 6.977860777294988e-05,
      "loss": 0.0169,
      "step": 1420
    },
    {
      "epoch": 2.182997805552905,
      "grad_norm": 0.28892043232917786,
      "learning_rate": 6.92879532846878e-05,
      "loss": 0.017,
      "step": 1430
    },
    {
      "epoch": 2.1982635244728557,
      "grad_norm": 0.24941062927246094,
      "learning_rate": 6.879510689254104e-05,
      "loss": 0.0182,
      "step": 1440
    },
    {
      "epoch": 2.213529243392806,
      "grad_norm": 0.3995440900325775,
      "learning_rate": 6.830012460410697e-05,
      "loss": 0.0248,
      "step": 1450
    },
    {
      "epoch": 2.213529243392806,
      "eval_loss": 0.04132550209760666,
      "eval_runtime": 155.8248,
      "eval_samples_per_second": 7.476,
      "eval_steps_per_second": 7.476,
      "step": 1450
    },
    {
      "epoch": 2.2287949623127563,
      "grad_norm": 0.2918906509876251,
      "learning_rate": 6.780306266970851e-05,
      "loss": 0.0199,
      "step": 1460
    },
    {
      "epoch": 2.244060681232707,
      "grad_norm": 0.9692716002464294,
      "learning_rate": 6.73039775760018e-05,
      "loss": 0.0166,
      "step": 1470
    },
    {
      "epoch": 2.2593264001526574,
      "grad_norm": 0.2235914021730423,
      "learning_rate": 6.680292603955702e-05,
      "loss": 0.0178,
      "step": 1480
    },
    {
      "epoch": 2.2745921190726075,
      "grad_norm": 0.25725817680358887,
      "learning_rate": 6.629996500041299e-05,
      "loss": 0.0171,
      "step": 1490
    },
    {
      "epoch": 2.289857837992558,
      "grad_norm": 0.1939374804496765,
      "learning_rate": 6.579515161560649e-05,
      "loss": 0.0132,
      "step": 1500
    },
    {
      "epoch": 2.289857837992558,
      "eval_loss": 0.039708200842142105,
      "eval_runtime": 155.7865,
      "eval_samples_per_second": 7.478,
      "eval_steps_per_second": 7.478,
      "step": 1500
    },
    {
      "epoch": 2.305123556912508,
      "grad_norm": 0.5140581727027893,
      "learning_rate": 6.528854325267692e-05,
      "loss": 0.0254,
      "step": 1510
    },
    {
      "epoch": 2.3203892758324587,
      "grad_norm": 0.3529181480407715,
      "learning_rate": 6.478019748314686e-05,
      "loss": 0.0248,
      "step": 1520
    },
    {
      "epoch": 2.3356549947524092,
      "grad_norm": 0.6066356897354126,
      "learning_rate": 6.42701720759797e-05,
      "loss": 0.0229,
      "step": 1530
    },
    {
      "epoch": 2.3509207136723593,
      "grad_norm": 0.13625769317150116,
      "learning_rate": 6.375852499101467e-05,
      "loss": 0.0143,
      "step": 1540
    },
    {
      "epoch": 2.36618643259231,
      "grad_norm": 0.16366352140903473,
      "learning_rate": 6.324531437238019e-05,
      "loss": 0.0152,
      "step": 1550
    },
    {
      "epoch": 2.36618643259231,
      "eval_loss": 0.03828950226306915,
      "eval_runtime": 155.7795,
      "eval_samples_per_second": 7.479,
      "eval_steps_per_second": 7.479,
      "step": 1550
    },
    {
      "epoch": 2.3814521515122604,
      "grad_norm": 0.27981364727020264,
      "learning_rate": 6.273059854188636e-05,
      "loss": 0.0218,
      "step": 1560
    },
    {
      "epoch": 2.3967178704322105,
      "grad_norm": 0.42901352047920227,
      "learning_rate": 6.221443599239721e-05,
      "loss": 0.0179,
      "step": 1570
    },
    {
      "epoch": 2.411983589352161,
      "grad_norm": 0.38229596614837646,
      "learning_rate": 6.169688538118342e-05,
      "loss": 0.0255,
      "step": 1580
    },
    {
      "epoch": 2.4272493082721116,
      "grad_norm": 0.17062246799468994,
      "learning_rate": 6.117800552325655e-05,
      "loss": 0.02,
      "step": 1590
    },
    {
      "epoch": 2.4425150271920617,
      "grad_norm": 0.2703785300254822,
      "learning_rate": 6.06578553846852e-05,
      "loss": 0.0222,
      "step": 1600
    },
    {
      "epoch": 2.4425150271920617,
      "eval_loss": 0.03865242376923561,
      "eval_runtime": 155.7392,
      "eval_samples_per_second": 7.48,
      "eval_steps_per_second": 7.48,
      "step": 1600
    },
    {
      "epoch": 2.4577807461120122,
      "grad_norm": 0.2679692506790161,
      "learning_rate": 6.013649407589401e-05,
      "loss": 0.0223,
      "step": 1610
    },
    {
      "epoch": 2.473046465031963,
      "grad_norm": 0.29233378171920776,
      "learning_rate": 5.961398084494634e-05,
      "loss": 0.0252,
      "step": 1620
    },
    {
      "epoch": 2.488312183951913,
      "grad_norm": 0.46518829464912415,
      "learning_rate": 5.909037507081121e-05,
      "loss": 0.0241,
      "step": 1630
    },
    {
      "epoch": 2.5035779028718634,
      "grad_norm": 0.08025455474853516,
      "learning_rate": 5.8565736256615434e-05,
      "loss": 0.0266,
      "step": 1640
    },
    {
      "epoch": 2.518843621791814,
      "grad_norm": 0.14326392114162445,
      "learning_rate": 5.8040124022881625e-05,
      "loss": 0.0187,
      "step": 1650
    },
    {
      "epoch": 2.518843621791814,
      "eval_loss": 0.03742476552724838,
      "eval_runtime": 155.7547,
      "eval_samples_per_second": 7.48,
      "eval_steps_per_second": 7.48,
      "step": 1650
    },
    {
      "epoch": 2.534109340711764,
      "grad_norm": 0.2520762085914612,
      "learning_rate": 5.751359810075284e-05,
      "loss": 0.0151,
      "step": 1660
    },
    {
      "epoch": 2.5493750596317146,
      "grad_norm": 0.2356204092502594,
      "learning_rate": 5.6986218325204676e-05,
      "loss": 0.0103,
      "step": 1670
    },
    {
      "epoch": 2.5646407785516647,
      "grad_norm": 0.4597303867340088,
      "learning_rate": 5.645804462824556e-05,
      "loss": 0.0151,
      "step": 1680
    },
    {
      "epoch": 2.5799064974716153,
      "grad_norm": 0.40278297662734985,
      "learning_rate": 5.5929137032106005e-05,
      "loss": 0.0171,
      "step": 1690
    },
    {
      "epoch": 2.595172216391566,
      "grad_norm": 0.30628910660743713,
      "learning_rate": 5.53995556424176e-05,
      "loss": 0.0177,
      "step": 1700
    },
    {
      "epoch": 2.595172216391566,
      "eval_loss": 0.041493531316518784,
      "eval_runtime": 155.6991,
      "eval_samples_per_second": 7.482,
      "eval_steps_per_second": 7.482,
      "step": 1700
    },
    {
      "epoch": 2.610437935311516,
      "grad_norm": 0.2515423595905304,
      "learning_rate": 5.4869360641382615e-05,
      "loss": 0.0153,
      "step": 1710
    },
    {
      "epoch": 2.6257036542314665,
      "grad_norm": 0.36887672543525696,
      "learning_rate": 5.433861228093471e-05,
      "loss": 0.023,
      "step": 1720
    },
    {
      "epoch": 2.640969373151417,
      "grad_norm": 0.19568361341953278,
      "learning_rate": 5.380737087589197e-05,
      "loss": 0.0144,
      "step": 1730
    },
    {
      "epoch": 2.656235092071367,
      "grad_norm": 0.476130872964859,
      "learning_rate": 5.327569679710256e-05,
      "loss": 0.0286,
      "step": 1740
    },
    {
      "epoch": 2.6715008109913176,
      "grad_norm": 0.26536592841148376,
      "learning_rate": 5.274365046458416e-05,
      "loss": 0.0164,
      "step": 1750
    },
    {
      "epoch": 2.6715008109913176,
      "eval_loss": 0.03795096278190613,
      "eval_runtime": 155.706,
      "eval_samples_per_second": 7.482,
      "eval_steps_per_second": 7.482,
      "step": 1750
    },
    {
      "epoch": 2.686766529911268,
      "grad_norm": 0.5614975690841675,
      "learning_rate": 5.2211292340657804e-05,
      "loss": 0.0187,
      "step": 1760
    },
    {
      "epoch": 2.7020322488312183,
      "grad_norm": 0.1835988163948059,
      "learning_rate": 5.167868292307678e-05,
      "loss": 0.0242,
      "step": 1770
    },
    {
      "epoch": 2.717297967751169,
      "grad_norm": 0.2698806822299957,
      "learning_rate": 5.114588273815173e-05,
      "loss": 0.016,
      "step": 1780
    },
    {
      "epoch": 2.732563686671119,
      "grad_norm": 0.5950194597244263,
      "learning_rate": 5.061295233387223e-05,
      "loss": 0.0275,
      "step": 1790
    },
    {
      "epoch": 2.7478294055910695,
      "grad_norm": 0.2528298795223236,
      "learning_rate": 5.007995227302617e-05,
      "loss": 0.0212,
      "step": 1800
    },
    {
      "epoch": 2.7478294055910695,
      "eval_loss": 0.03947535157203674,
      "eval_runtime": 155.725,
      "eval_samples_per_second": 7.481,
      "eval_steps_per_second": 7.481,
      "step": 1800
    },
    {
      "epoch": 2.76309512451102,
      "grad_norm": 0.09799537807703018,
      "learning_rate": 4.954694312631729e-05,
      "loss": 0.024,
      "step": 1810
    },
    {
      "epoch": 2.7783608434309706,
      "grad_norm": 0.6346628665924072,
      "learning_rate": 4.901398546548181e-05,
      "loss": 0.0186,
      "step": 1820
    },
    {
      "epoch": 2.7936265623509207,
      "grad_norm": 0.4291705787181854,
      "learning_rate": 4.848113985640513e-05,
      "loss": 0.0178,
      "step": 1830
    },
    {
      "epoch": 2.808892281270871,
      "grad_norm": 0.26121002435684204,
      "learning_rate": 4.794846685223884e-05,
      "loss": 0.0229,
      "step": 1840
    },
    {
      "epoch": 2.8241580001908213,
      "grad_norm": 0.40268027782440186,
      "learning_rate": 4.741602698651966e-05,
      "loss": 0.0248,
      "step": 1850
    },
    {
      "epoch": 2.8241580001908213,
      "eval_loss": 0.03567200526595116,
      "eval_runtime": 155.7614,
      "eval_samples_per_second": 7.479,
      "eval_steps_per_second": 7.479,
      "step": 1850
    },
    {
      "epoch": 2.839423719110772,
      "grad_norm": 0.28412625193595886,
      "learning_rate": 4.6883880766290086e-05,
      "loss": 0.0244,
      "step": 1860
    },
    {
      "epoch": 2.8546894380307224,
      "grad_norm": 0.2994270324707031,
      "learning_rate": 4.635208866522251e-05,
      "loss": 0.0176,
      "step": 1870
    },
    {
      "epoch": 2.8699551569506725,
      "grad_norm": 0.3112628757953644,
      "learning_rate": 4.5820711116746785e-05,
      "loss": 0.0136,
      "step": 1880
    },
    {
      "epoch": 2.885220875870623,
      "grad_norm": 0.7493220567703247,
      "learning_rate": 4.528980850718255e-05,
      "loss": 0.0249,
      "step": 1890
    },
    {
      "epoch": 2.9004865947905736,
      "grad_norm": 0.3043513894081116,
      "learning_rate": 4.475944116887695e-05,
      "loss": 0.0187,
      "step": 1900
    },
    {
      "epoch": 2.9004865947905736,
      "eval_loss": 0.03836146742105484,
      "eval_runtime": 155.7298,
      "eval_samples_per_second": 7.481,
      "eval_steps_per_second": 7.481,
      "step": 1900
    },
    {
      "epoch": 2.9157523137105237,
      "grad_norm": 0.28810712695121765,
      "learning_rate": 4.4229669373348226e-05,
      "loss": 0.0252,
      "step": 1910
    },
    {
      "epoch": 2.9310180326304742,
      "grad_norm": 0.26556193828582764,
      "learning_rate": 4.3700553324436575e-05,
      "loss": 0.0177,
      "step": 1920
    },
    {
      "epoch": 2.9462837515504248,
      "grad_norm": 0.4432234764099121,
      "learning_rate": 4.317215315146238e-05,
      "loss": 0.0247,
      "step": 1930
    },
    {
      "epoch": 2.961549470470375,
      "grad_norm": 0.35339197516441345,
      "learning_rate": 4.264452890239315e-05,
      "loss": 0.0217,
      "step": 1940
    },
    {
      "epoch": 2.9768151893903254,
      "grad_norm": 0.48235374689102173,
      "learning_rate": 4.211774053701952e-05,
      "loss": 0.0315,
      "step": 1950
    },
    {
      "epoch": 2.9768151893903254,
      "eval_loss": 0.037155311554670334,
      "eval_runtime": 155.6439,
      "eval_samples_per_second": 7.485,
      "eval_steps_per_second": 7.485,
      "step": 1950
    },
    {
      "epoch": 2.9920809083102755,
      "grad_norm": 0.1263391226530075,
      "learning_rate": 4.159184792014145e-05,
      "loss": 0.0153,
      "step": 1960
    },
    {
      "epoch": 3.007346627230226,
      "grad_norm": 0.1030694767832756,
      "learning_rate": 4.1066910814765016e-05,
      "loss": 0.0102,
      "step": 1970
    },
    {
      "epoch": 3.0226123461501766,
      "grad_norm": 0.19300442934036255,
      "learning_rate": 4.0542988875310995e-05,
      "loss": 0.0146,
      "step": 1980
    },
    {
      "epoch": 3.0378780650701267,
      "grad_norm": 0.14339104294776917,
      "learning_rate": 4.002014164083552e-05,
      "loss": 0.0098,
      "step": 1990
    },
    {
      "epoch": 3.0531437839900772,
      "grad_norm": 0.06706851720809937,
      "learning_rate": 3.9498428528264204e-05,
      "loss": 0.006,
      "step": 2000
    },
    {
      "epoch": 3.0531437839900772,
      "eval_loss": 0.04230096563696861,
      "eval_runtime": 155.6887,
      "eval_samples_per_second": 7.483,
      "eval_steps_per_second": 7.483,
      "step": 2000
    },
    {
      "epoch": 3.068409502910028,
      "grad_norm": 0.12648242712020874,
      "learning_rate": 3.89779088256397e-05,
      "loss": 0.0063,
      "step": 2010
    },
    {
      "epoch": 3.083675221829978,
      "grad_norm": 0.24645593762397766,
      "learning_rate": 3.845864168538437e-05,
      "loss": 0.0067,
      "step": 2020
    },
    {
      "epoch": 3.0989409407499284,
      "grad_norm": 0.23762091994285583,
      "learning_rate": 3.794068611757794e-05,
      "loss": 0.0094,
      "step": 2030
    },
    {
      "epoch": 3.114206659669879,
      "grad_norm": 0.22739964723587036,
      "learning_rate": 3.7424100983251695e-05,
      "loss": 0.0094,
      "step": 2040
    },
    {
      "epoch": 3.129472378589829,
      "grad_norm": 0.7959986925125122,
      "learning_rate": 3.6908944987699345e-05,
      "loss": 0.0077,
      "step": 2050
    },
    {
      "epoch": 3.129472378589829,
      "eval_loss": 0.04588808864355087,
      "eval_runtime": 155.6391,
      "eval_samples_per_second": 7.485,
      "eval_steps_per_second": 7.485,
      "step": 2050
    },
    {
      "epoch": 3.1447380975097796,
      "grad_norm": 0.09930922836065292,
      "learning_rate": 3.639527667380571e-05,
      "loss": 0.0035,
      "step": 2060
    },
    {
      "epoch": 3.16000381642973,
      "grad_norm": 0.3177180290222168,
      "learning_rate": 3.5883154415393885e-05,
      "loss": 0.0079,
      "step": 2070
    },
    {
      "epoch": 3.1752695353496803,
      "grad_norm": 0.402797132730484,
      "learning_rate": 3.537263641059152e-05,
      "loss": 0.0098,
      "step": 2080
    },
    {
      "epoch": 3.190535254269631,
      "grad_norm": 0.27361375093460083,
      "learning_rate": 3.486378067521718e-05,
      "loss": 0.0087,
      "step": 2090
    },
    {
      "epoch": 3.2058009731895813,
      "grad_norm": 0.41781383752822876,
      "learning_rate": 3.435664503618732e-05,
      "loss": 0.0073,
      "step": 2100
    },
    {
      "epoch": 3.2058009731895813,
      "eval_loss": 0.04926072433590889,
      "eval_runtime": 155.69,
      "eval_samples_per_second": 7.483,
      "eval_steps_per_second": 7.483,
      "step": 2100
    },
    {
      "epoch": 3.2210666921095314,
      "grad_norm": 0.04020585119724274,
      "learning_rate": 3.3851287124944756e-05,
      "loss": 0.0059,
      "step": 2110
    },
    {
      "epoch": 3.236332411029482,
      "grad_norm": 0.11026966571807861,
      "learning_rate": 3.334776437090944e-05,
      "loss": 0.0052,
      "step": 2120
    },
    {
      "epoch": 3.251598129949432,
      "grad_norm": 0.029121991246938705,
      "learning_rate": 3.2846133994952046e-05,
      "loss": 0.0044,
      "step": 2130
    },
    {
      "epoch": 3.2668638488693826,
      "grad_norm": 0.22142323851585388,
      "learning_rate": 3.234645300289136e-05,
      "loss": 0.0039,
      "step": 2140
    },
    {
      "epoch": 3.282129567789333,
      "grad_norm": 0.9102628827095032,
      "learning_rate": 3.1848778179016075e-05,
      "loss": 0.0096,
      "step": 2150
    },
    {
      "epoch": 3.282129567789333,
      "eval_loss": 0.05229068547487259,
      "eval_runtime": 155.7655,
      "eval_samples_per_second": 7.479,
      "eval_steps_per_second": 7.479,
      "step": 2150
    },
    {
      "epoch": 3.2973952867092833,
      "grad_norm": 0.6087216138839722,
      "learning_rate": 3.135316607963176e-05,
      "loss": 0.0104,
      "step": 2160
    },
    {
      "epoch": 3.312661005629234,
      "grad_norm": 0.49881669878959656,
      "learning_rate": 3.085967302663375e-05,
      "loss": 0.0076,
      "step": 2170
    },
    {
      "epoch": 3.3279267245491844,
      "grad_norm": 0.3587443232536316,
      "learning_rate": 3.0368355101106615e-05,
      "loss": 0.0142,
      "step": 2180
    },
    {
      "epoch": 3.3431924434691345,
      "grad_norm": 0.12659725546836853,
      "learning_rate": 2.987926813695116e-05,
      "loss": 0.008,
      "step": 2190
    },
    {
      "epoch": 3.358458162389085,
      "grad_norm": 0.07203304767608643,
      "learning_rate": 2.939246771453924e-05,
      "loss": 0.0086,
      "step": 2200
    },
    {
      "epoch": 3.358458162389085,
      "eval_loss": 0.04494870454072952,
      "eval_runtime": 155.7823,
      "eval_samples_per_second": 7.478,
      "eval_steps_per_second": 7.478,
      "step": 2200
    },
    {
      "epoch": 3.3737238813090356,
      "grad_norm": 0.7683483958244324,
      "learning_rate": 2.890800915439772e-05,
      "loss": 0.0112,
      "step": 2210
    },
    {
      "epoch": 3.3889896002289857,
      "grad_norm": 0.1044091284275055,
      "learning_rate": 2.842594751092159e-05,
      "loss": 0.01,
      "step": 2220
    },
    {
      "epoch": 3.404255319148936,
      "grad_norm": 0.2757960557937622,
      "learning_rate": 2.794633756611776e-05,
      "loss": 0.0082,
      "step": 2230
    },
    {
      "epoch": 3.4195210380688867,
      "grad_norm": 0.31744205951690674,
      "learning_rate": 2.7469233823379347e-05,
      "loss": 0.0056,
      "step": 2240
    },
    {
      "epoch": 3.434786756988837,
      "grad_norm": 1.4233782291412354,
      "learning_rate": 2.6994690501292032e-05,
      "loss": 0.0057,
      "step": 2250
    },
    {
      "epoch": 3.434786756988837,
      "eval_loss": 0.04692812263965607,
      "eval_runtime": 155.7735,
      "eval_samples_per_second": 7.479,
      "eval_steps_per_second": 7.479,
      "step": 2250
    },
    {
      "epoch": 3.4500524759087874,
      "grad_norm": 0.5911946892738342,
      "learning_rate": 2.652276152747246e-05,
      "loss": 0.0073,
      "step": 2260
    },
    {
      "epoch": 3.465318194828738,
      "grad_norm": 0.26131606101989746,
      "learning_rate": 2.6053500532439968e-05,
      "loss": 0.0056,
      "step": 2270
    },
    {
      "epoch": 3.480583913748688,
      "grad_norm": 0.03926728665828705,
      "learning_rate": 2.5586960843521824e-05,
      "loss": 0.0085,
      "step": 2280
    },
    {
      "epoch": 3.4958496326686386,
      "grad_norm": 0.3418906629085541,
      "learning_rate": 2.5123195478793217e-05,
      "loss": 0.0096,
      "step": 2290
    },
    {
      "epoch": 3.5111153515885887,
      "grad_norm": 0.1729433536529541,
      "learning_rate": 2.466225714105202e-05,
      "loss": 0.0098,
      "step": 2300
    },
    {
      "epoch": 3.5111153515885887,
      "eval_loss": 0.04600750282406807,
      "eval_runtime": 155.6908,
      "eval_samples_per_second": 7.483,
      "eval_steps_per_second": 7.483,
      "step": 2300
    },
    {
      "epoch": 3.526381070508539,
      "grad_norm": 0.1316080242395401,
      "learning_rate": 2.420419821182982e-05,
      "loss": 0.0072,
      "step": 2310
    },
    {
      "epoch": 3.5416467894284898,
      "grad_norm": 0.6513664722442627,
      "learning_rate": 2.3749070745438996e-05,
      "loss": 0.0056,
      "step": 2320
    },
    {
      "epoch": 3.5569125083484403,
      "grad_norm": 0.20830810070037842,
      "learning_rate": 2.3296926463057396e-05,
      "loss": 0.0071,
      "step": 2330
    },
    {
      "epoch": 3.5721782272683904,
      "grad_norm": 0.14865128695964813,
      "learning_rate": 2.284781674685058e-05,
      "loss": 0.0041,
      "step": 2340
    },
    {
      "epoch": 3.587443946188341,
      "grad_norm": 0.1634235978126526,
      "learning_rate": 2.2401792634132708e-05,
      "loss": 0.0086,
      "step": 2350
    },
    {
      "epoch": 3.587443946188341,
      "eval_loss": 0.04932467266917229,
      "eval_runtime": 155.7011,
      "eval_samples_per_second": 7.482,
      "eval_steps_per_second": 7.482,
      "step": 2350
    },
    {
      "epoch": 3.602709665108291,
      "grad_norm": 0.6523926854133606,
      "learning_rate": 2.195890481156666e-05,
      "loss": 0.0083,
      "step": 2360
    },
    {
      "epoch": 3.6179753840282416,
      "grad_norm": 0.15616604685783386,
      "learning_rate": 2.151920360940387e-05,
      "loss": 0.0106,
      "step": 2370
    },
    {
      "epoch": 3.633241102948192,
      "grad_norm": 0.43568360805511475,
      "learning_rate": 2.1082738995764785e-05,
      "loss": 0.0052,
      "step": 2380
    },
    {
      "epoch": 3.6485068218681422,
      "grad_norm": 0.251506507396698,
      "learning_rate": 2.0649560570960465e-05,
      "loss": 0.0056,
      "step": 2390
    },
    {
      "epoch": 3.663772540788093,
      "grad_norm": 0.3159063458442688,
      "learning_rate": 2.0219717561855855e-05,
      "loss": 0.0073,
      "step": 2400
    },
    {
      "epoch": 3.663772540788093,
      "eval_loss": 0.047119684517383575,
      "eval_runtime": 155.735,
      "eval_samples_per_second": 7.481,
      "eval_steps_per_second": 7.481,
      "step": 2400
    },
    {
      "epoch": 3.679038259708043,
      "grad_norm": 0.048487480729818344,
      "learning_rate": 1.9793258816275728e-05,
      "loss": 0.0091,
      "step": 2410
    },
    {
      "epoch": 3.6943039786279934,
      "grad_norm": 0.4514082968235016,
      "learning_rate": 1.9370232797453402e-05,
      "loss": 0.009,
      "step": 2420
    },
    {
      "epoch": 3.709569697547944,
      "grad_norm": 0.271023154258728,
      "learning_rate": 1.8950687578523502e-05,
      "loss": 0.0062,
      "step": 2430
    },
    {
      "epoch": 3.7248354164678945,
      "grad_norm": 0.155420184135437,
      "learning_rate": 1.853467083705869e-05,
      "loss": 0.0031,
      "step": 2440
    },
    {
      "epoch": 3.7401011353878446,
      "grad_norm": 0.33148664236068726,
      "learning_rate": 1.8122229849651716e-05,
      "loss": 0.0086,
      "step": 2450
    },
    {
      "epoch": 3.7401011353878446,
      "eval_loss": 0.04678386077284813,
      "eval_runtime": 155.695,
      "eval_samples_per_second": 7.483,
      "eval_steps_per_second": 7.483,
      "step": 2450
    },
    {
      "epoch": 3.755366854307795,
      "grad_norm": 0.3089897632598877,
      "learning_rate": 1.7713411486542707e-05,
      "loss": 0.008,
      "step": 2460
    },
    {
      "epoch": 3.7706325732277453,
      "grad_norm": 0.4456401467323303,
      "learning_rate": 1.7308262206292897e-05,
      "loss": 0.0083,
      "step": 2470
    },
    {
      "epoch": 3.785898292147696,
      "grad_norm": 0.46226081252098083,
      "learning_rate": 1.6906828050504907e-05,
      "loss": 0.0149,
      "step": 2480
    },
    {
      "epoch": 3.8011640110676463,
      "grad_norm": 0.018434859812259674,
      "learning_rate": 1.650915463859068e-05,
      "loss": 0.0077,
      "step": 2490
    },
    {
      "epoch": 3.8164297299875964,
      "grad_norm": 0.28657904267311096,
      "learning_rate": 1.6115287162587055e-05,
      "loss": 0.0079,
      "step": 2500
    },
    {
      "epoch": 3.8164297299875964,
      "eval_loss": 0.045529987663030624,
      "eval_runtime": 155.7642,
      "eval_samples_per_second": 7.479,
      "eval_steps_per_second": 7.479,
      "step": 2500
    },
    {
      "epoch": 3.831695448907547,
      "grad_norm": 0.09848945587873459,
      "learning_rate": 1.57252703820203e-05,
      "loss": 0.0052,
      "step": 2510
    },
    {
      "epoch": 3.8469611678274975,
      "grad_norm": 0.4062275290489197,
      "learning_rate": 1.5339148618819393e-05,
      "loss": 0.009,
      "step": 2520
    },
    {
      "epoch": 3.8622268867474476,
      "grad_norm": 0.3156788945198059,
      "learning_rate": 1.4956965752279395e-05,
      "loss": 0.0082,
      "step": 2530
    },
    {
      "epoch": 3.877492605667398,
      "grad_norm": 0.35523831844329834,
      "learning_rate": 1.457876521407484e-05,
      "loss": 0.0072,
      "step": 2540
    },
    {
      "epoch": 3.8927583245873487,
      "grad_norm": 0.243367001414299,
      "learning_rate": 1.4204589983324173e-05,
      "loss": 0.0042,
      "step": 2550
    },
    {
      "epoch": 3.8927583245873487,
      "eval_loss": 0.04733826965093613,
      "eval_runtime": 155.9113,
      "eval_samples_per_second": 7.472,
      "eval_steps_per_second": 7.472,
      "step": 2550
    },
    {
      "epoch": 3.908024043507299,
      "grad_norm": 0.5131608843803406,
      "learning_rate": 1.383448258170557e-05,
      "loss": 0.0091,
      "step": 2560
    },
    {
      "epoch": 3.9232897624272494,
      "grad_norm": 0.4350660443305969,
      "learning_rate": 1.3468485068624653e-05,
      "loss": 0.005,
      "step": 2570
    },
    {
      "epoch": 3.9385554813471995,
      "grad_norm": 0.10545644909143448,
      "learning_rate": 1.310663903643492e-05,
      "loss": 0.009,
      "step": 2580
    },
    {
      "epoch": 3.95382120026715,
      "grad_norm": 0.19669237732887268,
      "learning_rate": 1.2748985605711028e-05,
      "loss": 0.0072,
      "step": 2590
    },
    {
      "epoch": 3.9690869191871005,
      "grad_norm": 0.3060455024242401,
      "learning_rate": 1.2395565420575932e-05,
      "loss": 0.0088,
      "step": 2600
    },
    {
      "epoch": 3.9690869191871005,
      "eval_loss": 0.047428667545318604,
      "eval_runtime": 156.1393,
      "eval_samples_per_second": 7.461,
      "eval_steps_per_second": 7.461,
      "step": 2600
    },
    {
      "epoch": 3.984352638107051,
      "grad_norm": 0.1554788053035736,
      "learning_rate": 1.2046418644081903e-05,
      "loss": 0.014,
      "step": 2610
    },
    {
      "epoch": 3.999618357027001,
      "grad_norm": 1.1880522966384888,
      "learning_rate": 1.1701584953646505e-05,
      "loss": 0.0068,
      "step": 2620
    },
    {
      "epoch": 4.014884075946951,
      "grad_norm": 0.056341659277677536,
      "learning_rate": 1.1361103536543466e-05,
      "loss": 0.0044,
      "step": 2630
    },
    {
      "epoch": 4.030149794866902,
      "grad_norm": 0.012919405475258827,
      "learning_rate": 1.1025013085449527e-05,
      "loss": 0.0022,
      "step": 2640
    },
    {
      "epoch": 4.045415513786852,
      "grad_norm": 0.060808297246694565,
      "learning_rate": 1.0693351794047224e-05,
      "loss": 0.004,
      "step": 2650
    },
    {
      "epoch": 4.045415513786852,
      "eval_loss": 0.0473913699388504,
      "eval_runtime": 156.2274,
      "eval_samples_per_second": 7.457,
      "eval_steps_per_second": 7.457,
      "step": 2650
    },
    {
      "epoch": 4.060681232706803,
      "grad_norm": 0.15758514404296875,
      "learning_rate": 1.036615735268468e-05,
      "loss": 0.0015,
      "step": 2660
    },
    {
      "epoch": 4.0759469516267535,
      "grad_norm": 0.04278062283992767,
      "learning_rate": 1.0043466944092272e-05,
      "loss": 0.0023,
      "step": 2670
    },
    {
      "epoch": 4.091212670546703,
      "grad_norm": 0.08595144748687744,
      "learning_rate": 9.72531723915726e-06,
      "loss": 0.0048,
      "step": 2680
    },
    {
      "epoch": 4.106478389466654,
      "grad_norm": 0.03355566784739494,
      "learning_rate": 9.411744392756405e-06,
      "loss": 0.0033,
      "step": 2690
    },
    {
      "epoch": 4.121744108386604,
      "grad_norm": 0.07282890379428864,
      "learning_rate": 9.102784039647339e-06,
      "loss": 0.0033,
      "step": 2700
    },
    {
      "epoch": 4.121744108386604,
      "eval_loss": 0.049959179013967514,
      "eval_runtime": 156.1823,
      "eval_samples_per_second": 7.459,
      "eval_steps_per_second": 7.459,
      "step": 2700
    },
    {
      "epoch": 4.137009827306555,
      "grad_norm": 0.01570916548371315,
      "learning_rate": 8.79847129041893e-06,
      "loss": 0.003,
      "step": 2710
    },
    {
      "epoch": 4.152275546226505,
| "grad_norm": 0.06394907087087631, | |
| "learning_rate": 8.498840727501316e-06, | |
| "loss": 0.0019, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.167541265146456, | |
| "grad_norm": 0.10380612313747406, | |
| "learning_rate": 8.203926401235957e-06, | |
| "loss": 0.0034, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.1828069840664055, | |
| "grad_norm": 0.1005665585398674, | |
| "learning_rate": 7.913761826006017e-06, | |
| "loss": 0.0023, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.198072702986356, | |
| "grad_norm": 0.058339644223451614, | |
| "learning_rate": 7.628379976427868e-06, | |
| "loss": 0.0009, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.198072702986356, | |
| "eval_loss": 0.05198967829346657, | |
| "eval_runtime": 156.1523, | |
| "eval_samples_per_second": 7.461, | |
| "eval_steps_per_second": 7.461, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.213338421906307, | |
| "grad_norm": 0.1591460257768631, | |
| "learning_rate": 7.347813283603705e-06, | |
| "loss": 0.0058, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.228604140826257, | |
| "grad_norm": 0.017265984788537025, | |
| "learning_rate": 7.072093631436161e-06, | |
| "loss": 0.0014, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.243869859746208, | |
| "grad_norm": 0.05040580779314041, | |
| "learning_rate": 6.801252353004867e-06, | |
| "loss": 0.0033, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.259135578666158, | |
| "grad_norm": 0.04195038229227066, | |
| "learning_rate": 6.535320227005826e-06, | |
| "loss": 0.0034, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.274401297586108, | |
| "grad_norm": 0.024743085727095604, | |
| "learning_rate": 6.274327474253611e-06, | |
| "loss": 0.0021, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.274401297586108, | |
| "eval_loss": 0.053240980952978134, | |
| "eval_runtime": 156.2448, | |
| "eval_samples_per_second": 7.456, | |
| "eval_steps_per_second": 7.456, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.289667016506058, | |
| "grad_norm": 0.03961138427257538, | |
| "learning_rate": 6.018303754247112e-06, | |
| "loss": 0.0027, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.304932735426009, | |
| "grad_norm": 0.036531973630189896, | |
| "learning_rate": 5.767278161798911e-06, | |
| "loss": 0.0034, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.3201984543459595, | |
| "grad_norm": 0.16394208371639252, | |
| "learning_rate": 5.521279223729026e-06, | |
| "loss": 0.0012, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.33546417326591, | |
| "grad_norm": 0.032556578516960144, | |
| "learning_rate": 5.280334895622968e-06, | |
| "loss": 0.0014, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.350729892185861, | |
| "grad_norm": 0.21047158539295197, | |
| "learning_rate": 5.044472558654961e-06, | |
| "loss": 0.0042, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.350729892185861, | |
| "eval_loss": 0.05455264449119568, | |
| "eval_runtime": 156.3026, | |
| "eval_samples_per_second": 7.453, | |
| "eval_steps_per_second": 7.453, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.36599561110581, | |
| "grad_norm": 0.02670607902109623, | |
| "learning_rate": 4.813719016476203e-06, | |
| "loss": 0.0021, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.381261330025761, | |
| "grad_norm": 0.023184500634670258, | |
| "learning_rate": 4.588100492168973e-06, | |
| "loss": 0.0012, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.396527048945711, | |
| "grad_norm": 0.14299553632736206, | |
| "learning_rate": 4.367642625266511e-06, | |
| "loss": 0.0015, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.411792767865662, | |
| "grad_norm": 0.020235197618603706, | |
| "learning_rate": 4.1523704688394176e-06, | |
| "loss": 0.0014, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.427058486785612, | |
| "grad_norm": 0.16960173845291138, | |
| "learning_rate": 3.9423084866484884e-06, | |
| "loss": 0.0017, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.427058486785612, | |
| "eval_loss": 0.05663292482495308, | |
| "eval_runtime": 156.2522, | |
| "eval_samples_per_second": 7.456, | |
| "eval_steps_per_second": 7.456, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.442324205705562, | |
| "grad_norm": 0.030367441475391388, | |
| "learning_rate": 3.737480550364736e-06, | |
| "loss": 0.0023, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.457589924625513, | |
| "grad_norm": 0.05753505975008011, | |
| "learning_rate": 3.5379099368564817e-06, | |
| "loss": 0.0016, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.472855643545463, | |
| "grad_norm": 0.1254509538412094, | |
| "learning_rate": 3.3436193255442396e-06, | |
| "loss": 0.0013, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.488121362465414, | |
| "grad_norm": 0.09961813688278198, | |
| "learning_rate": 3.1546307958233214e-06, | |
| "loss": 0.0043, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.503387081385364, | |
| "grad_norm": 0.1321631669998169, | |
| "learning_rate": 2.9709658245547834e-06, | |
| "loss": 0.002, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.503387081385364, | |
| "eval_loss": 0.057674556970596313, | |
| "eval_runtime": 156.1774, | |
| "eval_samples_per_second": 7.459, | |
| "eval_steps_per_second": 7.459, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.518652800305315, | |
| "grad_norm": 0.04699141904711723, | |
| "learning_rate": 2.792645283624712e-06, | |
| "loss": 0.0022, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.5339185192252645, | |
| "grad_norm": 0.13197575509548187, | |
| "learning_rate": 2.6196894375723645e-06, | |
| "loss": 0.0019, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.549184238145215, | |
| "grad_norm": 0.18447992205619812, | |
| "learning_rate": 2.452117941287246e-06, | |
| "loss": 0.0036, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.5644499570651655, | |
| "grad_norm": 0.26583021879196167, | |
| "learning_rate": 2.2899498377755566e-06, | |
| "loss": 0.0011, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.579715675985116, | |
| "grad_norm": 0.02824637107551098, | |
| "learning_rate": 2.1332035559960663e-06, | |
| "loss": 0.0021, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.579715675985116, | |
| "eval_loss": 0.05825762450695038, | |
| "eval_runtime": 156.2544, | |
| "eval_samples_per_second": 7.456, | |
| "eval_steps_per_second": 7.456, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.594981394905067, | |
| "grad_norm": 0.050509508699178696, | |
| "learning_rate": 1.9818969087658735e-06, | |
| "loss": 0.0014, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.610247113825016, | |
| "grad_norm": 0.05490247160196304, | |
| "learning_rate": 1.8360470907361093e-06, | |
| "loss": 0.0011, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.625512832744967, | |
| "grad_norm": 0.03532513976097107, | |
| "learning_rate": 1.6956706764379438e-06, | |
| "loss": 0.0022, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.640778551664917, | |
| "grad_norm": 0.19588930904865265, | |
| "learning_rate": 1.5607836183989921e-06, | |
| "loss": 0.0014, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.656044270584868, | |
| "grad_norm": 0.04761837422847748, | |
| "learning_rate": 1.4314012453305215e-06, | |
| "loss": 0.0017, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.656044270584868, | |
| "eval_loss": 0.05846200883388519, | |
| "eval_runtime": 156.1503, | |
| "eval_samples_per_second": 7.461, | |
| "eval_steps_per_second": 7.461, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.6713099895048185, | |
| "grad_norm": 0.4008547365665436, | |
| "learning_rate": 1.3075382603854157e-06, | |
| "loss": 0.0014, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.686575708424769, | |
| "grad_norm": 0.026124007999897003, | |
| "learning_rate": 1.1892087394873353e-06, | |
| "loss": 0.0015, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.701841427344719, | |
| "grad_norm": 0.1011100560426712, | |
| "learning_rate": 1.076426129731084e-06, | |
| "loss": 0.0022, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.717107146264669, | |
| "grad_norm": 0.09242038428783417, | |
| "learning_rate": 9.692032478545e-07, | |
| "loss": 0.002, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.73237286518462, | |
| "grad_norm": 0.017347002401947975, | |
| "learning_rate": 8.675522787819023e-07, | |
| "loss": 0.0036, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.73237286518462, | |
| "eval_loss": 0.058684661984443665, | |
| "eval_runtime": 156.2813, | |
| "eval_samples_per_second": 7.455, | |
| "eval_steps_per_second": 7.455, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.74763858410457, | |
| "grad_norm": 0.20832569897174835, | |
| "learning_rate": 7.714847742394337e-07, | |
| "loss": 0.0035, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.762904303024521, | |
| "grad_norm": 0.20151978731155396, | |
| "learning_rate": 6.810116514422593e-07, | |
| "loss": 0.0046, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.7781700219444705, | |
| "grad_norm": 0.048032715916633606, | |
| "learning_rate": 5.961431918539817e-07, | |
| "loss": 0.0025, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.793435740864421, | |
| "grad_norm": 0.013832991011440754, | |
| "learning_rate": 5.16889040018187e-07, | |
| "loss": 0.002, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.808701459784372, | |
| "grad_norm": 0.12184188514947891, | |
| "learning_rate": 4.432582024624543e-07, | |
| "loss": 0.0021, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.808701459784372, | |
| "eval_loss": 0.05884731933474541, | |
| "eval_runtime": 156.187, | |
| "eval_samples_per_second": 7.459, | |
| "eval_steps_per_second": 7.459, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.823967178704322, | |
| "grad_norm": 0.14583688974380493, | |
| "learning_rate": 3.7525904667484737e-07, | |
| "loss": 0.0015, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.839232897624273, | |
| "grad_norm": 1.1857644319534302, | |
| "learning_rate": 3.128993001530245e-07, | |
| "loss": 0.0025, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.854498616544223, | |
| "grad_norm": 0.10287916660308838, | |
| "learning_rate": 2.5618604952605816e-07, | |
| "loss": 0.003, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.869764335464174, | |
| "grad_norm": 0.02538507618010044, | |
| "learning_rate": 2.0512573974912908e-07, | |
| "loss": 0.0024, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.885030054384123, | |
| "grad_norm": 0.5639934539794922, | |
| "learning_rate": 1.597241733711008e-07, | |
| "loss": 0.0028, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.885030054384123, | |
| "eval_loss": 0.058813489973545074, | |
| "eval_runtime": 156.3015, | |
| "eval_samples_per_second": 7.454, | |
| "eval_steps_per_second": 7.454, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.900295773304074, | |
| "grad_norm": 0.22544801235198975, | |
| "learning_rate": 1.1998650987510295e-07, | |
| "loss": 0.0015, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.9155614922240245, | |
| "grad_norm": 0.014127412810921669, | |
| "learning_rate": 8.591726509222242e-08, | |
| "loss": 0.0059, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.930827211143975, | |
| "grad_norm": 0.09811969101428986, | |
| "learning_rate": 5.752031068830266e-08, | |
| "loss": 0.0016, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.946092930063926, | |
| "grad_norm": 0.08793773502111435, | |
| "learning_rate": 3.4798873723984605e-08, | |
| "loss": 0.0008, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.961358648983875, | |
| "grad_norm": 0.23520350456237793, | |
| "learning_rate": 1.7755536287944464e-08, | |
| "loss": 0.0021, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.961358648983875, | |
| "eval_loss": 0.058792199939489365, | |
| "eval_runtime": 156.1853, | |
| "eval_samples_per_second": 7.459, | |
| "eval_steps_per_second": 7.459, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.976624367903826, | |
| "grad_norm": 0.024279046803712845, | |
| "learning_rate": 6.3922352034895276e-09, | |
| "loss": 0.0052, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 4.991890086823776, | |
| "grad_norm": 0.1994946151971817, | |
| "learning_rate": 7.102618084620094e-10, | |
| "loss": 0.0023, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 4.999522946283752, | |
| "step": 3275, | |
| "total_flos": 8.22077121848279e+17, | |
| "train_loss": 0.04081964985958037, | |
| "train_runtime": 35506.1452, | |
| "train_samples_per_second": 1.476, | |
| "train_steps_per_second": 0.092 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3275, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.22077121848279e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
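
Within the stretch of the trace shown here, `eval_loss` bottoms out at step 2500 (0.04553) and drifts upward through the end of the run (0.05879 at step 3250), so the checkpoint worth shipping precedes the final one. Below is a minimal sketch of recovering that programmatically, assuming this log is the standard `trainer_state.json` that the Hugging Face Trainer writes; `OUTPUT_DIR` is a placeholder path, not one taken from this run.

```python
import json

# OUTPUT_DIR is a placeholder, not a path from this run: the Hugging Face
# Trainer saves this log as trainer_state.json in its output directory and
# inside every checkpoint-* folder.
STATE_PATH = "OUTPUT_DIR/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history interleaves train records (keyed "loss", every
# logging_steps=10 steps) with eval records (keyed "eval_loss", every
# 50 steps); the final summary record at max_steps carries neither key.
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.5f} at step {best['step']}")

# Tail of the curve: a steady rise after the minimum means the late
# checkpoints are overfit and an earlier one should be kept.
for e in eval_log[-5:]:
    print(f"step {e['step']:>5}  eval_loss {e['eval_loss']:.5f}")
```

Because `save_steps` is 50 and evaluation also runs every 50 steps, the step that minimizes `eval_loss` maps directly onto a `checkpoint-<step>` directory saved during the run.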