| { | |
| "best_metric": 0.028858734294772148, | |
| "best_model_checkpoint": "saves/psy-course/MentaLLaMA-chat-7B/train/fold4/checkpoint-1250", | |
| "epoch": 4.999522946283752, | |
| "eval_steps": 50, | |
| "global_step": 3275, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.015265718919950386, | |
| "grad_norm": 1.6029620170593262, | |
| "learning_rate": 3.0487804878048782e-06, | |
| "loss": 1.7277, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.030531437839900772, | |
| "grad_norm": 2.1076600551605225, | |
| "learning_rate": 6.0975609756097564e-06, | |
| "loss": 1.689, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.04579715675985116, | |
| "grad_norm": 2.5429253578186035, | |
| "learning_rate": 9.146341463414634e-06, | |
| "loss": 1.6402, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.061062875679801544, | |
| "grad_norm": 2.8307013511657715, | |
| "learning_rate": 1.2195121951219513e-05, | |
| "loss": 1.437, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.07632859459975193, | |
| "grad_norm": 0.9449720978736877, | |
| "learning_rate": 1.524390243902439e-05, | |
| "loss": 0.796, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.07632859459975193, | |
| "eval_loss": 0.614504337310791, | |
| "eval_runtime": 178.8075, | |
| "eval_samples_per_second": 6.515, | |
| "eval_steps_per_second": 6.515, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.09159431351970232, | |
| "grad_norm": 1.0583168268203735, | |
| "learning_rate": 1.8292682926829268e-05, | |
| "loss": 0.5954, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.10686003243965271, | |
| "grad_norm": 0.9122205972671509, | |
| "learning_rate": 2.134146341463415e-05, | |
| "loss": 0.4645, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.12212575135960309, | |
| "grad_norm": 0.8755609393119812, | |
| "learning_rate": 2.4390243902439026e-05, | |
| "loss": 0.3116, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.13739147027955348, | |
| "grad_norm": 0.8426439166069031, | |
| "learning_rate": 2.7439024390243906e-05, | |
| "loss": 0.1824, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.15265718919950386, | |
| "grad_norm": 0.43453994393348694, | |
| "learning_rate": 3.048780487804878e-05, | |
| "loss": 0.1534, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.15265718919950386, | |
| "eval_loss": 0.104978546500206, | |
| "eval_runtime": 178.8662, | |
| "eval_samples_per_second": 6.513, | |
| "eval_steps_per_second": 6.513, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.16792290811945426, | |
| "grad_norm": 0.4413648843765259, | |
| "learning_rate": 3.353658536585366e-05, | |
| "loss": 0.1087, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.18318862703940464, | |
| "grad_norm": 0.8687300682067871, | |
| "learning_rate": 3.6585365853658535e-05, | |
| "loss": 0.096, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.19845434595935502, | |
| "grad_norm": 0.6343626379966736, | |
| "learning_rate": 3.9634146341463416e-05, | |
| "loss": 0.0853, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.21372006487930542, | |
| "grad_norm": 0.7491863369941711, | |
| "learning_rate": 4.26829268292683e-05, | |
| "loss": 0.0767, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.2289857837992558, | |
| "grad_norm": 0.495426744222641, | |
| "learning_rate": 4.573170731707318e-05, | |
| "loss": 0.0619, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.2289857837992558, | |
| "eval_loss": 0.06294012069702148, | |
| "eval_runtime": 178.9812, | |
| "eval_samples_per_second": 6.509, | |
| "eval_steps_per_second": 6.509, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.24425150271920618, | |
| "grad_norm": 0.7597019076347351, | |
| "learning_rate": 4.878048780487805e-05, | |
| "loss": 0.0873, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.2595172216391566, | |
| "grad_norm": 0.804664671421051, | |
| "learning_rate": 5.182926829268293e-05, | |
| "loss": 0.0739, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.27478294055910696, | |
| "grad_norm": 0.3810129165649414, | |
| "learning_rate": 5.487804878048781e-05, | |
| "loss": 0.0792, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.29004865947905734, | |
| "grad_norm": 0.6029999852180481, | |
| "learning_rate": 5.792682926829268e-05, | |
| "loss": 0.0695, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.3053143783990077, | |
| "grad_norm": 0.6588765382766724, | |
| "learning_rate": 6.097560975609756e-05, | |
| "loss": 0.0535, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.3053143783990077, | |
| "eval_loss": 0.05500848591327667, | |
| "eval_runtime": 179.0405, | |
| "eval_samples_per_second": 6.507, | |
| "eval_steps_per_second": 6.507, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.3205800973189581, | |
| "grad_norm": 0.5788701772689819, | |
| "learning_rate": 6.402439024390244e-05, | |
| "loss": 0.0593, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.3358458162389085, | |
| "grad_norm": 0.7792841196060181, | |
| "learning_rate": 6.707317073170732e-05, | |
| "loss": 0.0566, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.3511115351588589, | |
| "grad_norm": 0.7082734704017639, | |
| "learning_rate": 7.012195121951219e-05, | |
| "loss": 0.0449, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.3663772540788093, | |
| "grad_norm": 0.5391858816146851, | |
| "learning_rate": 7.317073170731707e-05, | |
| "loss": 0.0636, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.38164297299875966, | |
| "grad_norm": 0.47337889671325684, | |
| "learning_rate": 7.621951219512195e-05, | |
| "loss": 0.0576, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.38164297299875966, | |
| "eval_loss": 0.04678180441260338, | |
| "eval_runtime": 178.9876, | |
| "eval_samples_per_second": 6.509, | |
| "eval_steps_per_second": 6.509, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.39690869191871003, | |
| "grad_norm": 0.7037196159362793, | |
| "learning_rate": 7.926829268292683e-05, | |
| "loss": 0.0537, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.4121744108386604, | |
| "grad_norm": 0.44716084003448486, | |
| "learning_rate": 8.231707317073171e-05, | |
| "loss": 0.0513, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.42744012975861084, | |
| "grad_norm": 0.288339227437973, | |
| "learning_rate": 8.53658536585366e-05, | |
| "loss": 0.0524, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.4427058486785612, | |
| "grad_norm": 0.5624875426292419, | |
| "learning_rate": 8.841463414634147e-05, | |
| "loss": 0.0575, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.4579715675985116, | |
| "grad_norm": 0.3156270682811737, | |
| "learning_rate": 9.146341463414635e-05, | |
| "loss": 0.0501, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.4579715675985116, | |
| "eval_loss": 0.046911776065826416, | |
| "eval_runtime": 178.9736, | |
| "eval_samples_per_second": 6.509, | |
| "eval_steps_per_second": 6.509, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.473237286518462, | |
| "grad_norm": 0.5870501399040222, | |
| "learning_rate": 9.451219512195122e-05, | |
| "loss": 0.0536, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.48850300543841235, | |
| "grad_norm": 0.27398020029067993, | |
| "learning_rate": 9.75609756097561e-05, | |
| "loss": 0.0369, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.5037687243583627, | |
| "grad_norm": 0.6370400190353394, | |
| "learning_rate": 9.999988635788465e-05, | |
| "loss": 0.0474, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.5190344432783132, | |
| "grad_norm": 0.6367189288139343, | |
| "learning_rate": 9.999590893808788e-05, | |
| "loss": 0.0455, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.5343001621982635, | |
| "grad_norm": 0.47603678703308105, | |
| "learning_rate": 9.998624992909386e-05, | |
| "loss": 0.0456, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5343001621982635, | |
| "eval_loss": 0.042300350964069366, | |
| "eval_runtime": 178.9958, | |
| "eval_samples_per_second": 6.509, | |
| "eval_steps_per_second": 6.509, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.5495658811182139, | |
| "grad_norm": 0.17798028886318207, | |
| "learning_rate": 9.997091042856284e-05, | |
| "loss": 0.0413, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.5648316000381643, | |
| "grad_norm": 0.2773096561431885, | |
| "learning_rate": 9.994989217969224e-05, | |
| "loss": 0.0513, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.5800973189581147, | |
| "grad_norm": 0.7576242685317993, | |
| "learning_rate": 9.992319757101863e-05, | |
| "loss": 0.0538, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.5953630378780651, | |
| "grad_norm": 0.39998742938041687, | |
| "learning_rate": 9.98908296361462e-05, | |
| "loss": 0.0593, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.6106287567980154, | |
| "grad_norm": 0.32975825667381287, | |
| "learning_rate": 9.98527920534021e-05, | |
| "loss": 0.0487, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6106287567980154, | |
| "eval_loss": 0.03924345597624779, | |
| "eval_runtime": 179.019, | |
| "eval_samples_per_second": 6.508, | |
| "eval_steps_per_second": 6.508, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.6258944757179659, | |
| "grad_norm": 0.4215024709701538, | |
| "learning_rate": 9.980908914541844e-05, | |
| "loss": 0.0436, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.6411601946379162, | |
| "grad_norm": 0.27699562907218933, | |
| "learning_rate": 9.975972587864095e-05, | |
| "loss": 0.0507, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.6564259135578666, | |
| "grad_norm": 0.559887170791626, | |
| "learning_rate": 9.970470786276467e-05, | |
| "loss": 0.0378, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.671691632477817, | |
| "grad_norm": 0.42969998717308044, | |
| "learning_rate": 9.964404135009648e-05, | |
| "loss": 0.0495, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.6869573513977674, | |
| "grad_norm": 0.2953483462333679, | |
| "learning_rate": 9.957773323484454e-05, | |
| "loss": 0.0543, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.6869573513977674, | |
| "eval_loss": 0.03886280581355095, | |
| "eval_runtime": 179.0976, | |
| "eval_samples_per_second": 6.505, | |
| "eval_steps_per_second": 6.505, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.7022230703177178, | |
| "grad_norm": 0.2300979197025299, | |
| "learning_rate": 9.950579105233483e-05, | |
| "loss": 0.0386, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.7174887892376681, | |
| "grad_norm": 0.2619139552116394, | |
| "learning_rate": 9.94282229781548e-05, | |
| "loss": 0.0402, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.7327545081576186, | |
| "grad_norm": 0.49340057373046875, | |
| "learning_rate": 9.934503782722438e-05, | |
| "loss": 0.0444, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.748020227077569, | |
| "grad_norm": 0.23474624752998352, | |
| "learning_rate": 9.925624505279411e-05, | |
| "loss": 0.0383, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.7632859459975193, | |
| "grad_norm": 0.12562145292758942, | |
| "learning_rate": 9.916185474537098e-05, | |
| "loss": 0.0298, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7632859459975193, | |
| "eval_loss": 0.03485063835978508, | |
| "eval_runtime": 179.1055, | |
| "eval_samples_per_second": 6.505, | |
| "eval_steps_per_second": 6.505, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.7785516649174697, | |
| "grad_norm": 0.33692803978919983, | |
| "learning_rate": 9.906187763157168e-05, | |
| "loss": 0.0472, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.7938173838374201, | |
| "grad_norm": 0.28782883286476135, | |
| "learning_rate": 9.895632507290362e-05, | |
| "loss": 0.0479, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.8090831027573705, | |
| "grad_norm": 0.2520039677619934, | |
| "learning_rate": 9.884520906447379e-05, | |
| "loss": 0.0359, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.8243488216773208, | |
| "grad_norm": 0.3067379295825958, | |
| "learning_rate": 9.872854223362562e-05, | |
| "loss": 0.035, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.8396145405972713, | |
| "grad_norm": 0.5226966738700867, | |
| "learning_rate": 9.860633783850406e-05, | |
| "loss": 0.0378, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8396145405972713, | |
| "eval_loss": 0.03473576903343201, | |
| "eval_runtime": 179.0927, | |
| "eval_samples_per_second": 6.505, | |
| "eval_steps_per_second": 6.505, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.8548802595172217, | |
| "grad_norm": 0.16309522092342377, | |
| "learning_rate": 9.847860976654879e-05, | |
| "loss": 0.0404, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.870145978437172, | |
| "grad_norm": 0.18132278323173523, | |
| "learning_rate": 9.834537253291616e-05, | |
| "loss": 0.0401, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.8854116973571224, | |
| "grad_norm": 0.583713948726654, | |
| "learning_rate": 9.820664127882957e-05, | |
| "loss": 0.0468, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.9006774162770728, | |
| "grad_norm": 0.3539969325065613, | |
| "learning_rate": 9.806243176985888e-05, | |
| "loss": 0.0437, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.9159431351970232, | |
| "grad_norm": 0.3411082923412323, | |
| "learning_rate": 9.791276039412875e-05, | |
| "loss": 0.0387, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.9159431351970232, | |
| "eval_loss": 0.032861627638339996, | |
| "eval_runtime": 179.1835, | |
| "eval_samples_per_second": 6.502, | |
| "eval_steps_per_second": 6.502, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.9312088541169735, | |
| "grad_norm": 0.22567597031593323, | |
| "learning_rate": 9.775764416045628e-05, | |
| "loss": 0.0502, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.946474573036924, | |
| "grad_norm": 0.29538026452064514, | |
| "learning_rate": 9.759710069641814e-05, | |
| "loss": 0.042, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.9617402919568744, | |
| "grad_norm": 0.20513112843036652, | |
| "learning_rate": 9.743114824634734e-05, | |
| "loss": 0.0283, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.9770060108768247, | |
| "grad_norm": 0.26253455877304077, | |
| "learning_rate": 9.725980566925989e-05, | |
| "loss": 0.0367, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.9922717297967751, | |
| "grad_norm": 0.2190350592136383, | |
| "learning_rate": 9.708309243671165e-05, | |
| "loss": 0.0275, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.9922717297967751, | |
| "eval_loss": 0.035241108387708664, | |
| "eval_runtime": 179.2823, | |
| "eval_samples_per_second": 6.498, | |
| "eval_steps_per_second": 6.498, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.0075374487167255, | |
| "grad_norm": 0.09830069541931152, | |
| "learning_rate": 9.690102863058563e-05, | |
| "loss": 0.0321, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.0228031676366758, | |
| "grad_norm": 0.2949357032775879, | |
| "learning_rate": 9.67136349408098e-05, | |
| "loss": 0.0293, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.0380688865566263, | |
| "grad_norm": 0.10271728783845901, | |
| "learning_rate": 9.652093266300583e-05, | |
| "loss": 0.0309, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.0533346054765766, | |
| "grad_norm": 0.2911938726902008, | |
| "learning_rate": 9.632294369606916e-05, | |
| "loss": 0.0213, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.068600324396527, | |
| "grad_norm": 0.23335903882980347, | |
| "learning_rate": 9.61196905396802e-05, | |
| "loss": 0.0348, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.068600324396527, | |
| "eval_loss": 0.03389936685562134, | |
| "eval_runtime": 179.1248, | |
| "eval_samples_per_second": 6.504, | |
| "eval_steps_per_second": 6.504, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.0838660433164775, | |
| "grad_norm": 0.26263928413391113, | |
| "learning_rate": 9.591119629174764e-05, | |
| "loss": 0.0398, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.0991317622364278, | |
| "grad_norm": 0.15275321900844574, | |
| "learning_rate": 9.569748464578343e-05, | |
| "loss": 0.0229, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.1143974811563782, | |
| "grad_norm": 0.23484571278095245, | |
| "learning_rate": 9.54785798882103e-05, | |
| "loss": 0.029, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.1296632000763287, | |
| "grad_norm": 0.18005479872226715, | |
| "learning_rate": 9.525450689560181e-05, | |
| "loss": 0.0266, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.144928918996279, | |
| "grad_norm": 0.17082622647285461, | |
| "learning_rate": 9.502529113185532e-05, | |
| "loss": 0.0346, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.144928918996279, | |
| "eval_loss": 0.03384435176849365, | |
| "eval_runtime": 179.1418, | |
| "eval_samples_per_second": 6.503, | |
| "eval_steps_per_second": 6.503, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.1601946379162293, | |
| "grad_norm": 0.31557226181030273, | |
| "learning_rate": 9.479095864529828e-05, | |
| "loss": 0.0297, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.1754603568361797, | |
| "grad_norm": 0.17261050641536713, | |
| "learning_rate": 9.455153606572806e-05, | |
| "loss": 0.0242, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.1907260757561302, | |
| "grad_norm": 0.25501230359077454, | |
| "learning_rate": 9.430705060138569e-05, | |
| "loss": 0.0325, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.2059917946760805, | |
| "grad_norm": 0.47437065839767456, | |
| "learning_rate": 9.405753003586395e-05, | |
| "loss": 0.0288, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.2212575135960309, | |
| "grad_norm": 0.20840288698673248, | |
| "learning_rate": 9.38030027249499e-05, | |
| "loss": 0.0321, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2212575135960309, | |
| "eval_loss": 0.033740561455488205, | |
| "eval_runtime": 179.1979, | |
| "eval_samples_per_second": 6.501, | |
| "eval_steps_per_second": 6.501, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.2365232325159814, | |
| "grad_norm": 0.24225717782974243, | |
| "learning_rate": 9.354349759340263e-05, | |
| "loss": 0.0278, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.2517889514359317, | |
| "grad_norm": 0.20259130001068115, | |
| "learning_rate": 9.327904413166615e-05, | |
| "loss": 0.0291, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.267054670355882, | |
| "grad_norm": 0.4767257273197174, | |
| "learning_rate": 9.300967239251798e-05, | |
| "loss": 0.0374, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.2823203892758324, | |
| "grad_norm": 0.20219506323337555, | |
| "learning_rate": 9.27354129876541e-05, | |
| "loss": 0.03, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.297586108195783, | |
| "grad_norm": 0.19528479874134064, | |
| "learning_rate": 9.245629708421008e-05, | |
| "loss": 0.0288, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.297586108195783, | |
| "eval_loss": 0.0333208329975605, | |
| "eval_runtime": 179.2291, | |
| "eval_samples_per_second": 6.5, | |
| "eval_steps_per_second": 6.5, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.3128518271157332, | |
| "grad_norm": 0.3612363934516907, | |
| "learning_rate": 9.217235640121926e-05, | |
| "loss": 0.0311, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.3281175460356835, | |
| "grad_norm": 0.4072886109352112, | |
| "learning_rate": 9.188362320600812e-05, | |
| "loss": 0.0371, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.343383264955634, | |
| "grad_norm": 0.2643483281135559, | |
| "learning_rate": 9.159013031052943e-05, | |
| "loss": 0.0266, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.3586489838755844, | |
| "grad_norm": 0.30413269996643066, | |
| "learning_rate": 9.129191106763346e-05, | |
| "loss": 0.0276, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.3739147027955347, | |
| "grad_norm": 0.23679038882255554, | |
| "learning_rate": 9.098899936727771e-05, | |
| "loss": 0.028, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.3739147027955347, | |
| "eval_loss": 0.03594866767525673, | |
| "eval_runtime": 179.217, | |
| "eval_samples_per_second": 6.501, | |
| "eval_steps_per_second": 6.501, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.3891804217154853, | |
| "grad_norm": 0.4948261082172394, | |
| "learning_rate": 9.068142963267558e-05, | |
| "loss": 0.0262, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.4044461406354356, | |
| "grad_norm": 0.27103114128112793, | |
| "learning_rate": 9.036923681638463e-05, | |
| "loss": 0.0291, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.419711859555386, | |
| "grad_norm": 0.17580902576446533, | |
| "learning_rate": 9.00524563963343e-05, | |
| "loss": 0.0205, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.4349775784753362, | |
| "grad_norm": 0.22460690140724182, | |
| "learning_rate": 8.973112437179436e-05, | |
| "loss": 0.033, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.4502432973952868, | |
| "grad_norm": 0.16176696121692657, | |
| "learning_rate": 8.940527725928383e-05, | |
| "loss": 0.0277, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4502432973952868, | |
| "eval_loss": 0.03296082094311714, | |
| "eval_runtime": 179.256, | |
| "eval_samples_per_second": 6.499, | |
| "eval_steps_per_second": 6.499, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.4655090163152371, | |
| "grad_norm": 0.16683737933635712, | |
| "learning_rate": 8.90749520884212e-05, | |
| "loss": 0.028, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.4807747352351874, | |
| "grad_norm": 0.23072558641433716, | |
| "learning_rate": 8.874018639771637e-05, | |
| "loss": 0.031, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.4960404541551378, | |
| "grad_norm": 0.18204113841056824, | |
| "learning_rate": 8.840101823030471e-05, | |
| "loss": 0.0261, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.5113061730750883, | |
| "grad_norm": 0.2924092411994934, | |
| "learning_rate": 8.805748612962382e-05, | |
| "loss": 0.0295, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.5265718919950386, | |
| "grad_norm": 0.1391419917345047, | |
| "learning_rate": 8.77096291350334e-05, | |
| "loss": 0.0202, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.5265718919950386, | |
| "eval_loss": 0.03339965641498566, | |
| "eval_runtime": 179.5613, | |
| "eval_samples_per_second": 6.488, | |
| "eval_steps_per_second": 6.488, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.541837610914989, | |
| "grad_norm": 0.2946327030658722, | |
| "learning_rate": 8.735748677737874e-05, | |
| "loss": 0.0299, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 1.5571033298349395, | |
| "grad_norm": 0.8059619069099426, | |
| "learning_rate": 8.700109907449845e-05, | |
| "loss": 0.0223, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 1.5723690487548898, | |
| "grad_norm": 0.09737138450145721, | |
| "learning_rate": 8.66405065266768e-05, | |
| "loss": 0.0251, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 1.5876347676748401, | |
| "grad_norm": 0.20474199950695038, | |
| "learning_rate": 8.627575011204115e-05, | |
| "loss": 0.0241, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 1.6029004865947907, | |
| "grad_norm": 0.13863246142864227, | |
| "learning_rate": 8.590687128190516e-05, | |
| "loss": 0.0291, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.6029004865947907, | |
| "eval_loss": 0.03433850780129433, | |
| "eval_runtime": 179.7512, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.618166205514741, | |
| "grad_norm": 0.12294421344995499, | |
| "learning_rate": 8.553391195605833e-05, | |
| "loss": 0.0247, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 1.6334319244346913, | |
| "grad_norm": 0.2797018587589264, | |
| "learning_rate": 8.515691451800205e-05, | |
| "loss": 0.0426, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 1.6486976433546419, | |
| "grad_norm": 0.19925975799560547, | |
| "learning_rate": 8.477592181013316e-05, | |
| "loss": 0.0237, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 1.663963362274592, | |
| "grad_norm": 0.16685841977596283, | |
| "learning_rate": 8.439097712887531e-05, | |
| "loss": 0.0274, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 1.6792290811945425, | |
| "grad_norm": 0.2913149893283844, | |
| "learning_rate": 8.400212421975865e-05, | |
| "loss": 0.0402, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.6792290811945425, | |
| "eval_loss": 0.03206111863255501, | |
| "eval_runtime": 179.642, | |
| "eval_samples_per_second": 6.485, | |
| "eval_steps_per_second": 6.485, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.694494800114493, | |
| "grad_norm": 0.19945311546325684, | |
| "learning_rate": 8.360940727244859e-05, | |
| "loss": 0.0343, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 1.7097605190344431, | |
| "grad_norm": 0.20246616005897522, | |
| "learning_rate": 8.321287091572403e-05, | |
| "loss": 0.0233, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 1.7250262379543937, | |
| "grad_norm": 0.455074667930603, | |
| "learning_rate": 8.281256021240566e-05, | |
| "loss": 0.0283, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 1.740291956874344, | |
| "grad_norm": 0.1479114294052124, | |
| "learning_rate": 8.240852065423506e-05, | |
| "loss": 0.0351, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.7555576757942943, | |
| "grad_norm": 0.3095378577709198, | |
| "learning_rate": 8.20007981567048e-05, | |
| "loss": 0.0268, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7555576757942943, | |
| "eval_loss": 0.03280867636203766, | |
| "eval_runtime": 179.7524, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.7708233947142449, | |
| "grad_norm": 0.1850660890340805, | |
| "learning_rate": 8.158943905384082e-05, | |
| "loss": 0.0322, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.7860891136341952, | |
| "grad_norm": 0.23737244307994843, | |
| "learning_rate": 8.117449009293668e-05, | |
| "loss": 0.0291, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.8013548325541455, | |
| "grad_norm": 0.18103042244911194, | |
| "learning_rate": 8.075599842924139e-05, | |
| "loss": 0.0276, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.816620551474096, | |
| "grad_norm": 0.18441970646381378, | |
| "learning_rate": 8.033401162060049e-05, | |
| "loss": 0.0343, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.8318862703940464, | |
| "grad_norm": 0.36745738983154297, | |
| "learning_rate": 7.990857762205157e-05, | |
| "loss": 0.0411, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.8318862703940464, | |
| "eval_loss": 0.032041117548942566, | |
| "eval_runtime": 179.8085, | |
| "eval_samples_per_second": 6.479, | |
| "eval_steps_per_second": 6.479, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.8471519893139967, | |
| "grad_norm": 0.28123798966407776, | |
| "learning_rate": 7.947974478037468e-05, | |
| "loss": 0.0355, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.8624177082339473, | |
| "grad_norm": 0.6014788150787354, | |
| "learning_rate": 7.904756182859797e-05, | |
| "loss": 0.0235, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.8776834271538976, | |
| "grad_norm": 0.11423056572675705, | |
| "learning_rate": 7.861207788045984e-05, | |
| "loss": 0.0327, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.892949146073848, | |
| "grad_norm": 0.33343955874443054, | |
| "learning_rate": 7.817334242482738e-05, | |
| "loss": 0.028, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.9082148649937984, | |
| "grad_norm": 0.12100326269865036, | |
| "learning_rate": 7.773140532007262e-05, | |
| "loss": 0.0262, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9082148649937984, | |
| "eval_loss": 0.028858734294772148, | |
| "eval_runtime": 179.691, | |
| "eval_samples_per_second": 6.483, | |
| "eval_steps_per_second": 6.483, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.9234805839137485, | |
| "grad_norm": 0.1283182054758072, | |
| "learning_rate": 7.728631678840638e-05, | |
| "loss": 0.0267, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.938746302833699, | |
| "grad_norm": 0.11831828206777573, | |
| "learning_rate": 7.683812741017112e-05, | |
| "loss": 0.0252, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.9540120217536494, | |
| "grad_norm": 0.33695539832115173, | |
| "learning_rate": 7.638688811809274e-05, | |
| "loss": 0.0294, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 1.9692777406735997, | |
| "grad_norm": 0.19394966959953308, | |
| "learning_rate": 7.593265019149275e-05, | |
| "loss": 0.029, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 1.9845434595935503, | |
| "grad_norm": 0.24795125424861908, | |
| "learning_rate": 7.547546525046073e-05, | |
| "loss": 0.0318, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 1.9845434595935503, | |
| "eval_loss": 0.02917238511145115, | |
| "eval_runtime": 179.7965, | |
| "eval_samples_per_second": 6.48, | |
| "eval_steps_per_second": 6.48, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 1.9998091785135006, | |
| "grad_norm": 0.08313370496034622, | |
| "learning_rate": 7.501538524998812e-05, | |
| "loss": 0.0305, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 2.015074897433451, | |
| "grad_norm": 0.16452887654304504, | |
| "learning_rate": 7.455246247406406e-05, | |
| "loss": 0.0137, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 2.0303406163534015, | |
| "grad_norm": 0.26041746139526367, | |
| "learning_rate": 7.408674952973382e-05, | |
| "loss": 0.0173, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 2.0456063352733516, | |
| "grad_norm": 0.19397439062595367, | |
| "learning_rate": 7.361829934112036e-05, | |
| "loss": 0.0217, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 2.060872054193302, | |
| "grad_norm": 0.19004268944263458, | |
| "learning_rate": 7.314716514341006e-05, | |
| "loss": 0.0159, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.060872054193302, | |
| "eval_loss": 0.030632687732577324, | |
| "eval_runtime": 179.7659, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 2.0761377731132526, | |
| "grad_norm": 0.2005777806043625, | |
| "learning_rate": 7.267340047680305e-05, | |
| "loss": 0.0168, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 2.0914034920332027, | |
| "grad_norm": 0.2647518813610077, | |
| "learning_rate": 7.21970591804287e-05, | |
| "loss": 0.0159, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 2.1066692109531533, | |
| "grad_norm": 0.27719810605049133, | |
| "learning_rate": 7.171819538622747e-05, | |
| "loss": 0.0202, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 2.121934929873104, | |
| "grad_norm": 0.17780855298042297, | |
| "learning_rate": 7.123686351279914e-05, | |
| "loss": 0.0182, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 2.137200648793054, | |
| "grad_norm": 0.2095697820186615, | |
| "learning_rate": 7.07531182592187e-05, | |
| "loss": 0.019, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.137200648793054, | |
| "eval_loss": 0.031268153339624405, | |
| "eval_runtime": 179.7579, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 2.1524663677130045, | |
| "grad_norm": 0.3520405888557434, | |
| "learning_rate": 7.026701459882026e-05, | |
| "loss": 0.0271, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 2.167732086632955, | |
| "grad_norm": 0.16760867834091187, | |
| "learning_rate": 6.977860777294988e-05, | |
| "loss": 0.0193, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 2.182997805552905, | |
| "grad_norm": 0.1556943655014038, | |
| "learning_rate": 6.92879532846878e-05, | |
| "loss": 0.017, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 2.1982635244728557, | |
| "grad_norm": 0.17596006393432617, | |
| "learning_rate": 6.879510689254104e-05, | |
| "loss": 0.016, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 2.213529243392806, | |
| "grad_norm": 0.18705613911151886, | |
| "learning_rate": 6.830012460410697e-05, | |
| "loss": 0.022, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.213529243392806, | |
| "eval_loss": 0.03142455592751503, | |
| "eval_runtime": 179.7594, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 2.2287949623127563, | |
| "grad_norm": 0.22514861822128296, | |
| "learning_rate": 6.780306266970851e-05, | |
| "loss": 0.0143, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 2.244060681232707, | |
| "grad_norm": 0.3207738995552063, | |
| "learning_rate": 6.73039775760018e-05, | |
| "loss": 0.0203, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 2.2593264001526574, | |
| "grad_norm": 0.14879602193832397, | |
| "learning_rate": 6.680292603955702e-05, | |
| "loss": 0.0153, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 2.2745921190726075, | |
| "grad_norm": 0.16453400254249573, | |
| "learning_rate": 6.629996500041299e-05, | |
| "loss": 0.0181, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 2.289857837992558, | |
| "grad_norm": 0.23307697474956512, | |
| "learning_rate": 6.579515161560649e-05, | |
| "loss": 0.0133, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.289857837992558, | |
| "eval_loss": 0.032863009721040726, | |
| "eval_runtime": 179.7521, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 2.305123556912508, | |
| "grad_norm": 0.29715099930763245, | |
| "learning_rate": 6.528854325267692e-05, | |
| "loss": 0.0225, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 2.3203892758324587, | |
| "grad_norm": 0.27671730518341064, | |
| "learning_rate": 6.478019748314686e-05, | |
| "loss": 0.0193, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 2.3356549947524092, | |
| "grad_norm": 0.08666820079088211, | |
| "learning_rate": 6.42701720759797e-05, | |
| "loss": 0.0206, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 2.3509207136723593, | |
| "grad_norm": 0.08629153668880463, | |
| "learning_rate": 6.375852499101467e-05, | |
| "loss": 0.0147, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 2.36618643259231, | |
| "grad_norm": 0.11571142822504044, | |
| "learning_rate": 6.324531437238019e-05, | |
| "loss": 0.0134, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.36618643259231, | |
| "eval_loss": 0.031696803867816925, | |
| "eval_runtime": 179.7802, | |
| "eval_samples_per_second": 6.48, | |
| "eval_steps_per_second": 6.48, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 2.3814521515122604, | |
| "grad_norm": 0.2257205992937088, | |
| "learning_rate": 6.273059854188636e-05, | |
| "loss": 0.0228, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 2.3967178704322105, | |
| "grad_norm": 0.44024819135665894, | |
| "learning_rate": 6.221443599239721e-05, | |
| "loss": 0.0203, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 2.411983589352161, | |
| "grad_norm": 0.20170512795448303, | |
| "learning_rate": 6.169688538118342e-05, | |
| "loss": 0.0227, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 2.4272493082721116, | |
| "grad_norm": 0.15628792345523834, | |
| "learning_rate": 6.117800552325655e-05, | |
| "loss": 0.0181, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 2.4425150271920617, | |
| "grad_norm": 0.17330731451511383, | |
| "learning_rate": 6.06578553846852e-05, | |
| "loss": 0.0185, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.4425150271920617, | |
| "eval_loss": 0.029930831864476204, | |
| "eval_runtime": 179.7394, | |
| "eval_samples_per_second": 6.482, | |
| "eval_steps_per_second": 6.482, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 2.4577807461120122, | |
| "grad_norm": 0.23071150481700897, | |
| "learning_rate": 6.013649407589401e-05, | |
| "loss": 0.0236, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 2.473046465031963, | |
| "grad_norm": 0.17635267972946167, | |
| "learning_rate": 5.961398084494634e-05, | |
| "loss": 0.0211, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 2.488312183951913, | |
| "grad_norm": 0.2662081718444824, | |
| "learning_rate": 5.909037507081121e-05, | |
| "loss": 0.0247, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 2.5035779028718634, | |
| "grad_norm": 0.16230575740337372, | |
| "learning_rate": 5.8565736256615434e-05, | |
| "loss": 0.0243, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 2.518843621791814, | |
| "grad_norm": 0.11765624582767487, | |
| "learning_rate": 5.8040124022881625e-05, | |
| "loss": 0.0229, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.518843621791814, | |
| "eval_loss": 0.030760467052459717, | |
| "eval_runtime": 179.8009, | |
| "eval_samples_per_second": 6.479, | |
| "eval_steps_per_second": 6.479, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 2.534109340711764, | |
| "grad_norm": 0.2117856740951538, | |
| "learning_rate": 5.751359810075284e-05, | |
| "loss": 0.0155, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 2.5493750596317146, | |
| "grad_norm": 0.1411379873752594, | |
| "learning_rate": 5.6986218325204676e-05, | |
| "loss": 0.0133, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 2.5646407785516647, | |
| "grad_norm": 0.1770981103181839, | |
| "learning_rate": 5.645804462824556e-05, | |
| "loss": 0.0159, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 2.5799064974716153, | |
| "grad_norm": 0.3099724352359772, | |
| "learning_rate": 5.5929137032106005e-05, | |
| "loss": 0.0182, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 2.595172216391566, | |
| "grad_norm": 0.22490672767162323, | |
| "learning_rate": 5.53995556424176e-05, | |
| "loss": 0.0224, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.595172216391566, | |
| "eval_loss": 0.030679209157824516, | |
| "eval_runtime": 179.8574, | |
| "eval_samples_per_second": 6.477, | |
| "eval_steps_per_second": 6.477, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 2.610437935311516, | |
| "grad_norm": 0.13174369931221008, | |
| "learning_rate": 5.4869360641382615e-05, | |
| "loss": 0.0152, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 2.6257036542314665, | |
| "grad_norm": 0.21557074785232544, | |
| "learning_rate": 5.433861228093471e-05, | |
| "loss": 0.0209, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 2.640969373151417, | |
| "grad_norm": 0.13589583337306976, | |
| "learning_rate": 5.380737087589197e-05, | |
| "loss": 0.0144, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 2.656235092071367, | |
| "grad_norm": 0.23737147450447083, | |
| "learning_rate": 5.327569679710256e-05, | |
| "loss": 0.0236, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 2.6715008109913176, | |
| "grad_norm": 0.15822412073612213, | |
| "learning_rate": 5.274365046458416e-05, | |
| "loss": 0.0159, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.6715008109913176, | |
| "eval_loss": 0.03090774454176426, | |
| "eval_runtime": 179.9376, | |
| "eval_samples_per_second": 6.474, | |
| "eval_steps_per_second": 6.474, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 2.686766529911268, | |
| "grad_norm": 0.31121695041656494, | |
| "learning_rate": 5.2211292340657804e-05, | |
| "loss": 0.0209, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 2.7020322488312183, | |
| "grad_norm": 0.1423243284225464, | |
| "learning_rate": 5.167868292307678e-05, | |
| "loss": 0.0225, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 2.717297967751169, | |
| "grad_norm": 0.18490996956825256, | |
| "learning_rate": 5.114588273815173e-05, | |
| "loss": 0.019, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 2.732563686671119, | |
| "grad_norm": 0.2611638307571411, | |
| "learning_rate": 5.061295233387223e-05, | |
| "loss": 0.0228, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 2.7478294055910695, | |
| "grad_norm": 0.15606072545051575, | |
| "learning_rate": 5.007995227302617e-05, | |
| "loss": 0.0185, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.7478294055910695, | |
| "eval_loss": 0.03144565969705582, | |
| "eval_runtime": 179.8019, | |
| "eval_samples_per_second": 6.479, | |
| "eval_steps_per_second": 6.479, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 2.76309512451102, | |
| "grad_norm": 0.09613101929426193, | |
| "learning_rate": 4.954694312631729e-05, | |
| "loss": 0.0176, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 2.7783608434309706, | |
| "grad_norm": 0.23314177989959717, | |
| "learning_rate": 4.901398546548181e-05, | |
| "loss": 0.0179, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 2.7936265623509207, | |
| "grad_norm": 0.2562658488750458, | |
| "learning_rate": 4.848113985640513e-05, | |
| "loss": 0.0163, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 2.808892281270871, | |
| "grad_norm": 0.13945740461349487, | |
| "learning_rate": 4.794846685223884e-05, | |
| "loss": 0.0194, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 2.8241580001908213, | |
| "grad_norm": 0.2183397263288498, | |
| "learning_rate": 4.741602698651966e-05, | |
| "loss": 0.0203, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.8241580001908213, | |
| "eval_loss": 0.03035680390894413, | |
| "eval_runtime": 179.8704, | |
| "eval_samples_per_second": 6.477, | |
| "eval_steps_per_second": 6.477, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 2.839423719110772, | |
| "grad_norm": 0.10274723172187805, | |
| "learning_rate": 4.6883880766290086e-05, | |
| "loss": 0.0208, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 2.8546894380307224, | |
| "grad_norm": 0.1990806758403778, | |
| "learning_rate": 4.635208866522251e-05, | |
| "loss": 0.0149, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 2.8699551569506725, | |
| "grad_norm": 0.15856200456619263, | |
| "learning_rate": 4.5820711116746785e-05, | |
| "loss": 0.0134, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 2.885220875870623, | |
| "grad_norm": 0.30222058296203613, | |
| "learning_rate": 4.528980850718255e-05, | |
| "loss": 0.0216, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 2.9004865947905736, | |
| "grad_norm": 0.13257752358913422, | |
| "learning_rate": 4.475944116887695e-05, | |
| "loss": 0.0159, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.9004865947905736, | |
| "eval_loss": 0.031748704612255096, | |
| "eval_runtime": 179.7821, | |
| "eval_samples_per_second": 6.48, | |
| "eval_steps_per_second": 6.48, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 2.9157523137105237, | |
| "grad_norm": 0.2612052261829376, | |
| "learning_rate": 4.4229669373348226e-05, | |
| "loss": 0.0215, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 2.9310180326304742, | |
| "grad_norm": 0.15642981231212616, | |
| "learning_rate": 4.3700553324436575e-05, | |
| "loss": 0.0191, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 2.9462837515504248, | |
| "grad_norm": 0.24419263005256653, | |
| "learning_rate": 4.317215315146238e-05, | |
| "loss": 0.0232, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 2.961549470470375, | |
| "grad_norm": 0.12495076656341553, | |
| "learning_rate": 4.264452890239315e-05, | |
| "loss": 0.0223, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 2.9768151893903254, | |
| "grad_norm": 0.26308339834213257, | |
| "learning_rate": 4.211774053701952e-05, | |
| "loss": 0.0229, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 2.9768151893903254, | |
| "eval_loss": 0.030370745807886124, | |
| "eval_runtime": 179.8091, | |
| "eval_samples_per_second": 6.479, | |
| "eval_steps_per_second": 6.479, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 2.9920809083102755, | |
| "grad_norm": 0.21157263219356537, | |
| "learning_rate": 4.159184792014145e-05, | |
| "loss": 0.0163, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 3.007346627230226, | |
| "grad_norm": 0.062321823090314865, | |
| "learning_rate": 4.1066910814765016e-05, | |
| "loss": 0.0113, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 3.0226123461501766, | |
| "grad_norm": 0.1957968771457672, | |
| "learning_rate": 4.0542988875310995e-05, | |
| "loss": 0.0138, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 3.0378780650701267, | |
| "grad_norm": 0.12392223626375198, | |
| "learning_rate": 4.002014164083552e-05, | |
| "loss": 0.012, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 3.0531437839900772, | |
| "grad_norm": 0.09297648817300797, | |
| "learning_rate": 3.9498428528264204e-05, | |
| "loss": 0.0087, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.0531437839900772, | |
| "eval_loss": 0.032914210110902786, | |
| "eval_runtime": 179.7483, | |
| "eval_samples_per_second": 6.481, | |
| "eval_steps_per_second": 6.481, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 3.068409502910028, | |
| "grad_norm": 0.1610821634531021, | |
| "learning_rate": 3.89779088256397e-05, | |
| "loss": 0.0093, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 3.083675221829978, | |
| "grad_norm": 0.18227307498455048, | |
| "learning_rate": 3.845864168538437e-05, | |
| "loss": 0.0129, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 3.0989409407499284, | |
| "grad_norm": 0.26220542192459106, | |
| "learning_rate": 3.794068611757794e-05, | |
| "loss": 0.0136, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 3.114206659669879, | |
| "grad_norm": 0.15985815227031708, | |
| "learning_rate": 3.7424100983251695e-05, | |
| "loss": 0.0122, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 3.129472378589829, | |
| "grad_norm": 0.06649836152791977, | |
| "learning_rate": 3.6908944987699345e-05, | |
| "loss": 0.0073, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.129472378589829, | |
| "eval_loss": 0.033551715314388275, | |
| "eval_runtime": 179.8686, | |
| "eval_samples_per_second": 6.477, | |
| "eval_steps_per_second": 6.477, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 3.1447380975097796, | |
| "grad_norm": 0.11365962028503418, | |
| "learning_rate": 3.639527667380571e-05, | |
| "loss": 0.0084, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 3.16000381642973, | |
| "grad_norm": 0.1839054673910141, | |
| "learning_rate": 3.5883154415393885e-05, | |
| "loss": 0.0101, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 3.1752695353496803, | |
| "grad_norm": 0.20857231318950653, | |
| "learning_rate": 3.537263641059152e-05, | |
| "loss": 0.0107, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 3.190535254269631, | |
| "grad_norm": 0.15886227786540985, | |
| "learning_rate": 3.486378067521718e-05, | |
| "loss": 0.0114, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 3.2058009731895813, | |
| "grad_norm": 0.10763030499219894, | |
| "learning_rate": 3.435664503618732e-05, | |
| "loss": 0.0068, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.2058009731895813, | |
| "eval_loss": 0.03458699211478233, | |
| "eval_runtime": 179.8039, | |
| "eval_samples_per_second": 6.479, | |
| "eval_steps_per_second": 6.479, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 3.2210666921095314, | |
| "grad_norm": 0.15906715393066406, | |
| "learning_rate": 3.3851287124944756e-05, | |
| "loss": 0.0091, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 3.236332411029482, | |
| "grad_norm": 0.07679519057273865, | |
| "learning_rate": 3.334776437090944e-05, | |
| "loss": 0.0094, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 3.251598129949432, | |
| "grad_norm": 0.07010853290557861, | |
| "learning_rate": 3.2846133994952046e-05, | |
| "loss": 0.0091, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 3.2668638488693826, | |
| "grad_norm": 0.28941255807876587, | |
| "learning_rate": 3.234645300289136e-05, | |
| "loss": 0.0051, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 3.282129567789333, | |
| "grad_norm": 0.2712288796901703, | |
| "learning_rate": 3.1848778179016075e-05, | |
| "loss": 0.0099, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.282129567789333, | |
| "eval_loss": 0.03673892095685005, | |
| "eval_runtime": 179.9183, | |
| "eval_samples_per_second": 6.475, | |
| "eval_steps_per_second": 6.475, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 3.2973952867092833, | |
| "grad_norm": 0.3802885413169861, | |
| "learning_rate": 3.135316607963176e-05, | |
| "loss": 0.0108, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 3.312661005629234, | |
| "grad_norm": 0.19786600768566132, | |
| "learning_rate": 3.085967302663375e-05, | |
| "loss": 0.0082, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 3.3279267245491844, | |
| "grad_norm": 0.25580260157585144, | |
| "learning_rate": 3.0368355101106615e-05, | |
| "loss": 0.0153, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 3.3431924434691345, | |
| "grad_norm": 0.18568645417690277, | |
| "learning_rate": 2.987926813695116e-05, | |
| "loss": 0.0092, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 3.358458162389085, | |
| "grad_norm": 0.15619678795337677, | |
| "learning_rate": 2.939246771453924e-05, | |
| "loss": 0.0124, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.358458162389085, | |
| "eval_loss": 0.035190921276807785, | |
| "eval_runtime": 180.011, | |
| "eval_samples_per_second": 6.472, | |
| "eval_steps_per_second": 6.472, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 3.3737238813090356, | |
| "grad_norm": 0.2756540775299072, | |
| "learning_rate": 2.890800915439772e-05, | |
| "loss": 0.0112, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 3.3889896002289857, | |
| "grad_norm": 0.15437598526477814, | |
| "learning_rate": 2.842594751092159e-05, | |
| "loss": 0.0095, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 3.404255319148936, | |
| "grad_norm": 0.21479307115077972, | |
| "learning_rate": 2.794633756611776e-05, | |
| "loss": 0.0122, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 3.4195210380688867, | |
| "grad_norm": 0.21604545414447784, | |
| "learning_rate": 2.7469233823379347e-05, | |
| "loss": 0.0089, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 3.434786756988837, | |
| "grad_norm": 0.38754087686538696, | |
| "learning_rate": 2.6994690501292032e-05, | |
| "loss": 0.0087, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.434786756988837, | |
| "eval_loss": 0.03597886487841606, | |
| "eval_runtime": 179.9469, | |
| "eval_samples_per_second": 6.474, | |
| "eval_steps_per_second": 6.474, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 3.4500524759087874, | |
| "grad_norm": 0.2704348862171173, | |
| "learning_rate": 2.652276152747246e-05, | |
| "loss": 0.0072, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 3.465318194828738, | |
| "grad_norm": 0.27907001972198486, | |
| "learning_rate": 2.6053500532439968e-05, | |
| "loss": 0.0078, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 3.480583913748688, | |
| "grad_norm": 0.024898068979382515, | |
| "learning_rate": 2.5586960843521824e-05, | |
| "loss": 0.0098, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 3.4958496326686386, | |
| "grad_norm": 0.4165664315223694, | |
| "learning_rate": 2.5123195478793217e-05, | |
| "loss": 0.0106, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 3.5111153515885887, | |
| "grad_norm": 0.17015843093395233, | |
| "learning_rate": 2.466225714105202e-05, | |
| "loss": 0.0131, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.5111153515885887, | |
| "eval_loss": 0.03541536629199982, | |
| "eval_runtime": 179.5955, | |
| "eval_samples_per_second": 6.487, | |
| "eval_steps_per_second": 6.487, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 3.526381070508539, | |
| "grad_norm": 0.10995722562074661, | |
| "learning_rate": 2.420419821182982e-05, | |
| "loss": 0.0118, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 3.5416467894284898, | |
| "grad_norm": 0.1612035483121872, | |
| "learning_rate": 2.3749070745438996e-05, | |
| "loss": 0.0063, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 3.5569125083484403, | |
| "grad_norm": 0.15208382904529572, | |
| "learning_rate": 2.3296926463057396e-05, | |
| "loss": 0.0102, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 3.5721782272683904, | |
| "grad_norm": 0.19346974790096283, | |
| "learning_rate": 2.284781674685058e-05, | |
| "loss": 0.0079, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 3.587443946188341, | |
| "grad_norm": 0.43242356181144714, | |
| "learning_rate": 2.2401792634132708e-05, | |
| "loss": 0.0143, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.587443946188341, | |
| "eval_loss": 0.036229364573955536, | |
| "eval_runtime": 178.755, | |
| "eval_samples_per_second": 6.517, | |
| "eval_steps_per_second": 6.517, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 3.602709665108291, | |
| "grad_norm": 0.35156679153442383, | |
| "learning_rate": 2.195890481156666e-05, | |
| "loss": 0.0097, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 3.6179753840282416, | |
| "grad_norm": 0.09875629097223282, | |
| "learning_rate": 2.151920360940387e-05, | |
| "loss": 0.0112, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 3.633241102948192, | |
| "grad_norm": 0.26106587052345276, | |
| "learning_rate": 2.1082738995764785e-05, | |
| "loss": 0.0078, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 3.6485068218681422, | |
| "grad_norm": 0.15816567838191986, | |
| "learning_rate": 2.0649560570960465e-05, | |
| "loss": 0.0046, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 3.663772540788093, | |
| "grad_norm": 0.2872539460659027, | |
| "learning_rate": 2.0219717561855855e-05, | |
| "loss": 0.0066, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.663772540788093, | |
| "eval_loss": 0.036142896860837936, | |
| "eval_runtime": 177.8777, | |
| "eval_samples_per_second": 6.549, | |
| "eval_steps_per_second": 6.549, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 3.679038259708043, | |
| "grad_norm": 0.16133087873458862, | |
| "learning_rate": 1.9793258816275728e-05, | |
| "loss": 0.0112, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 3.6943039786279934, | |
| "grad_norm": 0.32570400834083557, | |
| "learning_rate": 1.9370232797453402e-05, | |
| "loss": 0.009, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 3.709569697547944, | |
| "grad_norm": 0.21927714347839355, | |
| "learning_rate": 1.8950687578523502e-05, | |
| "loss": 0.0085, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 3.7248354164678945, | |
| "grad_norm": 0.2975069284439087, | |
| "learning_rate": 1.853467083705869e-05, | |
| "loss": 0.0057, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 3.7401011353878446, | |
| "grad_norm": 0.19914095103740692, | |
| "learning_rate": 1.8122229849651716e-05, | |
| "loss": 0.0093, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.7401011353878446, | |
| "eval_loss": 0.03722745552659035, | |
| "eval_runtime": 177.626, | |
| "eval_samples_per_second": 6.559, | |
| "eval_steps_per_second": 6.559, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 3.755366854307795, | |
| "grad_norm": 0.20476055145263672, | |
| "learning_rate": 1.7713411486542707e-05, | |
| "loss": 0.0091, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 3.7706325732277453, | |
| "grad_norm": 0.22789469361305237, | |
| "learning_rate": 1.7308262206292897e-05, | |
| "loss": 0.0111, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 3.785898292147696, | |
| "grad_norm": 0.16632677614688873, | |
| "learning_rate": 1.6906828050504907e-05, | |
| "loss": 0.014, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 3.8011640110676463, | |
| "grad_norm": 0.23071546852588654, | |
| "learning_rate": 1.650915463859068e-05, | |
| "loss": 0.0092, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 3.8164297299875964, | |
| "grad_norm": 0.08320309221744537, | |
| "learning_rate": 1.6115287162587055e-05, | |
| "loss": 0.0074, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.8164297299875964, | |
| "eval_loss": 0.03621383011341095, | |
| "eval_runtime": 177.3254, | |
| "eval_samples_per_second": 6.57, | |
| "eval_steps_per_second": 6.57, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 3.831695448907547, | |
| "grad_norm": 0.07662249356508255, | |
| "learning_rate": 1.57252703820203e-05, | |
| "loss": 0.0069, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 3.8469611678274975, | |
| "grad_norm": 0.35770368576049805, | |
| "learning_rate": 1.5339148618819393e-05, | |
| "loss": 0.0093, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 3.8622268867474476, | |
| "grad_norm": 0.17595680058002472, | |
| "learning_rate": 1.4956965752279395e-05, | |
| "loss": 0.0105, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 3.877492605667398, | |
| "grad_norm": 0.28761520981788635, | |
| "learning_rate": 1.457876521407484e-05, | |
| "loss": 0.0124, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 3.8927583245873487, | |
| "grad_norm": 0.24026645720005035, | |
| "learning_rate": 1.4204589983324173e-05, | |
| "loss": 0.0081, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.8927583245873487, | |
| "eval_loss": 0.03659246861934662, | |
| "eval_runtime": 177.2547, | |
| "eval_samples_per_second": 6.572, | |
| "eval_steps_per_second": 6.572, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 3.908024043507299, | |
| "grad_norm": 0.1325196474790573, | |
| "learning_rate": 1.383448258170557e-05, | |
| "loss": 0.0097, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 3.9232897624272494, | |
| "grad_norm": 0.18720892071723938, | |
| "learning_rate": 1.3468485068624653e-05, | |
| "loss": 0.009, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 3.9385554813471995, | |
| "grad_norm": 0.03328974172472954, | |
| "learning_rate": 1.310663903643492e-05, | |
| "loss": 0.009, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 3.95382120026715, | |
| "grad_norm": 0.08329346030950546, | |
| "learning_rate": 1.2748985605711028e-05, | |
| "loss": 0.0087, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 3.9690869191871005, | |
| "grad_norm": 0.1863928586244583, | |
| "learning_rate": 1.2395565420575932e-05, | |
| "loss": 0.0088, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 3.9690869191871005, | |
| "eval_loss": 0.03702830523252487, | |
| "eval_runtime": 176.9895, | |
| "eval_samples_per_second": 6.582, | |
| "eval_steps_per_second": 6.582, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 3.984352638107051, | |
| "grad_norm": 0.1559509038925171, | |
| "learning_rate": 1.2046418644081903e-05, | |
| "loss": 0.0178, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 3.999618357027001, | |
| "grad_norm": 0.302651584148407, | |
| "learning_rate": 1.1701584953646505e-05, | |
| "loss": 0.0086, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 4.014884075946951, | |
| "grad_norm": 0.19416853785514832, | |
| "learning_rate": 1.1361103536543466e-05, | |
| "loss": 0.0047, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 4.030149794866902, | |
| "grad_norm": 0.0938921868801117, | |
| "learning_rate": 1.1025013085449527e-05, | |
| "loss": 0.0035, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 4.045415513786852, | |
| "grad_norm": 0.1689745932817459, | |
| "learning_rate": 1.0693351794047224e-05, | |
| "loss": 0.0052, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.045415513786852, | |
| "eval_loss": 0.037683501839637756, | |
| "eval_runtime": 176.8431, | |
| "eval_samples_per_second": 6.588, | |
| "eval_steps_per_second": 6.588, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 4.060681232706803, | |
| "grad_norm": 0.13139070570468903, | |
| "learning_rate": 1.036615735268468e-05, | |
| "loss": 0.0041, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 4.0759469516267535, | |
| "grad_norm": 0.04985181987285614, | |
| "learning_rate": 1.0043466944092272e-05, | |
| "loss": 0.0059, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 4.091212670546703, | |
| "grad_norm": 0.09271463006734848, | |
| "learning_rate": 9.72531723915726e-06, | |
| "loss": 0.0086, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 4.106478389466654, | |
| "grad_norm": 0.03634309768676758, | |
| "learning_rate": 9.411744392756405e-06, | |
| "loss": 0.0052, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 4.121744108386604, | |
| "grad_norm": 0.06921113282442093, | |
| "learning_rate": 9.102784039647339e-06, | |
| "loss": 0.0053, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.121744108386604, | |
| "eval_loss": 0.039026983082294464, | |
| "eval_runtime": 176.6518, | |
| "eval_samples_per_second": 6.595, | |
| "eval_steps_per_second": 6.595, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 4.137009827306555, | |
| "grad_norm": 0.046318791806697845, | |
| "learning_rate": 8.79847129041893e-06, | |
| "loss": 0.0039, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 4.152275546226505, | |
| "grad_norm": 0.07762674242258072, | |
| "learning_rate": 8.498840727501316e-06, | |
| "loss": 0.0049, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.167541265146456, | |
| "grad_norm": 0.0868455171585083, | |
| "learning_rate": 8.203926401235957e-06, | |
| "loss": 0.0043, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.1828069840664055, | |
| "grad_norm": 0.13933709263801575, | |
| "learning_rate": 7.913761826006017e-06, | |
| "loss": 0.0037, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.198072702986356, | |
| "grad_norm": 0.11513333767652512, | |
| "learning_rate": 7.628379976427868e-06, | |
| "loss": 0.0025, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.198072702986356, | |
| "eval_loss": 0.040155019611120224, | |
| "eval_runtime": 176.5921, | |
| "eval_samples_per_second": 6.597, | |
| "eval_steps_per_second": 6.597, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.213338421906307, | |
| "grad_norm": 0.3080286681652069, | |
| "learning_rate": 7.347813283603705e-06, | |
| "loss": 0.0067, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.228604140826257, | |
| "grad_norm": 0.06484497338533401, | |
| "learning_rate": 7.072093631436161e-06, | |
| "loss": 0.0041, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.243869859746208, | |
| "grad_norm": 0.023219596594572067, | |
| "learning_rate": 6.801252353004867e-06, | |
| "loss": 0.0039, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.259135578666158, | |
| "grad_norm": 0.13570910692214966, | |
| "learning_rate": 6.535320227005826e-06, | |
| "loss": 0.0059, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.274401297586108, | |
| "grad_norm": 0.0693252682685852, | |
| "learning_rate": 6.274327474253611e-06, | |
| "loss": 0.0027, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.274401297586108, | |
| "eval_loss": 0.04144105687737465, | |
| "eval_runtime": 176.6226, | |
| "eval_samples_per_second": 6.596, | |
| "eval_steps_per_second": 6.596, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.289667016506058, | |
| "grad_norm": 0.10616520047187805, | |
| "learning_rate": 6.018303754247112e-06, | |
| "loss": 0.0051, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.304932735426009, | |
| "grad_norm": 0.14562755823135376, | |
| "learning_rate": 5.767278161798911e-06, | |
| "loss": 0.0042, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.3201984543459595, | |
| "grad_norm": 0.1878853291273117, | |
| "learning_rate": 5.521279223729026e-06, | |
| "loss": 0.0035, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.33546417326591, | |
| "grad_norm": 0.17619742453098297, | |
| "learning_rate": 5.280334895622968e-06, | |
| "loss": 0.004, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.350729892185861, | |
| "grad_norm": 0.15715391933918, | |
| "learning_rate": 5.044472558654961e-06, | |
| "loss": 0.0076, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.350729892185861, | |
| "eval_loss": 0.04184148460626602, | |
| "eval_runtime": 176.611, | |
| "eval_samples_per_second": 6.596, | |
| "eval_steps_per_second": 6.596, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.36599561110581, | |
| "grad_norm": 0.05749718099832535, | |
| "learning_rate": 4.813719016476203e-06, | |
| "loss": 0.0029, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.381261330025761, | |
| "grad_norm": 0.27915164828300476, | |
| "learning_rate": 4.588100492168973e-06, | |
| "loss": 0.003, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.396527048945711, | |
| "grad_norm": 0.26044297218322754, | |
| "learning_rate": 4.367642625266511e-06, | |
| "loss": 0.0027, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.411792767865662, | |
| "grad_norm": 0.09349619597196579, | |
| "learning_rate": 4.1523704688394176e-06, | |
| "loss": 0.0042, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.427058486785612, | |
| "grad_norm": 0.06762149930000305, | |
| "learning_rate": 3.9423084866484884e-06, | |
| "loss": 0.0022, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.427058486785612, | |
| "eval_loss": 0.04281175881624222, | |
| "eval_runtime": 176.5277, | |
| "eval_samples_per_second": 6.6, | |
| "eval_steps_per_second": 6.6, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.442324205705562, | |
| "grad_norm": 0.15383104979991913, | |
| "learning_rate": 3.737480550364736e-06, | |
| "loss": 0.0062, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.457589924625513, | |
| "grad_norm": 0.21743904054164886, | |
| "learning_rate": 3.5379099368564817e-06, | |
| "loss": 0.0056, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.472855643545463, | |
| "grad_norm": 0.05257946625351906, | |
| "learning_rate": 3.3436193255442396e-06, | |
| "loss": 0.0034, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.488121362465414, | |
| "grad_norm": 0.05678296461701393, | |
| "learning_rate": 3.1546307958233214e-06, | |
| "loss": 0.0065, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.503387081385364, | |
| "grad_norm": 0.09029638022184372, | |
| "learning_rate": 2.9709658245547834e-06, | |
| "loss": 0.0036, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.503387081385364, | |
| "eval_loss": 0.04338888078927994, | |
| "eval_runtime": 176.4344, | |
| "eval_samples_per_second": 6.603, | |
| "eval_steps_per_second": 6.603, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.518652800305315, | |
| "grad_norm": 0.017809836193919182, | |
| "learning_rate": 2.792645283624712e-06, | |
| "loss": 0.0051, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.5339185192252645, | |
| "grad_norm": 0.08156479895114899, | |
| "learning_rate": 2.6196894375723645e-06, | |
| "loss": 0.0016, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.549184238145215, | |
| "grad_norm": 0.24031901359558105, | |
| "learning_rate": 2.452117941287246e-06, | |
| "loss": 0.0049, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.5644499570651655, | |
| "grad_norm": 0.2679056227207184, | |
| "learning_rate": 2.2899498377755566e-06, | |
| "loss": 0.0024, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.579715675985116, | |
| "grad_norm": 0.1409529745578766, | |
| "learning_rate": 2.1332035559960663e-06, | |
| "loss": 0.003, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.579715675985116, | |
| "eval_loss": 0.04322050139307976, | |
| "eval_runtime": 176.3389, | |
| "eval_samples_per_second": 6.607, | |
| "eval_steps_per_second": 6.607, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.594981394905067, | |
| "grad_norm": 0.3694174289703369, | |
| "learning_rate": 1.9818969087658735e-06, | |
| "loss": 0.0034, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.610247113825016, | |
| "grad_norm": 0.007240964099764824, | |
| "learning_rate": 1.8360470907361093e-06, | |
| "loss": 0.002, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.625512832744967, | |
| "grad_norm": 0.21716032922267914, | |
| "learning_rate": 1.6956706764379438e-06, | |
| "loss": 0.0046, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.640778551664917, | |
| "grad_norm": 0.10940048843622208, | |
| "learning_rate": 1.5607836183989921e-06, | |
| "loss": 0.0033, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.656044270584868, | |
| "grad_norm": 0.14919409155845642, | |
| "learning_rate": 1.4314012453305215e-06, | |
| "loss": 0.002, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.656044270584868, | |
| "eval_loss": 0.043372880667448044, | |
| "eval_runtime": 176.3259, | |
| "eval_samples_per_second": 6.607, | |
| "eval_steps_per_second": 6.607, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.6713099895048185, | |
| "grad_norm": 0.21336524188518524, | |
| "learning_rate": 1.3075382603854157e-06, | |
| "loss": 0.003, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.686575708424769, | |
| "grad_norm": 0.1394193321466446, | |
| "learning_rate": 1.1892087394873353e-06, | |
| "loss": 0.0034, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.701841427344719, | |
| "grad_norm": 0.02440819889307022, | |
| "learning_rate": 1.076426129731084e-06, | |
| "loss": 0.0036, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.717107146264669, | |
| "grad_norm": 0.14676684141159058, | |
| "learning_rate": 9.692032478545e-07, | |
| "loss": 0.0069, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.73237286518462, | |
| "grad_norm": 0.01789388246834278, | |
| "learning_rate": 8.675522787819023e-07, | |
| "loss": 0.0078, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.73237286518462, | |
| "eval_loss": 0.043460384011268616, | |
| "eval_runtime": 176.2874, | |
| "eval_samples_per_second": 6.609, | |
| "eval_steps_per_second": 6.609, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.74763858410457, | |
| "grad_norm": 0.04277348518371582, | |
| "learning_rate": 7.714847742394337e-07, | |
| "loss": 0.0053, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.762904303024521, | |
| "grad_norm": 0.26189810037612915, | |
| "learning_rate": 6.810116514422593e-07, | |
| "loss": 0.005, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.7781700219444705, | |
| "grad_norm": 0.054308198392391205, | |
| "learning_rate": 5.961431918539817e-07, | |
| "loss": 0.0032, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.793435740864421, | |
| "grad_norm": 0.10629548877477646, | |
| "learning_rate": 5.16889040018187e-07, | |
| "loss": 0.0055, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.808701459784372, | |
| "grad_norm": 0.05638623237609863, | |
| "learning_rate": 4.432582024624543e-07, | |
| "loss": 0.0033, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.808701459784372, | |
| "eval_loss": 0.04357059299945831, | |
| "eval_runtime": 176.3321, | |
| "eval_samples_per_second": 6.607, | |
| "eval_steps_per_second": 6.607, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.823967178704322, | |
| "grad_norm": 0.035809215158224106, | |
| "learning_rate": 3.7525904667484737e-07, | |
| "loss": 0.0029, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.839232897624273, | |
| "grad_norm": 0.25203967094421387, | |
| "learning_rate": 3.128993001530245e-07, | |
| "loss": 0.0059, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.854498616544223, | |
| "grad_norm": 0.04179414361715317, | |
| "learning_rate": 2.5618604952605816e-07, | |
| "loss": 0.0046, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.869764335464174, | |
| "grad_norm": 0.14175380766391754, | |
| "learning_rate": 2.0512573974912908e-07, | |
| "loss": 0.0038, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.885030054384123, | |
| "grad_norm": 0.20164638757705688, | |
| "learning_rate": 1.597241733711008e-07, | |
| "loss": 0.0055, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.885030054384123, | |
| "eval_loss": 0.043629422783851624, | |
| "eval_runtime": 176.2217, | |
| "eval_samples_per_second": 6.611, | |
| "eval_steps_per_second": 6.611, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.900295773304074, | |
| "grad_norm": 0.10687878727912903, | |
| "learning_rate": 1.1998650987510295e-07, | |
| "loss": 0.0035, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.9155614922240245, | |
| "grad_norm": 0.010824548080563545, | |
| "learning_rate": 8.591726509222242e-08, | |
| "loss": 0.0074, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.930827211143975, | |
| "grad_norm": 0.13975496590137482, | |
| "learning_rate": 5.752031068830266e-08, | |
| "loss": 0.0041, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.946092930063926, | |
| "grad_norm": 0.08203299343585968, | |
| "learning_rate": 3.4798873723984605e-08, | |
| "loss": 0.0032, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.961358648983875, | |
| "grad_norm": 0.22268716990947723, | |
| "learning_rate": 1.7755536287944464e-08, | |
| "loss": 0.006, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.961358648983875, | |
| "eval_loss": 0.043610408902168274, | |
| "eval_runtime": 176.2018, | |
| "eval_samples_per_second": 6.612, | |
| "eval_steps_per_second": 6.612, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.976624367903826, | |
| "grad_norm": 0.07115557044744492, | |
| "learning_rate": 6.3922352034895276e-09, | |
| "loss": 0.0103, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 4.991890086823776, | |
| "grad_norm": 0.18499644100666046, | |
| "learning_rate": 7.102618084620094e-10, | |
| "loss": 0.0049, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 4.999522946283752, | |
| "step": 3275, | |
| "total_flos": 8.8535579239868e+17, | |
| "train_loss": 0.048749475333526845, | |
| "train_runtime": 37525.7545, | |
| "train_samples_per_second": 1.397, | |
| "train_steps_per_second": 0.087 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3275, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.8535579239868e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
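
The log above is the standard Trainer state layout: `log_history` interleaves training records (with `loss`, `grad_norm`, `learning_rate`) and evaluation records (with `eval_loss`, `eval_runtime`), keyed by `step` and `epoch`. Below is a minimal sketch of how one might consume such a file to locate the best evaluation checkpoint; the file name `trainer_state.json` and the assumption that the state is stored as plain JSON (without the table formatting shown here) are mine, not taken from the source.

```python
import json

# Minimal sketch (assumptions: the state is saved as plain JSON under the
# hypothetical path "trainer_state.json"; adjust to your checkpoint directory).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# log_history mixes training records (carrying "loss") and evaluation records
# (carrying "eval_loss"); keep only the evaluation ones.
eval_records = [r for r in state["log_history"] if "eval_loss" in r]

# Report the step with the lowest validation loss.
best = min(eval_records, key=lambda r: r["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.6f} at step {best['step']} "
      f"(epoch {best['epoch']:.3f})")

# Optional: emit a simple step -> eval_loss listing for plotting elsewhere.
for r in eval_records:
    print(r["step"], r["eval_loss"])
```

Under these assumptions the script simply mirrors what the trainer already records in `best_metric` / `best_model_checkpoint`, but it is handy for re-deriving the curve of `eval_loss` over `step` (here evaluated every 50 steps, per `eval_steps`/`save_steps`) when comparing folds.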