{
  "best_metric": 0.027184655889868736,
  "best_model_checkpoint": "saves/psy-course/MentaLLaMA-chat-7B/train/fold7/checkpoint-1800",
  "epoch": 4.9961802902979375,
  "eval_steps": 50,
  "global_step": 3270,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015278838808250574,
      "grad_norm": 1.848912000656128,
      "learning_rate": 3.0581039755351682e-06,
      "loss": 1.7007,
      "step": 10
    },
    {
      "epoch": 0.030557677616501147,
      "grad_norm": 1.845645785331726,
      "learning_rate": 6.1162079510703365e-06,
      "loss": 1.6546,
      "step": 20
    },
    {
      "epoch": 0.04583651642475172,
      "grad_norm": 2.5136754512786865,
      "learning_rate": 9.174311926605506e-06,
      "loss": 1.5461,
      "step": 30
    },
    {
      "epoch": 0.061115355233002294,
      "grad_norm": 3.130951404571533,
      "learning_rate": 1.2232415902140673e-05,
      "loss": 1.3083,
      "step": 40
    },
    {
      "epoch": 0.07639419404125286,
      "grad_norm": 0.9243296384811401,
      "learning_rate": 1.5290519877675842e-05,
      "loss": 0.8412,
      "step": 50
    },
    {
      "epoch": 0.07639419404125286,
      "eval_loss": 0.6190279722213745,
      "eval_runtime": 219.3498,
      "eval_samples_per_second": 5.307,
      "eval_steps_per_second": 5.307,
      "step": 50
    },
    {
      "epoch": 0.09167303284950344,
      "grad_norm": 1.0339502096176147,
      "learning_rate": 1.834862385321101e-05,
      "loss": 0.6464,
      "step": 60
    },
    {
      "epoch": 0.10695187165775401,
      "grad_norm": 1.244530200958252,
      "learning_rate": 2.140672782874618e-05,
      "loss": 0.4868,
      "step": 70
    },
    {
      "epoch": 0.12223071046600459,
      "grad_norm": 0.8109800815582275,
      "learning_rate": 2.4464831804281346e-05,
      "loss": 0.3051,
      "step": 80
    },
    {
      "epoch": 0.13750954927425516,
      "grad_norm": 0.551679253578186,
      "learning_rate": 2.7522935779816515e-05,
      "loss": 0.1913,
      "step": 90
    },
    {
      "epoch": 0.15278838808250572,
      "grad_norm": 0.790469765663147,
      "learning_rate": 3.0581039755351684e-05,
      "loss": 0.1455,
      "step": 100
    },
    {
      "epoch": 0.15278838808250572,
      "eval_loss": 0.10693324357271194,
      "eval_runtime": 219.1206,
      "eval_samples_per_second": 5.312,
      "eval_steps_per_second": 5.312,
      "step": 100
    },
    {
      "epoch": 0.16806722689075632,
      "grad_norm": 0.5916261672973633,
      "learning_rate": 3.363914373088685e-05,
      "loss": 0.1072,
      "step": 110
    },
    {
      "epoch": 0.18334606569900688,
      "grad_norm": 0.5760579705238342,
      "learning_rate": 3.669724770642202e-05,
      "loss": 0.096,
      "step": 120
    },
    {
      "epoch": 0.19862490450725745,
      "grad_norm": 0.6538326740264893,
      "learning_rate": 3.9755351681957185e-05,
      "loss": 0.0842,
      "step": 130
    },
    {
      "epoch": 0.21390374331550802,
      "grad_norm": 0.6428112983703613,
      "learning_rate": 4.281345565749236e-05,
      "loss": 0.0834,
      "step": 140
    },
    {
      "epoch": 0.22918258212375858,
      "grad_norm": 0.6178712248802185,
      "learning_rate": 4.587155963302753e-05,
      "loss": 0.0861,
      "step": 150
    },
    {
      "epoch": 0.22918258212375858,
      "eval_loss": 0.06472994387149811,
      "eval_runtime": 218.5605,
      "eval_samples_per_second": 5.326,
      "eval_steps_per_second": 5.326,
      "step": 150
    },
    {
      "epoch": 0.24446142093200918,
      "grad_norm": 0.7672821283340454,
      "learning_rate": 4.892966360856269e-05,
      "loss": 0.0823,
      "step": 160
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 0.593714714050293,
      "learning_rate": 5.1987767584097854e-05,
      "loss": 0.0676,
      "step": 170
    },
    {
      "epoch": 0.2750190985485103,
      "grad_norm": 0.5263367295265198,
      "learning_rate": 5.504587155963303e-05,
      "loss": 0.0731,
      "step": 180
    },
    {
      "epoch": 0.2902979373567609,
      "grad_norm": 0.8246402740478516,
      "learning_rate": 5.81039755351682e-05,
      "loss": 0.0799,
      "step": 190
    },
    {
      "epoch": 0.30557677616501144,
      "grad_norm": 0.7266839742660522,
      "learning_rate": 6.116207951070337e-05,
      "loss": 0.0575,
      "step": 200
    },
    {
      "epoch": 0.30557677616501144,
      "eval_loss": 0.05183817818760872,
      "eval_runtime": 217.88,
      "eval_samples_per_second": 5.342,
      "eval_steps_per_second": 5.342,
      "step": 200
    },
    {
      "epoch": 0.32085561497326204,
      "grad_norm": 0.5840321779251099,
      "learning_rate": 6.422018348623854e-05,
      "loss": 0.0501,
      "step": 210
    },
    {
      "epoch": 0.33613445378151263,
      "grad_norm": 0.6017265915870667,
      "learning_rate": 6.72782874617737e-05,
      "loss": 0.0637,
      "step": 220
    },
    {
      "epoch": 0.35141329258976317,
      "grad_norm": 0.6707488894462585,
      "learning_rate": 7.033639143730886e-05,
      "loss": 0.057,
      "step": 230
    },
    {
      "epoch": 0.36669213139801377,
      "grad_norm": 0.41309818625450134,
      "learning_rate": 7.339449541284404e-05,
      "loss": 0.0494,
      "step": 240
    },
    {
      "epoch": 0.3819709702062643,
      "grad_norm": 0.5365416407585144,
      "learning_rate": 7.645259938837921e-05,
      "loss": 0.0643,
      "step": 250
    },
    {
      "epoch": 0.3819709702062643,
      "eval_loss": 0.04688103497028351,
      "eval_runtime": 218.0282,
      "eval_samples_per_second": 5.339,
      "eval_steps_per_second": 5.339,
      "step": 250
    },
    {
      "epoch": 0.3972498090145149,
      "grad_norm": 0.2568855285644531,
      "learning_rate": 7.951070336391437e-05,
      "loss": 0.0436,
      "step": 260
    },
    {
      "epoch": 0.4125286478227655,
      "grad_norm": 0.808674693107605,
      "learning_rate": 8.256880733944955e-05,
      "loss": 0.0708,
      "step": 270
    },
    {
      "epoch": 0.42780748663101603,
      "grad_norm": 0.32300978899002075,
      "learning_rate": 8.562691131498472e-05,
      "loss": 0.0634,
      "step": 280
    },
    {
      "epoch": 0.4430863254392666,
      "grad_norm": 0.24263863265514374,
      "learning_rate": 8.868501529051988e-05,
      "loss": 0.0507,
      "step": 290
    },
    {
      "epoch": 0.45836516424751717,
      "grad_norm": 0.45118218660354614,
      "learning_rate": 9.174311926605506e-05,
      "loss": 0.0341,
      "step": 300
    },
    {
      "epoch": 0.45836516424751717,
      "eval_loss": 0.043478548526763916,
      "eval_runtime": 218.6129,
      "eval_samples_per_second": 5.324,
      "eval_steps_per_second": 5.324,
      "step": 300
    },
    {
      "epoch": 0.47364400305576776,
      "grad_norm": 0.44334620237350464,
      "learning_rate": 9.480122324159021e-05,
      "loss": 0.0361,
      "step": 310
    },
    {
      "epoch": 0.48892284186401835,
      "grad_norm": 0.27677738666534424,
      "learning_rate": 9.785932721712538e-05,
      "loss": 0.0466,
      "step": 320
    },
    {
      "epoch": 0.5042016806722689,
      "grad_norm": 0.4536680281162262,
      "learning_rate": 9.999974360983129e-05,
      "loss": 0.0556,
      "step": 330
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 0.4593944549560547,
      "learning_rate": 9.999518563553522e-05,
      "loss": 0.0652,
      "step": 340
    },
    {
      "epoch": 0.5347593582887701,
      "grad_norm": 0.290985107421875,
      "learning_rate": 9.998493069976636e-05,
      "loss": 0.0641,
      "step": 350
    },
    {
      "epoch": 0.5347593582887701,
      "eval_loss": 0.041326772421598434,
      "eval_runtime": 218.3999,
      "eval_samples_per_second": 5.33,
      "eval_steps_per_second": 5.33,
      "step": 350
    },
    {
      "epoch": 0.5500381970970206,
      "grad_norm": 0.4909442663192749,
      "learning_rate": 9.99689799710767e-05,
      "loss": 0.0574,
      "step": 360
    },
    {
      "epoch": 0.5653170359052712,
      "grad_norm": 0.5292761921882629,
      "learning_rate": 9.994733526705501e-05,
      "loss": 0.0535,
      "step": 370
    },
    {
      "epoch": 0.5805958747135218,
      "grad_norm": 0.3746335804462433,
      "learning_rate": 9.991999905411966e-05,
      "loss": 0.0432,
      "step": 380
    },
    {
      "epoch": 0.5958747135217723,
      "grad_norm": 0.18307775259017944,
      "learning_rate": 9.988697444723762e-05,
      "loss": 0.0345,
      "step": 390
    },
    {
      "epoch": 0.6111535523300229,
      "grad_norm": 0.22528858482837677,
      "learning_rate": 9.984826520956949e-05,
      "loss": 0.0405,
      "step": 400
    },
    {
      "epoch": 0.6111535523300229,
      "eval_loss": 0.04191767796874046,
      "eval_runtime": 218.4167,
      "eval_samples_per_second": 5.329,
      "eval_steps_per_second": 5.329,
      "step": 400
    },
    {
      "epoch": 0.6264323911382735,
      "grad_norm": 0.299173504114151,
      "learning_rate": 9.980387575204072e-05,
      "loss": 0.0363,
      "step": 410
    },
    {
      "epoch": 0.6417112299465241,
      "grad_norm": 0.5245795249938965,
      "learning_rate": 9.975381113283891e-05,
      "loss": 0.0346,
      "step": 420
    },
    {
      "epoch": 0.6569900687547746,
      "grad_norm": 0.3329148292541504,
      "learning_rate": 9.969807705683751e-05,
      "loss": 0.0441,
      "step": 430
    },
    {
      "epoch": 0.6722689075630253,
      "grad_norm": 0.23399081826210022,
      "learning_rate": 9.96366798749457e-05,
      "loss": 0.0301,
      "step": 440
    },
    {
      "epoch": 0.6875477463712758,
      "grad_norm": 0.1943167895078659,
      "learning_rate": 9.956962658338473e-05,
      "loss": 0.0531,
      "step": 450
    },
    {
      "epoch": 0.6875477463712758,
      "eval_loss": 0.03845739737153053,
      "eval_runtime": 217.3338,
      "eval_samples_per_second": 5.356,
      "eval_steps_per_second": 5.356,
      "step": 450
    },
    {
      "epoch": 0.7028265851795263,
      "grad_norm": 0.2697027027606964,
      "learning_rate": 9.94969248228907e-05,
      "loss": 0.0363,
      "step": 460
    },
    {
      "epoch": 0.7181054239877769,
      "grad_norm": 0.3219946026802063,
      "learning_rate": 9.941858287784383e-05,
      "loss": 0.0447,
      "step": 470
    },
    {
      "epoch": 0.7333842627960275,
      "grad_norm": 0.4683067798614502,
      "learning_rate": 9.933460967532453e-05,
      "loss": 0.043,
      "step": 480
    },
    {
      "epoch": 0.7486631016042781,
      "grad_norm": 0.3924793004989624,
      "learning_rate": 9.924501478409618e-05,
      "loss": 0.0468,
      "step": 490
    },
    {
      "epoch": 0.7639419404125286,
      "grad_norm": 0.25234106183052063,
      "learning_rate": 9.914980841351465e-05,
      "loss": 0.041,
      "step": 500
    },
    {
      "epoch": 0.7639419404125286,
      "eval_loss": 0.03724617511034012,
      "eval_runtime": 213.7645,
      "eval_samples_per_second": 5.445,
      "eval_steps_per_second": 5.445,
      "step": 500
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 0.21722671389579773,
      "learning_rate": 9.904900141236506e-05,
      "loss": 0.0254,
      "step": 510
    },
    {
      "epoch": 0.7944996180290298,
      "grad_norm": 0.3712560534477234,
      "learning_rate": 9.894260526762548e-05,
      "loss": 0.0495,
      "step": 520
    },
    {
      "epoch": 0.8097784568372803,
      "grad_norm": 0.32290956377983093,
      "learning_rate": 9.883063210315804e-05,
      "loss": 0.0556,
      "step": 530
    },
    {
      "epoch": 0.825057295645531,
      "grad_norm": 0.397980272769928,
      "learning_rate": 9.871309467832738e-05,
      "loss": 0.0381,
      "step": 540
    },
    {
      "epoch": 0.8403361344537815,
      "grad_norm": 0.24909217655658722,
      "learning_rate": 9.859000638654674e-05,
      "loss": 0.0283,
      "step": 550
    },
    {
      "epoch": 0.8403361344537815,
      "eval_loss": 0.03525731340050697,
      "eval_runtime": 209.1141,
      "eval_samples_per_second": 5.566,
      "eval_steps_per_second": 5.566,
      "step": 550
    },
    {
      "epoch": 0.8556149732620321,
      "grad_norm": 0.2012615203857422,
      "learning_rate": 9.846138125375175e-05,
      "loss": 0.0501,
      "step": 560
    },
    {
      "epoch": 0.8708938120702827,
      "grad_norm": 0.19848772883415222,
      "learning_rate": 9.83272339368022e-05,
      "loss": 0.0298,
      "step": 570
    },
    {
      "epoch": 0.8861726508785333,
      "grad_norm": 0.27441200613975525,
      "learning_rate": 9.818757972181191e-05,
      "loss": 0.043,
      "step": 580
    },
    {
      "epoch": 0.9014514896867838,
      "grad_norm": 0.3285536766052246,
      "learning_rate": 9.804243452240675e-05,
      "loss": 0.0335,
      "step": 590
    },
    {
      "epoch": 0.9167303284950343,
      "grad_norm": 0.48698437213897705,
      "learning_rate": 9.789181487791146e-05,
      "loss": 0.041,
      "step": 600
    },
    {
      "epoch": 0.9167303284950343,
      "eval_loss": 0.03299807012081146,
      "eval_runtime": 207.055,
      "eval_samples_per_second": 5.622,
      "eval_steps_per_second": 5.622,
      "step": 600
    },
    {
      "epoch": 0.932009167303285,
      "grad_norm": 0.37265974283218384,
      "learning_rate": 9.773573795146485e-05,
      "loss": 0.044,
      "step": 610
    },
    {
      "epoch": 0.9472880061115355,
      "grad_norm": 0.22519999742507935,
      "learning_rate": 9.757422152806415e-05,
      "loss": 0.0284,
      "step": 620
    },
    {
      "epoch": 0.9625668449197861,
      "grad_norm": 0.24463610351085663,
      "learning_rate": 9.74072840125383e-05,
      "loss": 0.0417,
      "step": 630
    },
    {
      "epoch": 0.9778456837280367,
      "grad_norm": 0.3929882347583771,
      "learning_rate": 9.723494442745085e-05,
      "loss": 0.0395,
      "step": 640
    },
    {
      "epoch": 0.9931245225362872,
      "grad_norm": 0.40377989411354065,
      "learning_rate": 9.705722241093223e-05,
      "loss": 0.0553,
      "step": 650
    },
    {
      "epoch": 0.9931245225362872,
      "eval_loss": 0.0362694077193737,
      "eval_runtime": 205.7921,
      "eval_samples_per_second": 5.656,
      "eval_steps_per_second": 5.656,
      "step": 650
    },
    {
      "epoch": 1.0084033613445378,
      "grad_norm": 0.14314551651477814,
      "learning_rate": 9.687413821444199e-05,
      "loss": 0.0376,
      "step": 660
    },
    {
      "epoch": 1.0236822001527883,
      "grad_norm": 0.2880457937717438,
      "learning_rate": 9.668571270046122e-05,
      "loss": 0.0351,
      "step": 670
    },
    {
      "epoch": 1.0389610389610389,
      "grad_norm": 0.19249747693538666,
      "learning_rate": 9.649196734011519e-05,
      "loss": 0.0288,
      "step": 680
    },
    {
      "epoch": 1.0542398777692896,
      "grad_norm": 0.46998459100723267,
      "learning_rate": 9.629292421072671e-05,
      "loss": 0.0295,
      "step": 690
    },
    {
      "epoch": 1.0695187165775402,
      "grad_norm": 0.1609886735677719,
      "learning_rate": 9.608860599330048e-05,
      "loss": 0.0314,
      "step": 700
    },
    {
      "epoch": 1.0695187165775402,
      "eval_loss": 0.03102213703095913,
      "eval_runtime": 204.7184,
      "eval_samples_per_second": 5.686,
      "eval_steps_per_second": 5.686,
      "step": 700
    },
    {
      "epoch": 1.0847975553857907,
      "grad_norm": 0.2983687222003937,
      "learning_rate": 9.587903596993854e-05,
      "loss": 0.0297,
      "step": 710
    },
    {
      "epoch": 1.1000763941940412,
      "grad_norm": 0.3572332561016083,
      "learning_rate": 9.566423802118724e-05,
      "loss": 0.0301,
      "step": 720
    },
    {
      "epoch": 1.1153552330022918,
      "grad_norm": 0.10540549457073212,
      "learning_rate": 9.544423662331612e-05,
      "loss": 0.0438,
      "step": 730
    },
    {
      "epoch": 1.1306340718105423,
      "grad_norm": 0.332472026348114,
      "learning_rate": 9.521905684552877e-05,
      "loss": 0.0354,
      "step": 740
    },
    {
      "epoch": 1.1459129106187929,
      "grad_norm": 0.19438593089580536,
      "learning_rate": 9.498872434710623e-05,
      "loss": 0.0211,
      "step": 750
    },
    {
      "epoch": 1.1459129106187929,
      "eval_loss": 0.031196942552924156,
      "eval_runtime": 204.1433,
      "eval_samples_per_second": 5.702,
      "eval_steps_per_second": 5.702,
      "step": 750
    },
    {
      "epoch": 1.1611917494270436,
      "grad_norm": 0.22115331888198853,
      "learning_rate": 9.475326537448307e-05,
      "loss": 0.0285,
      "step": 760
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 0.16889172792434692,
      "learning_rate": 9.451270675825665e-05,
      "loss": 0.0287,
      "step": 770
    },
    {
      "epoch": 1.1917494270435447,
      "grad_norm": 0.330474853515625,
      "learning_rate": 9.426707591012976e-05,
      "loss": 0.0261,
      "step": 780
    },
    {
      "epoch": 1.2070282658517952,
      "grad_norm": 0.4934603273868561,
      "learning_rate": 9.4016400819787e-05,
      "loss": 0.0404,
      "step": 790
    },
    {
      "epoch": 1.2223071046600458,
      "grad_norm": 0.39667800068855286,
      "learning_rate": 9.376071005170539e-05,
      "loss": 0.0314,
      "step": 800
    },
    {
      "epoch": 1.2223071046600458,
      "eval_loss": 0.03202859312295914,
      "eval_runtime": 203.9785,
      "eval_samples_per_second": 5.706,
      "eval_steps_per_second": 5.706,
      "step": 800
    },
    {
      "epoch": 1.2375859434682965,
      "grad_norm": 0.2728429138660431,
      "learning_rate": 9.350003274189949e-05,
      "loss": 0.0361,
      "step": 810
    },
    {
      "epoch": 1.2528647822765469,
      "grad_norm": 0.23270060122013092,
      "learning_rate": 9.323439859460122e-05,
      "loss": 0.0312,
      "step": 820
    },
    {
      "epoch": 1.2681436210847976,
      "grad_norm": 0.21153134107589722,
      "learning_rate": 9.296383787887519e-05,
      "loss": 0.0311,
      "step": 830
    },
    {
      "epoch": 1.2834224598930482,
      "grad_norm": 0.18999841809272766,
      "learning_rate": 9.268838142516943e-05,
      "loss": 0.0405,
      "step": 840
    },
    {
      "epoch": 1.2987012987012987,
      "grad_norm": 0.28957971930503845,
      "learning_rate": 9.240806062180234e-05,
      "loss": 0.0325,
      "step": 850
    },
    {
      "epoch": 1.2987012987012987,
      "eval_loss": 0.03146594762802124,
      "eval_runtime": 203.4351,
      "eval_samples_per_second": 5.722,
      "eval_steps_per_second": 5.722,
      "step": 850
    },
    {
      "epoch": 1.3139801375095492,
      "grad_norm": 0.12776818871498108,
      "learning_rate": 9.212290741138592e-05,
      "loss": 0.0172,
      "step": 860
    },
    {
      "epoch": 1.3292589763177998,
      "grad_norm": 0.1620066910982132,
      "learning_rate": 9.183295428718592e-05,
      "loss": 0.0285,
      "step": 870
    },
    {
      "epoch": 1.3445378151260505,
      "grad_norm": 0.2502717971801758,
      "learning_rate": 9.153823428941924e-05,
      "loss": 0.0395,
      "step": 880
    },
    {
      "epoch": 1.359816653934301,
      "grad_norm": 0.23712791502475739,
      "learning_rate": 9.1238781001489e-05,
      "loss": 0.0276,
      "step": 890
    },
    {
      "epoch": 1.3750954927425516,
      "grad_norm": 0.27235275506973267,
      "learning_rate": 9.093462854615766e-05,
      "loss": 0.0351,
      "step": 900
    },
    {
      "epoch": 1.3750954927425516,
      "eval_loss": 0.030502911657094955,
      "eval_runtime": 203.3489,
      "eval_samples_per_second": 5.724,
      "eval_steps_per_second": 5.724,
      "step": 900
    },
    {
      "epoch": 1.3903743315508021,
      "grad_norm": 0.2629202604293823,
      "learning_rate": 9.062581158165876e-05,
      "loss": 0.0323,
      "step": 910
    },
    {
      "epoch": 1.4056531703590527,
      "grad_norm": 0.2535449266433716,
      "learning_rate": 9.031236529774764e-05,
      "loss": 0.0222,
      "step": 920
    },
    {
      "epoch": 1.4209320091673032,
      "grad_norm": 0.19801892340183258,
      "learning_rate": 8.999432541169145e-05,
      "loss": 0.0273,
      "step": 930
    },
    {
      "epoch": 1.4362108479755538,
      "grad_norm": 0.19468438625335693,
      "learning_rate": 8.967172816419927e-05,
      "loss": 0.0274,
      "step": 940
    },
    {
      "epoch": 1.4514896867838045,
      "grad_norm": 0.268258273601532,
      "learning_rate": 8.934461031529242e-05,
      "loss": 0.0402,
      "step": 950
    },
    {
      "epoch": 1.4514896867838045,
      "eval_loss": 0.03139554709196091,
      "eval_runtime": 203.1165,
      "eval_samples_per_second": 5.731,
      "eval_steps_per_second": 5.731,
      "step": 950
    },
    {
      "epoch": 1.466768525592055,
      "grad_norm": 0.21832090616226196,
      "learning_rate": 8.901300914011569e-05,
      "loss": 0.0308,
      "step": 960
    },
    {
      "epoch": 1.4820473644003056,
      "grad_norm": 0.12288864701986313,
      "learning_rate": 8.867696242468976e-05,
      "loss": 0.0228,
      "step": 970
    },
    {
      "epoch": 1.4973262032085561,
      "grad_norm": 0.2422870695590973,
      "learning_rate": 8.833650846160555e-05,
      "loss": 0.0321,
      "step": 980
    },
    {
      "epoch": 1.5126050420168067,
      "grad_norm": 0.20795999467372894,
      "learning_rate": 8.79916860456607e-05,
      "loss": 0.0269,
      "step": 990
    },
    {
      "epoch": 1.5278838808250574,
      "grad_norm": 0.13397260010242462,
      "learning_rate": 8.7642534469439e-05,
      "loss": 0.0262,
      "step": 1000
    },
    {
      "epoch": 1.5278838808250574,
      "eval_loss": 0.029931625351309776,
      "eval_runtime": 202.6566,
      "eval_samples_per_second": 5.744,
      "eval_steps_per_second": 5.744,
      "step": 1000
    },
    {
      "epoch": 1.5431627196333078,
      "grad_norm": 0.5128399729728699,
      "learning_rate": 8.728909351883283e-05,
      "loss": 0.0295,
      "step": 1010
    },
    {
      "epoch": 1.5584415584415585,
      "grad_norm": 0.24673043191432953,
      "learning_rate": 8.693140346850975e-05,
      "loss": 0.0379,
      "step": 1020
    },
    {
      "epoch": 1.573720397249809,
      "grad_norm": 0.16341492533683777,
      "learning_rate": 8.656950507732303e-05,
      "loss": 0.0247,
      "step": 1030
    },
    {
      "epoch": 1.5889992360580596,
      "grad_norm": 0.11685626953840256,
      "learning_rate": 8.620343958366718e-05,
      "loss": 0.0284,
      "step": 1040
    },
    {
      "epoch": 1.6042780748663101,
      "grad_norm": 0.25839105248451233,
      "learning_rate": 8.5833248700779e-05,
      "loss": 0.026,
      "step": 1050
    },
    {
      "epoch": 1.6042780748663101,
      "eval_loss": 0.030233880504965782,
      "eval_runtime": 202.0854,
      "eval_samples_per_second": 5.76,
      "eval_steps_per_second": 5.76,
      "step": 1050
    },
    {
      "epoch": 1.6195569136745607,
      "grad_norm": 0.12446019053459167,
      "learning_rate": 8.545897461198413e-05,
      "loss": 0.0269,
      "step": 1060
    },
    {
      "epoch": 1.6348357524828114,
      "grad_norm": 0.13764119148254395,
      "learning_rate": 8.508065996589036e-05,
      "loss": 0.0295,
      "step": 1070
    },
    {
      "epoch": 1.6501145912910617,
      "grad_norm": 0.1417740285396576,
      "learning_rate": 8.469834787152783e-05,
      "loss": 0.0235,
      "step": 1080
    },
    {
      "epoch": 1.6653934300993125,
      "grad_norm": 0.1833825260400772,
      "learning_rate": 8.43120818934367e-05,
      "loss": 0.0267,
      "step": 1090
    },
    {
      "epoch": 1.680672268907563,
      "grad_norm": 0.259737104177475,
      "learning_rate": 8.392190604670293e-05,
      "loss": 0.024,
      "step": 1100
    },
    {
      "epoch": 1.680672268907563,
      "eval_loss": 0.031445007771253586,
      "eval_runtime": 201.7681,
      "eval_samples_per_second": 5.769,
      "eval_steps_per_second": 5.769,
      "step": 1100
    },
    {
      "epoch": 1.6959511077158136,
      "grad_norm": 0.3111629784107208,
      "learning_rate": 8.352786479194288e-05,
      "loss": 0.0351,
      "step": 1110
    },
    {
      "epoch": 1.7112299465240641,
      "grad_norm": 0.36554020643234253,
      "learning_rate": 8.313000303023688e-05,
      "loss": 0.0245,
      "step": 1120
    },
    {
      "epoch": 1.7265087853323147,
      "grad_norm": 0.18379883468151093,
      "learning_rate": 8.27283660980128e-05,
      "loss": 0.0371,
      "step": 1130
    },
    {
      "epoch": 1.7417876241405654,
      "grad_norm": 0.13640639185905457,
      "learning_rate": 8.232299976187999e-05,
      "loss": 0.0334,
      "step": 1140
    },
    {
      "epoch": 1.7570664629488157,
      "grad_norm": 0.205993190407753,
      "learning_rate": 8.191395021341408e-05,
      "loss": 0.0487,
      "step": 1150
    },
    {
      "epoch": 1.7570664629488157,
      "eval_loss": 0.03015304170548916,
      "eval_runtime": 201.7837,
      "eval_samples_per_second": 5.769,
      "eval_steps_per_second": 5.769,
      "step": 1150
    },
    {
      "epoch": 1.7723453017570665,
      "grad_norm": 0.30252721905708313,
      "learning_rate": 8.150126406389352e-05,
      "loss": 0.0299,
      "step": 1160
    },
    {
      "epoch": 1.787624140565317,
      "grad_norm": 0.21577638387680054,
      "learning_rate": 8.108498833898815e-05,
      "loss": 0.0315,
      "step": 1170
    },
    {
      "epoch": 1.8029029793735676,
      "grad_norm": 0.34054169058799744,
      "learning_rate": 8.066517047340066e-05,
      "loss": 0.0348,
      "step": 1180
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.06973261386156082,
      "learning_rate": 8.02418583054614e-05,
      "loss": 0.0213,
      "step": 1190
    },
    {
      "epoch": 1.8334606569900687,
      "grad_norm": 0.21064534783363342,
      "learning_rate": 7.981510007167719e-05,
      "loss": 0.0251,
      "step": 1200
    },
    {
      "epoch": 1.8334606569900687,
      "eval_loss": 0.030031004920601845,
      "eval_runtime": 201.7876,
      "eval_samples_per_second": 5.768,
      "eval_steps_per_second": 5.768,
      "step": 1200
    },
    {
      "epoch": 1.8487394957983194,
      "grad_norm": 0.3094477355480194,
      "learning_rate": 7.938494440123468e-05,
      "loss": 0.0295,
      "step": 1210
    },
    {
      "epoch": 1.8640183346065697,
      "grad_norm": 0.15358871221542358,
      "learning_rate": 7.895144031045918e-05,
      "loss": 0.0287,
      "step": 1220
    },
    {
      "epoch": 1.8792971734148205,
      "grad_norm": 0.14194321632385254,
      "learning_rate": 7.851463719722913e-05,
      "loss": 0.025,
      "step": 1230
    },
    {
      "epoch": 1.894576012223071,
      "grad_norm": 0.1547326147556305,
      "learning_rate": 7.80745848353473e-05,
      "loss": 0.0294,
      "step": 1240
    },
    {
      "epoch": 1.9098548510313216,
      "grad_norm": 0.42348071932792664,
      "learning_rate": 7.763133336886892e-05,
      "loss": 0.028,
      "step": 1250
    },
    {
      "epoch": 1.9098548510313216,
      "eval_loss": 0.031990259885787964,
      "eval_runtime": 201.9183,
      "eval_samples_per_second": 5.765,
      "eval_steps_per_second": 5.765,
      "step": 1250
    },
    {
      "epoch": 1.9251336898395723,
      "grad_norm": 0.10496515035629272,
      "learning_rate": 7.718493330638789e-05,
      "loss": 0.0205,
      "step": 1260
    },
    {
      "epoch": 1.9404125286478227,
      "grad_norm": 0.07516542822122574,
      "learning_rate": 7.673543551528122e-05,
      "loss": 0.0346,
      "step": 1270
    },
    {
      "epoch": 1.9556913674560734,
      "grad_norm": 0.5417054891586304,
      "learning_rate": 7.628289121591277e-05,
      "loss": 0.0314,
      "step": 1280
    },
    {
      "epoch": 1.970970206264324,
      "grad_norm": 0.17793487012386322,
      "learning_rate": 7.582735197579656e-05,
      "loss": 0.0282,
      "step": 1290
    },
    {
      "epoch": 1.9862490450725745,
      "grad_norm": 0.1875259131193161,
      "learning_rate": 7.536886970372078e-05,
      "loss": 0.0244,
      "step": 1300
    },
    {
      "epoch": 1.9862490450725745,
      "eval_loss": 0.029929550364613533,
      "eval_runtime": 201.5395,
      "eval_samples_per_second": 5.776,
      "eval_steps_per_second": 5.776,
      "step": 1300
    },
    {
      "epoch": 2.0015278838808253,
      "grad_norm": 0.14435645937919617,
      "learning_rate": 7.490749664383271e-05,
      "loss": 0.0372,
      "step": 1310
    },
    {
      "epoch": 2.0168067226890756,
      "grad_norm": 0.09202131628990173,
      "learning_rate": 7.444328536968538e-05,
      "loss": 0.0258,
      "step": 1320
    },
    {
      "epoch": 2.0320855614973263,
      "grad_norm": 0.43767938017845154,
      "learning_rate": 7.397628877824701e-05,
      "loss": 0.024,
      "step": 1330
    },
    {
      "epoch": 2.0473644003055766,
      "grad_norm": 0.13443796336650848,
      "learning_rate": 7.350656008387327e-05,
      "loss": 0.0192,
      "step": 1340
    },
    {
      "epoch": 2.0626432391138274,
      "grad_norm": 0.21498188376426697,
      "learning_rate": 7.303415281224346e-05,
      "loss": 0.0211,
      "step": 1350
    },
    {
      "epoch": 2.0626432391138274,
      "eval_loss": 0.0282374769449234,
      "eval_runtime": 202.0409,
      "eval_samples_per_second": 5.761,
      "eval_steps_per_second": 5.761,
      "step": 1350
    },
    {
      "epoch": 2.0779220779220777,
      "grad_norm": 0.1617782562971115,
      "learning_rate": 7.255912079426136e-05,
      "loss": 0.0155,
      "step": 1360
    },
    {
      "epoch": 2.0932009167303285,
      "grad_norm": 0.07547878473997116,
      "learning_rate": 7.208151815992107e-05,
      "loss": 0.0172,
      "step": 1370
    },
    {
      "epoch": 2.1084797555385792,
      "grad_norm": 0.34869226813316345,
      "learning_rate": 7.160139933213898e-05,
      "loss": 0.0254,
      "step": 1380
    },
    {
      "epoch": 2.1237585943468296,
      "grad_norm": 0.2867926061153412,
      "learning_rate": 7.111881902055223e-05,
      "loss": 0.0226,
      "step": 1390
    },
    {
      "epoch": 2.1390374331550803,
      "grad_norm": 0.26209303736686707,
      "learning_rate": 7.06338322152845e-05,
      "loss": 0.019,
      "step": 1400
    },
    {
      "epoch": 2.1390374331550803,
      "eval_loss": 0.02854863740503788,
      "eval_runtime": 201.0589,
      "eval_samples_per_second": 5.789,
      "eval_steps_per_second": 5.789,
      "step": 1400
    },
    {
      "epoch": 2.1543162719633306,
      "grad_norm": 0.17289575934410095,
      "learning_rate": 7.014649418067994e-05,
      "loss": 0.0176,
      "step": 1410
    },
    {
      "epoch": 2.1695951107715814,
      "grad_norm": 0.22034431993961334,
      "learning_rate": 6.965686044900577e-05,
      "loss": 0.0163,
      "step": 1420
    },
    {
      "epoch": 2.184873949579832,
      "grad_norm": 0.2867283821105957,
      "learning_rate": 6.91649868141243e-05,
      "loss": 0.0154,
      "step": 1430
    },
    {
      "epoch": 2.2001527883880825,
      "grad_norm": 0.17423681914806366,
      "learning_rate": 6.86709293251353e-05,
      "loss": 0.0142,
      "step": 1440
    },
    {
      "epoch": 2.2154316271963332,
      "grad_norm": 0.10424409806728363,
      "learning_rate": 6.817474427998916e-05,
      "loss": 0.012,
      "step": 1450
    },
    {
      "epoch": 2.2154316271963332,
      "eval_loss": 0.030223889276385307,
      "eval_runtime": 200.9988,
      "eval_samples_per_second": 5.791,
      "eval_steps_per_second": 5.791,
      "step": 1450
    },
    {
      "epoch": 2.2307104660045836,
      "grad_norm": 0.325139582157135,
      "learning_rate": 6.767648821907172e-05,
      "loss": 0.0192,
      "step": 1460
    },
    {
      "epoch": 2.2459893048128343,
      "grad_norm": 0.11585790663957596,
      "learning_rate": 6.717621791876147e-05,
      "loss": 0.0302,
      "step": 1470
    },
    {
      "epoch": 2.2612681436210846,
      "grad_norm": 0.16039828956127167,
      "learning_rate": 6.667399038495986e-05,
      "loss": 0.0207,
      "step": 1480
    },
    {
      "epoch": 2.2765469824293354,
      "grad_norm": 0.18742448091506958,
      "learning_rate": 6.616986284659556e-05,
      "loss": 0.0247,
      "step": 1490
    },
    {
      "epoch": 2.2918258212375857,
      "grad_norm": 0.07378096133470535,
      "learning_rate": 6.566389274910309e-05,
      "loss": 0.0181,
      "step": 1500
    },
    {
      "epoch": 2.2918258212375857,
      "eval_loss": 0.02827996388077736,
      "eval_runtime": 201.0916,
      "eval_samples_per_second": 5.788,
      "eval_steps_per_second": 5.788,
      "step": 1500
    },
    {
      "epoch": 2.3071046600458365,
      "grad_norm": 0.10507321357727051,
      "learning_rate": 6.515613774787697e-05,
      "loss": 0.019,
      "step": 1510
    },
    {
      "epoch": 2.3223834988540872,
      "grad_norm": 0.10831980407238007,
      "learning_rate": 6.464665570170186e-05,
      "loss": 0.0215,
      "step": 1520
    },
    {
      "epoch": 2.3376623376623376,
      "grad_norm": 0.10839013010263443,
      "learning_rate": 6.413550466615952e-05,
      "loss": 0.017,
      "step": 1530
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 0.12335723638534546,
      "learning_rate": 6.362274288701342e-05,
      "loss": 0.0174,
      "step": 1540
    },
    {
      "epoch": 2.3682200152788386,
      "grad_norm": 0.16996541619300842,
      "learning_rate": 6.310842879357158e-05,
      "loss": 0.0176,
      "step": 1550
    },
    {
      "epoch": 2.3682200152788386,
      "eval_loss": 0.028811601921916008,
      "eval_runtime": 201.2139,
      "eval_samples_per_second": 5.785,
      "eval_steps_per_second": 5.785,
      "step": 1550
    },
    {
      "epoch": 2.3834988540870894,
      "grad_norm": 0.22573326528072357,
      "learning_rate": 6.25926209920285e-05,
      "loss": 0.0117,
      "step": 1560
    },
    {
      "epoch": 2.39877769289534,
      "grad_norm": 0.24418701231479645,
      "learning_rate": 6.207537825878708e-05,
      "loss": 0.0165,
      "step": 1570
    },
    {
      "epoch": 2.4140565317035905,
      "grad_norm": 0.15098631381988525,
      "learning_rate": 6.155675953376095e-05,
      "loss": 0.027,
      "step": 1580
    },
    {
      "epoch": 2.4293353705118412,
      "grad_norm": 0.1565188765525818,
      "learning_rate": 6.103682391365828e-05,
      "loss": 0.0185,
      "step": 1590
    },
    {
      "epoch": 2.4446142093200915,
      "grad_norm": 0.0722580999135971,
      "learning_rate": 6.05156306452477e-05,
      "loss": 0.0136,
      "step": 1600
    },
    {
      "epoch": 2.4446142093200915,
      "eval_loss": 0.027694666758179665,
      "eval_runtime": 201.4813,
      "eval_samples_per_second": 5.777,
      "eval_steps_per_second": 5.777,
      "step": 1600
    },
    {
      "epoch": 2.4598930481283423,
      "grad_norm": 0.2664923667907715,
      "learning_rate": 5.9993239118607124e-05,
      "loss": 0.0251,
      "step": 1610
    },
    {
      "epoch": 2.475171886936593,
      "grad_norm": 0.15546078979969025,
      "learning_rate": 5.9469708860356246e-05,
      "loss": 0.0207,
      "step": 1620
    },
    {
      "epoch": 2.4904507257448434,
      "grad_norm": 0.14643099904060364,
      "learning_rate": 5.89450995268734e-05,
      "loss": 0.0195,
      "step": 1630
    },
    {
      "epoch": 2.5057295645530937,
      "grad_norm": 0.1590198427438736,
      "learning_rate": 5.841947089749783e-05,
      "loss": 0.0251,
      "step": 1640
    },
    {
      "epoch": 2.5210084033613445,
      "grad_norm": 0.19299504160881042,
      "learning_rate": 5.78928828677177e-05,
      "loss": 0.0217,
      "step": 1650
    },
    {
      "epoch": 2.5210084033613445,
      "eval_loss": 0.028578663244843483,
      "eval_runtime": 201.0706,
      "eval_samples_per_second": 5.789,
      "eval_steps_per_second": 5.789,
      "step": 1650
    },
    {
      "epoch": 2.5362872421695952,
      "grad_norm": 0.18513061106204987,
      "learning_rate": 5.7365395442345085e-05,
      "loss": 0.0143,
      "step": 1660
    },
    {
      "epoch": 2.5515660809778455,
      "grad_norm": 0.1994353085756302,
      "learning_rate": 5.683706872867833e-05,
      "loss": 0.0149,
      "step": 1670
    },
    {
      "epoch": 2.5668449197860963,
      "grad_norm": 0.20116868615150452,
      "learning_rate": 5.630796292965288e-05,
      "loss": 0.0255,
      "step": 1680
    },
    {
      "epoch": 2.5821237585943466,
      "grad_norm": 0.1676681637763977,
      "learning_rate": 5.57781383369811e-05,
      "loss": 0.02,
      "step": 1690
    },
    {
      "epoch": 2.5974025974025974,
      "grad_norm": 0.1904657781124115,
      "learning_rate": 5.524765532428203e-05,
      "loss": 0.0156,
      "step": 1700
    },
    {
      "epoch": 2.5974025974025974,
      "eval_loss": 0.029410608112812042,
      "eval_runtime": 200.9522,
      "eval_samples_per_second": 5.792,
      "eval_steps_per_second": 5.792,
      "step": 1700
    },
    {
      "epoch": 2.612681436210848,
      "grad_norm": 0.15733115375041962,
      "learning_rate": 5.471657434020182e-05,
      "loss": 0.0187,
      "step": 1710
    },
    {
      "epoch": 2.6279602750190985,
      "grad_norm": 0.20242205262184143,
      "learning_rate": 5.418495590152557e-05,
      "loss": 0.0231,
      "step": 1720
    },
    {
      "epoch": 2.643239113827349,
      "grad_norm": 0.1593477725982666,
      "learning_rate": 5.365286058628145e-05,
      "loss": 0.0212,
      "step": 1730
    },
    {
      "epoch": 2.6585179526355995,
      "grad_norm": 0.3499566912651062,
      "learning_rate": 5.312034902683779e-05,
      "loss": 0.0268,
      "step": 1740
    },
    {
      "epoch": 2.6737967914438503,
      "grad_norm": 0.1606576293706894,
      "learning_rate": 5.258748190299404e-05,
      "loss": 0.0191,
      "step": 1750
    },
    {
      "epoch": 2.6737967914438503,
      "eval_loss": 0.028579337522387505,
      "eval_runtime": 200.7556,
      "eval_samples_per_second": 5.798,
      "eval_steps_per_second": 5.798,
      "step": 1750
    },
    {
      "epoch": 2.689075630252101,
      "grad_norm": 0.26182451844215393,
      "learning_rate": 5.20543199350663e-05,
      "loss": 0.0217,
      "step": 1760
    },
    {
      "epoch": 2.7043544690603514,
      "grad_norm": 0.17532721161842346,
      "learning_rate": 5.152092387696821e-05,
      "loss": 0.0215,
      "step": 1770
    },
    {
      "epoch": 2.719633307868602,
      "grad_norm": 0.23950158059597015,
      "learning_rate": 5.0987354509287985e-05,
      "loss": 0.0224,
      "step": 1780
    },
    {
      "epoch": 2.7349121466768525,
      "grad_norm": 0.3977627754211426,
      "learning_rate": 5.045367263236257e-05,
      "loss": 0.017,
      "step": 1790
    },
    {
      "epoch": 2.750190985485103,
      "grad_norm": 0.10810764133930206,
      "learning_rate": 4.991993905934931e-05,
      "loss": 0.0249,
      "step": 1800
    },
    {
      "epoch": 2.750190985485103,
      "eval_loss": 0.027184655889868736,
      "eval_runtime": 200.6455,
      "eval_samples_per_second": 5.801,
      "eval_steps_per_second": 5.801,
      "step": 1800
    },
    {
      "epoch": 2.765469824293354,
      "grad_norm": 0.23987551033496857,
      "learning_rate": 4.938621460929639e-05,
      "loss": 0.0182,
      "step": 1810
    },
    {
      "epoch": 2.7807486631016043,
      "grad_norm": 0.2798612415790558,
      "learning_rate": 4.885256010021233e-05,
      "loss": 0.0193,
      "step": 1820
    },
    {
      "epoch": 2.7960275019098546,
      "grad_norm": 0.16607950627803802,
      "learning_rate": 4.831903634213599e-05,
      "loss": 0.0199,
      "step": 1830
    },
    {
      "epoch": 2.8113063407181054,
      "grad_norm": 0.04422255605459213,
      "learning_rate": 4.778570413020702e-05,
      "loss": 0.0211,
      "step": 1840
    },
    {
      "epoch": 2.826585179526356,
      "grad_norm": 0.24929310381412506,
      "learning_rate": 4.725262423773838e-05,
      "loss": 0.0237,
      "step": 1850
    },
    {
      "epoch": 2.826585179526356,
      "eval_loss": 0.02900101989507675,
      "eval_runtime": 201.0849,
      "eval_samples_per_second": 5.789,
      "eval_steps_per_second": 5.789,
      "step": 1850
    },
    {
      "epoch": 2.8418640183346064,
      "grad_norm": 0.2282838672399521,
      "learning_rate": 4.671985740929123e-05,
      "loss": 0.0165,
      "step": 1860
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.06340071558952332,
      "learning_rate": 4.618746435375295e-05,
      "loss": 0.023,
      "step": 1870
    },
    {
      "epoch": 2.8724216959511075,
      "grad_norm": 0.04884711280465126,
      "learning_rate": 4.565550573741942e-05,
      "loss": 0.0159,
      "step": 1880
    },
    {
      "epoch": 2.8877005347593583,
      "grad_norm": 0.24351294338703156,
      "learning_rate": 4.512404217708217e-05,
      "loss": 0.0227,
      "step": 1890
    },
    {
      "epoch": 2.902979373567609,
      "grad_norm": 0.12852653861045837,
      "learning_rate": 4.45931342331209e-05,
      "loss": 0.021,
      "step": 1900
    },
    {
      "epoch": 2.902979373567609,
      "eval_loss": 0.02777683548629284,
      "eval_runtime": 200.6223,
      "eval_samples_per_second": 5.802,
      "eval_steps_per_second": 5.802,
      "step": 1900
    },
    {
      "epoch": 2.9182582123758594,
      "grad_norm": 0.1962100714445114,
      "learning_rate": 4.406284240260278e-05,
      "loss": 0.023,
      "step": 1910
    },
    {
      "epoch": 2.93353705118411,
      "grad_norm": 0.16242529451847076,
      "learning_rate": 4.3533227112388694e-05,
      "loss": 0.0165,
      "step": 1920
    },
    {
      "epoch": 2.9488158899923604,
      "grad_norm": 0.15565882623195648,
      "learning_rate": 4.300434871224763e-05,
      "loss": 0.0234,
      "step": 1930
    },
    {
      "epoch": 2.964094728800611,
      "grad_norm": 0.1874658316373825,
      "learning_rate": 4.247626746797983e-05,
      "loss": 0.0207,
      "step": 1940
    },
    {
      "epoch": 2.979373567608862,
      "grad_norm": 0.1462504267692566,
      "learning_rate": 4.1949043554549406e-05,
      "loss": 0.0174,
      "step": 1950
    },
    {
      "epoch": 2.979373567608862,
      "eval_loss": 0.028289297595620155,
      "eval_runtime": 200.8115,
      "eval_samples_per_second": 5.796,
      "eval_steps_per_second": 5.796,
      "step": 1950
    },
    {
      "epoch": 2.9946524064171123,
      "grad_norm": 0.3352892994880676,
      "learning_rate": 4.14227370492275e-05,
      "loss": 0.0261,
      "step": 1960
    },
    {
      "epoch": 3.009931245225363,
      "grad_norm": 0.15284225344657898,
      "learning_rate": 4.08974079247464e-05,
      "loss": 0.0172,
      "step": 1970
    },
    {
      "epoch": 3.0252100840336134,
      "grad_norm": 0.06005563586950302,
      "learning_rate": 4.037311604246565e-05,
      "loss": 0.0115,
      "step": 1980
    },
    {
      "epoch": 3.040488922841864,
      "grad_norm": 0.21838952600955963,
      "learning_rate": 3.9849921145550805e-05,
      "loss": 0.0129,
      "step": 1990
    },
    {
      "epoch": 3.0557677616501144,
      "grad_norm": 0.11845885962247849,
      "learning_rate": 3.9327882852165795e-05,
      "loss": 0.0122,
      "step": 2000
    },
    {
      "epoch": 3.0557677616501144,
      "eval_loss": 0.028973255306482315,
      "eval_runtime": 200.9411,
      "eval_samples_per_second": 5.793,
      "eval_steps_per_second": 5.793,
      "step": 2000
    },
    {
      "epoch": 3.071046600458365,
      "grad_norm": 0.09495562314987183,
      "learning_rate": 3.880706064867926e-05,
      "loss": 0.0125,
      "step": 2010
    },
    {
      "epoch": 3.0863254392666155,
      "grad_norm": 0.16284717619419098,
      "learning_rate": 3.8287513882886196e-05,
      "loss": 0.009,
      "step": 2020
    },
    {
      "epoch": 3.1016042780748663,
      "grad_norm": 0.1330808848142624,
      "learning_rate": 3.776930175724521e-05,
      "loss": 0.011,
      "step": 2030
    },
    {
      "epoch": 3.116883116883117,
      "grad_norm": 0.1752845197916031,
      "learning_rate": 3.7252483322132386e-05,
      "loss": 0.0128,
      "step": 2040
    },
    {
      "epoch": 3.1321619556913673,
      "grad_norm": 0.09726885706186295,
      "learning_rate": 3.673711746911252e-05,
      "loss": 0.0137,
      "step": 2050
    },
    {
      "epoch": 3.1321619556913673,
      "eval_loss": 0.030082957819104195,
      "eval_runtime": 200.9521,
      "eval_samples_per_second": 5.792,
      "eval_steps_per_second": 5.792,
      "step": 2050
    },
    {
      "epoch": 3.147440794499618,
      "grad_norm": 0.09027769416570663,
      "learning_rate": 3.6223262924228344e-05,
      "loss": 0.0078,
      "step": 2060
    },
    {
      "epoch": 3.1627196333078684,
      "grad_norm": 0.08661149442195892,
      "learning_rate": 3.5710978241308733e-05,
      "loss": 0.013,
      "step": 2070
    },
    {
      "epoch": 3.177998472116119,
      "grad_norm": 0.19561336934566498,
      "learning_rate": 3.520032179529652e-05,
      "loss": 0.0123,
      "step": 2080
    },
    {
      "epoch": 3.19327731092437,
      "grad_norm": 0.28915536403656006,
      "learning_rate": 3.4691351775596564e-05,
      "loss": 0.0135,
      "step": 2090
    },
    {
      "epoch": 3.2085561497326203,
      "grad_norm": 0.12461555749177933,
      "learning_rate": 3.41841261794451e-05,
      "loss": 0.0086,
      "step": 2100
    },
    {
      "epoch": 3.2085561497326203,
      "eval_loss": 0.030945168808102608,
      "eval_runtime": 200.348,
      "eval_samples_per_second": 5.81,
      "eval_steps_per_second": 5.81,
      "step": 2100
    },
    {
      "epoch": 3.223834988540871,
      "grad_norm": 0.17976081371307373,
      "learning_rate": 3.367870280530101e-05,
      "loss": 0.0176,
      "step": 2110
    },
    {
      "epoch": 3.2391138273491213,
      "grad_norm": 0.17633405327796936,
      "learning_rate": 3.3175139246259536e-05,
      "loss": 0.0109,
      "step": 2120
    },
    {
      "epoch": 3.254392666157372,
      "grad_norm": 0.23417995870113373,
      "learning_rate": 3.2673492883489696e-05,
      "loss": 0.0105,
      "step": 2130
    },
    {
      "epoch": 3.2696715049656224,
      "grad_norm": 0.385770320892334,
      "learning_rate": 3.2173820879695535e-05,
      "loss": 0.0166,
      "step": 2140
    },
    {
      "epoch": 3.284950343773873,
      "grad_norm": 0.1962185502052307,
      "learning_rate": 3.1676180172602525e-05,
      "loss": 0.0136,
      "step": 2150
    },
    {
      "epoch": 3.284950343773873,
      "eval_loss": 0.030590800568461418,
      "eval_runtime": 199.5546,
      "eval_samples_per_second": 5.833,
      "eval_steps_per_second": 5.833,
      "step": 2150
    },
    {
      "epoch": 3.300229182582124,
      "grad_norm": 0.07931424677371979,
      "learning_rate": 3.11806274684695e-05,
      "loss": 0.0115,
      "step": 2160
    },
    {
      "epoch": 3.3155080213903743,
      "grad_norm": 0.12415704876184464,
      "learning_rate": 3.068721923562688e-05,
      "loss": 0.0076,
      "step": 2170
    },
    {
      "epoch": 3.330786860198625,
      "grad_norm": 0.1858009248971939,
      "learning_rate": 3.019601169804216e-05,
      "loss": 0.0109,
      "step": 2180
    },
    {
      "epoch": 3.3460656990068753,
      "grad_norm": 0.15695886313915253,
      "learning_rate": 2.9707060828913225e-05,
      "loss": 0.0086,
      "step": 2190
    },
    {
      "epoch": 3.361344537815126,
      "grad_norm": 0.5040724277496338,
      "learning_rate": 2.9220422344290056e-05,
      "loss": 0.0111,
      "step": 2200
    },
    {
      "epoch": 3.361344537815126,
      "eval_loss": 0.03102887235581875,
      "eval_runtime": 200.5885,
      "eval_samples_per_second": 5.803,
      "eval_steps_per_second": 5.803,
      "step": 2200
    },
    {
      "epoch": 3.3766233766233764,
      "grad_norm": 0.2534728944301605,
      "learning_rate": 2.873615169672601e-05,
      "loss": 0.0099,
      "step": 2210
    },
    {
      "epoch": 3.391902215431627,
      "grad_norm": 0.10433147847652435,
      "learning_rate": 2.8254304068958927e-05,
      "loss": 0.011,
      "step": 2220
    },
    {
      "epoch": 3.407181054239878,
      "grad_norm": 0.09524795413017273,
      "learning_rate": 2.7774934367622996e-05,
      "loss": 0.0159,
      "step": 2230
    },
    {
      "epoch": 3.4224598930481283,
      "grad_norm": 0.1627880036830902,
      "learning_rate": 2.7298097216992284e-05,
      "loss": 0.0095,
      "step": 2240
    },
    {
      "epoch": 3.437738731856379,
      "grad_norm": 0.24664515256881714,
      "learning_rate": 2.6823846952756125e-05,
      "loss": 0.0142,
      "step": 2250
    },
    {
      "epoch": 3.437738731856379,
      "eval_loss": 0.03270566090941429,
      "eval_runtime": 200.5417,
      "eval_samples_per_second": 5.804,
      "eval_steps_per_second": 5.804,
      "step": 2250
    },
    {
      "epoch": 3.4530175706646293,
      "grad_norm": 0.2718111276626587,
      "learning_rate": 2.6352237615827636e-05,
      "loss": 0.0077,
      "step": 2260
    },
    {
      "epoch": 3.46829640947288,
      "grad_norm": 0.2555138170719147,
      "learning_rate": 2.5883322946185777e-05,
      "loss": 0.0129,
      "step": 2270
    },
    {
      "epoch": 3.483575248281131,
      "grad_norm": 0.23297752439975739,
      "learning_rate": 2.5417156376751562e-05,
      "loss": 0.0124,
      "step": 2280
    },
    {
      "epoch": 3.498854087089381,
      "grad_norm": 0.24713806807994843,
      "learning_rate": 2.4953791027299506e-05,
      "loss": 0.008,
      "step": 2290
    },
    {
      "epoch": 3.514132925897632,
      "grad_norm": 0.15914295613765717,
      "learning_rate": 2.4493279698404493e-05,
      "loss": 0.0114,
      "step": 2300
    },
    {
      "epoch": 3.514132925897632,
      "eval_loss": 0.031200075522065163,
      "eval_runtime": 200.6578,
      "eval_samples_per_second": 5.801,
      "eval_steps_per_second": 5.801,
      "step": 2300
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 0.14958317577838898,
      "learning_rate": 2.403567486542518e-05,
      "loss": 0.0107,
      "step": 2310
    },
    {
      "epoch": 3.544690603514133,
      "grad_norm": 0.10404475033283234,
      "learning_rate": 2.3581028672524485e-05,
      "loss": 0.0111,
      "step": 2320
    },
    {
      "epoch": 3.5599694423223838,
      "grad_norm": 0.11570907384157181,
      "learning_rate": 2.312939292672765e-05,
      "loss": 0.0088,
      "step": 2330
    },
    {
      "epoch": 3.575248281130634,
      "grad_norm": 0.1450202912092209,
      "learning_rate": 2.268081909201885e-05,
      "loss": 0.0087,
      "step": 2340
    },
    {
      "epoch": 3.5905271199388844,
      "grad_norm": 0.1596309393644333,
      "learning_rate": 2.2235358283476936e-05,
      "loss": 0.015,
      "step": 2350
    },
    {
      "epoch": 3.5905271199388844,
      "eval_loss": 0.031868599355220795,
      "eval_runtime": 200.4001,
      "eval_samples_per_second": 5.808,
      "eval_steps_per_second": 5.808,
      "step": 2350
    },
    {
      "epoch": 3.605805958747135,
      "grad_norm": 0.13130627572536469,
      "learning_rate": 2.179306126145075e-05,
      "loss": 0.0135,
      "step": 2360
    },
    {
      "epoch": 3.621084797555386,
      "grad_norm": 0.2653070390224457,
      "learning_rate": 2.1353978425775008e-05,
      "loss": 0.0129,
      "step": 2370
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.20412729680538177,
      "learning_rate": 2.091815981002731e-05,
      "loss": 0.0101,
      "step": 2380
    },
    {
      "epoch": 3.651642475171887,
      "grad_norm": 0.18179409205913544,
      "learning_rate": 2.0485655075826667e-05,
      "loss": 0.0141,
      "step": 2390
    },
    {
      "epoch": 3.6669213139801373,
      "grad_norm": 0.3348677158355713,
      "learning_rate": 2.0056513507174685e-05,
      "loss": 0.0088,
      "step": 2400
    },
    {
      "epoch": 3.6669213139801373,
      "eval_loss": 0.030020447447896004,
      "eval_runtime": 200.0438,
      "eval_samples_per_second": 5.819,
      "eval_steps_per_second": 5.819,
      "step": 2400
    },
    {
      "epoch": 3.682200152788388,
      "grad_norm": 0.25341227650642395,
      "learning_rate": 1.963078400483953e-05,
      "loss": 0.0111,
      "step": 2410
    },
    {
      "epoch": 3.697478991596639,
      "grad_norm": 0.2232055962085724,
      "learning_rate": 1.9208515080783723e-05,
      "loss": 0.0087,
      "step": 2420
    },
    {
      "epoch": 3.712757830404889,
      "grad_norm": 0.29884758591651917,
      "learning_rate": 1.8789754852636245e-05,
      "loss": 0.0084,
      "step": 2430
    },
    {
      "epoch": 3.72803666921314,
      "grad_norm": 0.2963765859603882,
      "learning_rate": 1.837455103820942e-05,
      "loss": 0.0173,
      "step": 2440
    },
    {
      "epoch": 3.7433155080213902,
      "grad_norm": 0.06125696748495102,
      "learning_rate": 1.7962950950061502e-05,
      "loss": 0.0068,
      "step": 2450
    },
    {
      "epoch": 3.7433155080213902,
      "eval_loss": 0.03097730502486229,
      "eval_runtime": 199.2868,
      "eval_samples_per_second": 5.841,
      "eval_steps_per_second": 5.841,
      "step": 2450
    },
    {
      "epoch": 3.758594346829641,
      "grad_norm": 0.330082505941391,
      "learning_rate": 1.7555001490105488e-05,
      "loss": 0.0103,
      "step": 2460
    },
    {
      "epoch": 3.7738731856378918,
      "grad_norm": 0.3047909438610077,
      "learning_rate": 1.7150749144264462e-05,
      "loss": 0.0123,
      "step": 2470
    },
    {
      "epoch": 3.789152024446142,
      "grad_norm": 0.09779535233974457,
      "learning_rate": 1.6750239977174682e-05,
      "loss": 0.0097,
      "step": 2480
    },
    {
      "epoch": 3.8044308632543924,
      "grad_norm": 0.3834091126918793,
      "learning_rate": 1.6353519626936397e-05,
      "loss": 0.0145,
      "step": 2490
    },
    {
      "epoch": 3.819709702062643,
      "grad_norm": 0.12687507271766663,
      "learning_rate": 1.596063329991341e-05,
      "loss": 0.0098,
      "step": 2500
    },
    {
      "epoch": 3.819709702062643,
      "eval_loss": 0.02996741607785225,
      "eval_runtime": 199.4136,
      "eval_samples_per_second": 5.837,
      "eval_steps_per_second": 5.837,
      "step": 2500
    },
    {
      "epoch": 3.834988540870894,
      "grad_norm": 0.10428875684738159,
      "learning_rate": 1.5571625765581832e-05,
      "loss": 0.0081,
      "step": 2510
    },
    {
      "epoch": 3.8502673796791442,
      "grad_norm": 0.2717987895011902,
      "learning_rate": 1.5186541351428545e-05,
      "loss": 0.012,
      "step": 2520
    },
    {
      "epoch": 3.865546218487395,
      "grad_norm": 0.6217739582061768,
      "learning_rate": 1.4805423937900087e-05,
      "loss": 0.0138,
      "step": 2530
    },
    {
      "epoch": 3.8808250572956453,
      "grad_norm": 0.14223617315292358,
      "learning_rate": 1.4428316953402526e-05,
      "loss": 0.0095,
      "step": 2540
    },
    {
      "epoch": 3.896103896103896,
      "grad_norm": 0.42163944244384766,
      "learning_rate": 1.4055263369352672e-05,
      "loss": 0.0088,
      "step": 2550
    },
    {
      "epoch": 3.896103896103896,
      "eval_loss": 0.02977598085999489,
      "eval_runtime": 199.4878,
      "eval_samples_per_second": 5.835,
      "eval_steps_per_second": 5.835,
      "step": 2550
    },
    {
      "epoch": 3.911382734912147,
      "grad_norm": 0.27528712153434753,
      "learning_rate": 1.3686305695281559e-05,
      "loss": 0.0111,
      "step": 2560
    },
    {
      "epoch": 3.926661573720397,
      "grad_norm": 0.07628080993890762,
      "learning_rate": 1.3321485973990494e-05,
      "loss": 0.0083,
      "step": 2570
    },
    {
      "epoch": 3.941940412528648,
      "grad_norm": 0.3497057855129242,
      "learning_rate": 1.2960845776760156e-05,
      "loss": 0.0164,
      "step": 2580
    },
    {
      "epoch": 3.9572192513368982,
      "grad_norm": 0.1905433088541031,
      "learning_rate": 1.2604426198613688e-05,
      "loss": 0.0089,
      "step": 2590
    },
    {
      "epoch": 3.972498090145149,
      "grad_norm": 0.29687440395355225,
      "learning_rate": 1.2252267853633798e-05,
      "loss": 0.0081,
      "step": 2600
    },
    {
      "epoch": 3.972498090145149,
      "eval_loss": 0.030624018982052803,
      "eval_runtime": 199.6483,
      "eval_samples_per_second": 5.83,
      "eval_steps_per_second": 5.83,
      "step": 2600
    },
    {
      "epoch": 3.9877769289533997,
      "grad_norm": 0.16313646733760834,
      "learning_rate": 1.1904410870334803e-05,
      "loss": 0.0132,
      "step": 2610
    },
    {
      "epoch": 4.0030557677616505,
      "grad_norm": 0.20121262967586517,
      "learning_rate": 1.1560894887090052e-05,
      "loss": 0.011,
      "step": 2620
    },
    {
      "epoch": 4.0183346065699,
      "grad_norm": 0.3056090772151947,
      "learning_rate": 1.1221759047615004e-05,
      "loss": 0.0084,
      "step": 2630
    },
    {
      "epoch": 4.033613445378151,
      "grad_norm": 0.07052377611398697,
      "learning_rate": 1.0887041996506859e-05,
      "loss": 0.0041,
      "step": 2640
    },
    {
      "epoch": 4.048892284186402,
      "grad_norm": 0.07718514651060104,
      "learning_rate": 1.0556781874841027e-05,
      "loss": 0.0052,
      "step": 2650
    },
    {
      "epoch": 4.048892284186402,
      "eval_loss": 0.031373970210552216,
      "eval_runtime": 199.7835,
      "eval_samples_per_second": 5.826,
      "eval_steps_per_second": 5.826,
      "step": 2650
    },
    {
      "epoch": 4.064171122994653,
      "grad_norm": 0.09308119118213654,
      "learning_rate": 1.0231016315824875e-05,
      "loss": 0.0042,
      "step": 2660
    },
    {
      "epoch": 4.0794499618029025,
      "grad_norm": 0.04948989301919937,
      "learning_rate": 9.909782440509491e-06,
      "loss": 0.0063,
      "step": 2670
    },
    {
      "epoch": 4.094728800611153,
      "grad_norm": 0.328868567943573,
      "learning_rate": 9.593116853559648e-06,
      "loss": 0.0076,
      "step": 2680
    },
    {
      "epoch": 4.110007639419404,
      "grad_norm": 0.03805866092443466,
      "learning_rate": 9.281055639082747e-06,
      "loss": 0.009,
      "step": 2690
    },
    {
      "epoch": 4.125286478227655,
      "grad_norm": 0.16321790218353271,
      "learning_rate": 8.973634356517063e-06,
      "loss": 0.0076,
      "step": 2700
    },
    {
      "epoch": 4.125286478227655,
      "eval_loss": 0.03259371221065521,
      "eval_runtime": 199.7271,
      "eval_samples_per_second": 5.828,
      "eval_steps_per_second": 5.828,
      "step": 2700
    },
    {
      "epoch": 4.140565317035906,
      "grad_norm": 0.2903512120246887,
      "learning_rate": 8.670888036579639e-06,
      "loss": 0.0056,
      "step": 2710
| }, | |
| { | |
| "epoch": 4.1558441558441555, | |
| "grad_norm": 0.06461212784051895, | |
| "learning_rate": 8.372851177274604e-06, | |
| "loss": 0.0064, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.171122994652406, | |
| "grad_norm": 0.3396087884902954, | |
| "learning_rate": 8.079557739962128e-06, | |
| "loss": 0.0063, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.186401833460657, | |
| "grad_norm": 0.14997313916683197, | |
| "learning_rate": 7.791041145488454e-06, | |
| "loss": 0.0065, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.201680672268908, | |
| "grad_norm": 0.1173219084739685, | |
| "learning_rate": 7.507334270377619e-06, | |
| "loss": 0.0091, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.201680672268908, | |
| "eval_loss": 0.03307883441448212, | |
| "eval_runtime": 199.4548, | |
| "eval_samples_per_second": 5.836, | |
| "eval_steps_per_second": 5.836, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.2169595110771585, | |
| "grad_norm": 0.14040367305278778, | |
| "learning_rate": 7.228469443085206e-06, | |
| "loss": 0.0072, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.232238349885408, | |
| "grad_norm": 0.11947222054004669, | |
| "learning_rate": 6.954478440314427e-06, | |
| "loss": 0.0079, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.247517188693659, | |
| "grad_norm": 0.08814125508069992, | |
| "learning_rate": 6.685392483395259e-06, | |
| "loss": 0.0029, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.26279602750191, | |
| "grad_norm": 0.13022877275943756, | |
| "learning_rate": 6.421242234726682e-06, | |
| "loss": 0.0055, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.278074866310161, | |
| "grad_norm": 0.22816994786262512, | |
| "learning_rate": 6.1620577942827166e-06, | |
| "loss": 0.0045, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.278074866310161, | |
| "eval_loss": 0.0341590940952301, | |
| "eval_runtime": 199.141, | |
| "eval_samples_per_second": 5.845, | |
| "eval_steps_per_second": 5.845, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.293353705118411, | |
| "grad_norm": 0.7083064317703247, | |
| "learning_rate": 5.907868696182584e-06, | |
| "loss": 0.0058, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.308632543926661, | |
| "grad_norm": 0.029391461983323097, | |
| "learning_rate": 5.658703905325186e-06, | |
| "loss": 0.0078, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.323911382734912, | |
| "grad_norm": 0.17884346842765808, | |
| "learning_rate": 5.414591814088627e-06, | |
| "loss": 0.0077, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.339190221543163, | |
| "grad_norm": 0.07933899760246277, | |
| "learning_rate": 5.17556023909489e-06, | |
| "loss": 0.0059, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.354469060351414, | |
| "grad_norm": 0.22485603392124176, | |
| "learning_rate": 4.941636418040058e-06, | |
| "loss": 0.0047, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.354469060351414, | |
| "eval_loss": 0.034740038216114044, | |
| "eval_runtime": 199.1325, | |
| "eval_samples_per_second": 5.845, | |
| "eval_steps_per_second": 5.845, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.369747899159664, | |
| "grad_norm": 0.30258139967918396, | |
| "learning_rate": 4.7128470065906925e-06, | |
| "loss": 0.0041, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.385026737967914, | |
| "grad_norm": 0.20538049936294556, | |
| "learning_rate": 4.4892180753462744e-06, | |
| "loss": 0.0039, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.400305576776165, | |
| "grad_norm": 0.1709292232990265, | |
| "learning_rate": 4.270775106868586e-06, | |
| "loss": 0.004, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.415584415584416, | |
| "grad_norm": 0.12943388521671295, | |
| "learning_rate": 4.057542992777868e-06, | |
| "loss": 0.0057, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.4308632543926665, | |
| "grad_norm": 0.07701294124126434, | |
| "learning_rate": 3.849546030916473e-06, | |
| "loss": 0.0047, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.4308632543926665, | |
| "eval_loss": 0.03577398136258125, | |
| "eval_runtime": 199.2545, | |
| "eval_samples_per_second": 5.842, | |
| "eval_steps_per_second": 5.842, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.446142093200916, | |
| "grad_norm": 0.42437708377838135, | |
| "learning_rate": 3.646807922580098e-06, | |
| "loss": 0.004, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.461420932009167, | |
| "grad_norm": 0.21150662004947662, | |
| "learning_rate": 3.4493517698170164e-06, | |
| "loss": 0.0082, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.476699770817418, | |
| "grad_norm": 0.15437455475330353, | |
| "learning_rate": 3.2572000727956186e-06, | |
| "loss": 0.0058, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.491978609625669, | |
| "grad_norm": 0.3453219532966614, | |
| "learning_rate": 3.070374727240466e-06, | |
| "loss": 0.0043, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.507257448433919, | |
| "grad_norm": 0.1276451051235199, | |
| "learning_rate": 2.8888970219373314e-06, | |
| "loss": 0.005, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.507257448433919, | |
| "eval_loss": 0.03592822700738907, | |
| "eval_runtime": 199.3782, | |
| "eval_samples_per_second": 5.838, | |
| "eval_steps_per_second": 5.838, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.522536287242169, | |
| "grad_norm": 0.09260442107915878, | |
| "learning_rate": 2.7127876363072736e-06, | |
| "loss": 0.0031, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.53781512605042, | |
| "grad_norm": 0.21995659172534943, | |
| "learning_rate": 2.54206663805025e-06, | |
| "loss": 0.0057, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.553093964858671, | |
| "grad_norm": 0.0280003622174263, | |
| "learning_rate": 2.3767534808584125e-06, | |
| "loss": 0.0026, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.5683728036669216, | |
| "grad_norm": 0.11262509971857071, | |
| "learning_rate": 2.2168670021993075e-06, | |
| "loss": 0.0078, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.583651642475171, | |
| "grad_norm": 0.32838335633277893, | |
| "learning_rate": 2.0624254211693894e-06, | |
| "loss": 0.0049, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.583651642475171, | |
| "eval_loss": 0.0363006666302681, | |
| "eval_runtime": 199.2158, | |
| "eval_samples_per_second": 5.843, | |
| "eval_steps_per_second": 5.843, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.598930481283422, | |
| "grad_norm": 0.17691144347190857, | |
| "learning_rate": 1.9134463364179177e-06, | |
| "loss": 0.007, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.614209320091673, | |
| "grad_norm": 0.4434875547885895, | |
| "learning_rate": 1.7699467241416024e-06, | |
| "loss": 0.0073, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.629488158899924, | |
| "grad_norm": 0.17472083866596222, | |
| "learning_rate": 1.6319429361501714e-06, | |
| "loss": 0.0045, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.6447669977081745, | |
| "grad_norm": 0.05675290897488594, | |
| "learning_rate": 1.4994506980030577e-06, | |
| "loss": 0.0019, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.660045836516424, | |
| "grad_norm": 0.3393559157848358, | |
| "learning_rate": 1.3724851072174917e-06, | |
| "loss": 0.0039, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.660045836516424, | |
| "eval_loss": 0.036286067217588425, | |
| "eval_runtime": 198.942, | |
| "eval_samples_per_second": 5.851, | |
| "eval_steps_per_second": 5.851, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.675324675324675, | |
| "grad_norm": 0.2996921241283417, | |
| "learning_rate": 1.251060631548112e-06, | |
| "loss": 0.0107, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.690603514132926, | |
| "grad_norm": 0.1846129596233368, | |
| "learning_rate": 1.135191107338368e-06, | |
| "loss": 0.0029, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.705882352941177, | |
| "grad_norm": 0.33350446820259094, | |
| "learning_rate": 1.0248897379438904e-06, | |
| "loss": 0.0054, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.721161191749427, | |
| "grad_norm": 0.18361401557922363, | |
| "learning_rate": 9.201690922279405e-07, | |
| "loss": 0.0052, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.736440030557677, | |
| "grad_norm": 0.1535758525133133, | |
| "learning_rate": 8.210411031291776e-07, | |
| "loss": 0.0062, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.736440030557677, | |
| "eval_loss": 0.03661744296550751, | |
| "eval_runtime": 199.1326, | |
| "eval_samples_per_second": 5.845, | |
| "eval_steps_per_second": 5.845, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.751718869365928, | |
| "grad_norm": 0.1135258898139, | |
| "learning_rate": 7.275170663019415e-07, | |
| "loss": 0.0061, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.766997708174179, | |
| "grad_norm": 0.3599923253059387, | |
| "learning_rate": 6.396076388290484e-07, | |
| "loss": 0.0043, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.7822765469824295, | |
| "grad_norm": 0.06559276580810547, | |
| "learning_rate": 5.573228380074736e-07, | |
| "loss": 0.0054, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.79755538579068, | |
| "grad_norm": 0.02168980799615383, | |
| "learning_rate": 4.806720402068477e-07, | |
| "loss": 0.0031, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.81283422459893, | |
| "grad_norm": 0.08103174716234207, | |
| "learning_rate": 4.0966397980100604e-07, | |
| "loss": 0.0054, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.81283422459893, | |
| "eval_loss": 0.03663839399814606, | |
| "eval_runtime": 198.8473, | |
| "eval_samples_per_second": 5.854, | |
| "eval_steps_per_second": 5.854, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.828113063407181, | |
| "grad_norm": 0.16072829067707062, | |
| "learning_rate": 3.4430674817274575e-07, | |
| "loss": 0.0043, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.843391902215432, | |
| "grad_norm": 0.05386335402727127, | |
| "learning_rate": 2.8460779279176896e-07, | |
| "loss": 0.0071, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.8586707410236825, | |
| "grad_norm": 0.18377594649791718, | |
| "learning_rate": 2.3057391636606696e-07, | |
| "loss": 0.0035, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.873949579831933, | |
| "grad_norm": 0.5214228630065918, | |
| "learning_rate": 1.8221127606674605e-07, | |
| "loss": 0.0078, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.889228418640183, | |
| "grad_norm": 0.09072000533342361, | |
| "learning_rate": 1.3952538282639982e-07, | |
| "loss": 0.0041, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.889228418640183, | |
| "eval_loss": 0.03660115599632263, | |
| "eval_runtime": 198.4598, | |
| "eval_samples_per_second": 5.865, | |
| "eval_steps_per_second": 5.865, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.904507257448434, | |
| "grad_norm": 0.4262472093105316, | |
| "learning_rate": 1.025211007111615e-07, | |
| "loss": 0.0048, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.919786096256685, | |
| "grad_norm": 0.13413435220718384, | |
| "learning_rate": 7.120264636643615e-08, | |
| "loss": 0.0023, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.935064935064935, | |
| "grad_norm": 0.03055586665868759, | |
| "learning_rate": 4.5573588536407254e-08, | |
| "loss": 0.006, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.950343773873186, | |
| "grad_norm": 0.36961618065834045, | |
| "learning_rate": 2.5636847657367623e-08, | |
| "loss": 0.0094, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.965622612681436, | |
| "grad_norm": 0.05908333510160446, | |
| "learning_rate": 1.1394695524963306e-08, | |
| "loss": 0.0047, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.965622612681436, | |
| "eval_loss": 0.03658132627606392, | |
| "eval_runtime": 198.452, | |
| "eval_samples_per_second": 5.865, | |
| "eval_steps_per_second": 5.865, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.980901451489687, | |
| "grad_norm": 0.10072652250528336, | |
| "learning_rate": 2.8487550352951363e-09, | |
| "loss": 0.0064, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 4.9961802902979375, | |
| "grad_norm": 0.12432490289211273, | |
| "learning_rate": 0.0, | |
| "loss": 0.0078, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 4.9961802902979375, | |
| "step": 3270, | |
| "total_flos": 8.84620635703935e+17, | |
| "train_loss": 0.04931148957939596, | |
| "train_runtime": 42656.8883, | |
| "train_samples_per_second": 1.227, | |
| "train_steps_per_second": 0.077 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3270, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.84620635703935e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
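
The file above is the `trainer_state.json` that the Hugging Face `Trainer` writes alongside its checkpoints: `log_history` interleaves training entries (`loss`, `grad_norm`, `learning_rate`, logged every `logging_steps` = 10 steps), evaluation entries (`eval_loss` plus runtime and throughput, logged here at every multiple of 50 steps), and one final summary entry (`train_loss`, `train_runtime`, `total_flos`). Below is a minimal sketch of how such a file can be consumed; it assumes the JSON has been saved locally as `trainer_state.json` (the file name is illustrative and not part of the run itself).

```python
# Minimal sketch: load the trainer state above and summarize its metrics.
# Assumes the JSON is saved as "trainer_state.json" (illustrative name).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
train_logs = [e for e in history if "loss" in e]       # per-step training entries
eval_logs = [e for e in history if "eval_loss" in e]   # periodic evaluation entries
summary = history[-1]                                  # final aggregate entry

best = min(eval_logs, key=lambda e: e["eval_loss"])
last = eval_logs[-1]

print(f"train entries logged: {len(train_logs)} (every {state['logging_steps']} steps)")
print(f"mean train loss     : {summary['train_loss']:.4f} over {summary['step']} steps")
print(f"best eval_loss      : {best['eval_loss']:.5f} at step {best['step']}")
print(f"final eval_loss     : {last['eval_loss']:.5f} at step {last['step']}")
```

One thing this tail of the run makes visible: while the per-step training loss hovers around 0.005, `eval_loss` bottoms out near step 2550 (about 0.0298) and then drifts back up to about 0.0366 by step 3250, the usual signature of mild overfitting in the final epochs. In practice one would therefore restore an earlier checkpoint (one is saved every `save_steps` = 50 steps) rather than the final weights.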