{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6781576716586606,
  "eval_steps": 300,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0028256569652444193,
      "grad_norm": 125.03303527832031,
      "learning_rate": 6.666666666666666e-07,
      "loss": 19.3957,
      "mean_token_accuracy": 0.443931570649147,
      "step": 5
    },
    {
      "epoch": 0.005651313930488839,
      "grad_norm": 179.0009765625,
      "learning_rate": 1.5e-06,
      "loss": 19.1611,
      "mean_token_accuracy": 0.4493979915976524,
      "step": 10
    },
    {
      "epoch": 0.008476970895733259,
      "grad_norm": 117.55085754394531,
      "learning_rate": 2.3333333333333336e-06,
      "loss": 19.097,
      "mean_token_accuracy": 0.45788785591721537,
      "step": 15
    },
    {
      "epoch": 0.011302627860977677,
      "grad_norm": 109.04960632324219,
      "learning_rate": 3.1666666666666667e-06,
      "loss": 19.207,
      "mean_token_accuracy": 0.4440133422613144,
      "step": 20
    },
    {
      "epoch": 0.014128284826222097,
      "grad_norm": 118.16978454589844,
      "learning_rate": 4e-06,
      "loss": 17.2711,
      "mean_token_accuracy": 0.4634488716721535,
      "step": 25
    },
    {
      "epoch": 0.016953941791466517,
      "grad_norm": 161.21022033691406,
      "learning_rate": 4.833333333333333e-06,
      "loss": 17.7255,
      "mean_token_accuracy": 0.4570723704993725,
      "step": 30
    },
    {
      "epoch": 0.019779598756710936,
      "grad_norm": 111.49407958984375,
      "learning_rate": 5.666666666666667e-06,
      "loss": 18.1505,
      "mean_token_accuracy": 0.46947305351495744,
      "step": 35
    },
    {
      "epoch": 0.022605255721955354,
      "grad_norm": 94.02478790283203,
      "learning_rate": 5.999955686683124e-06,
      "loss": 16.5811,
      "mean_token_accuracy": 0.4923714891076088,
      "step": 40
    },
    {
      "epoch": 0.025430912687199773,
      "grad_norm": 93.49138641357422,
      "learning_rate": 5.999684887820798e-06,
      "loss": 14.6779,
      "mean_token_accuracy": 0.4900531888008118,
      "step": 45
    },
    {
      "epoch": 0.028256569652444195,
      "grad_norm": 89.19295501708984,
      "learning_rate": 5.9991679308007015e-06,
      "loss": 15.0693,
      "mean_token_accuracy": 0.48212875574827196,
      "step": 50
    },
    {
      "epoch": 0.031082226617688613,
      "grad_norm": 117.02027130126953,
      "learning_rate": 5.998404858045021e-06,
      "loss": 14.6772,
      "mean_token_accuracy": 0.4899917796254158,
      "step": 55
    },
    {
      "epoch": 0.033907883582933035,
      "grad_norm": 72.73108673095703,
      "learning_rate": 5.997395732172529e-06,
      "loss": 13.4677,
      "mean_token_accuracy": 0.4900201290845871,
      "step": 60
    },
    {
      "epoch": 0.03673354054817745,
      "grad_norm": 77.72334289550781,
      "learning_rate": 5.996140635993444e-06,
      "loss": 13.2612,
      "mean_token_accuracy": 0.5328952759504318,
      "step": 65
    },
    {
      "epoch": 0.03955919751342187,
      "grad_norm": 80.2801284790039,
      "learning_rate": 5.994639672502639e-06,
      "loss": 11.504,
      "mean_token_accuracy": 0.559516441822052,
      "step": 70
    },
    {
      "epoch": 0.04238485447866629,
      "grad_norm": 61.040287017822266,
      "learning_rate": 5.992892964871187e-06,
      "loss": 11.0794,
      "mean_token_accuracy": 0.5566341817378998,
      "step": 75
    },
    {
      "epoch": 0.04521051144391071,
      "grad_norm": 61.21868896484375,
      "learning_rate": 5.990900656436255e-06,
      "loss": 11.9816,
      "mean_token_accuracy": 0.5282905742526054,
      "step": 80
    },
    {
      "epoch": 0.04803616840915513,
      "grad_norm": 114.57307434082031,
      "learning_rate": 5.988662910689342e-06,
      "loss": 10.3066,
      "mean_token_accuracy": 0.5789915665984153,
      "step": 85
    },
    {
      "epoch": 0.050861825374399545,
      "grad_norm": 76.79177856445312,
      "learning_rate": 5.986179911262859e-06,
      "loss": 10.7346,
      "mean_token_accuracy": 0.5474225521087647,
      "step": 90
    },
    {
      "epoch": 0.05368748233964397,
      "grad_norm": 50.477203369140625,
      "learning_rate": 5.983451861915061e-06,
      "loss": 11.5344,
      "mean_token_accuracy": 0.5186601117253303,
      "step": 95
    },
    {
      "epoch": 0.05651313930488839,
      "grad_norm": 65.024169921875,
      "learning_rate": 5.980478986513332e-06,
      "loss": 10.1865,
      "mean_token_accuracy": 0.5476699441671371,
      "step": 100
    },
    {
      "epoch": 0.059338796270132804,
      "grad_norm": 78.97113037109375,
      "learning_rate": 5.977261529015807e-06,
      "loss": 10.5502,
      "mean_token_accuracy": 0.5594380274415016,
      "step": 105
    },
    {
      "epoch": 0.062164453235377226,
      "grad_norm": 63.45710372924805,
      "learning_rate": 5.9737997534513565e-06,
      "loss": 10.1732,
      "mean_token_accuracy": 0.5498600453138351,
      "step": 110
    },
    {
      "epoch": 0.06499011020062165,
      "grad_norm": 43.65032958984375,
      "learning_rate": 5.970093943897915e-06,
      "loss": 9.483,
      "mean_token_accuracy": 0.5819845095276832,
      "step": 115
    },
    {
      "epoch": 0.06781576716586607,
      "grad_norm": 65.38373565673828,
      "learning_rate": 5.966144404459178e-06,
      "loss": 9.8998,
      "mean_token_accuracy": 0.5803950399160385,
      "step": 120
    },
    {
      "epoch": 0.07064142413111048,
      "grad_norm": 51.20427703857422,
      "learning_rate": 5.96195145923964e-06,
      "loss": 9.5007,
      "mean_token_accuracy": 0.5401258394122124,
      "step": 125
    },
    {
      "epoch": 0.0734670810963549,
      "grad_norm": 43.631324768066406,
      "learning_rate": 5.957515452317996e-06,
      "loss": 9.3663,
      "mean_token_accuracy": 0.5585527911782264,
      "step": 130
    },
    {
      "epoch": 0.07629273806159932,
      "grad_norm": 49.34850311279297,
      "learning_rate": 5.952836747718916e-06,
      "loss": 10.0569,
      "mean_token_accuracy": 0.5572537362575531,
      "step": 135
    },
    {
      "epoch": 0.07911839502684374,
      "grad_norm": 56.23272705078125,
      "learning_rate": 5.947915729383162e-06,
      "loss": 9.1624,
      "mean_token_accuracy": 0.6116252914071083,
      "step": 140
    },
    {
      "epoch": 0.08194405199208817,
      "grad_norm": 54.321495056152344,
      "learning_rate": 5.942752801136086e-06,
      "loss": 9.0426,
      "mean_token_accuracy": 0.5830309092998505,
      "step": 145
    },
    {
      "epoch": 0.08476970895733257,
      "grad_norm": 49.35163116455078,
      "learning_rate": 5.937348386654492e-06,
      "loss": 8.7897,
      "mean_token_accuracy": 0.5833149090409279,
      "step": 150
    },
    {
      "epoch": 0.087595365922577,
      "grad_norm": 41.16096878051758,
      "learning_rate": 5.9317029294318685e-06,
      "loss": 9.2698,
      "mean_token_accuracy": 0.5732270866632462,
      "step": 155
    },
    {
      "epoch": 0.09042102288782142,
      "grad_norm": 56.901546478271484,
      "learning_rate": 5.925816892741992e-06,
      "loss": 8.8971,
      "mean_token_accuracy": 0.5911598846316337,
      "step": 160
    },
    {
      "epoch": 0.09324667985306584,
      "grad_norm": 37.740020751953125,
      "learning_rate": 5.919690759600914e-06,
      "loss": 9.3825,
      "mean_token_accuracy": 0.5646735802292824,
      "step": 165
    },
    {
      "epoch": 0.09607233681831026,
      "grad_norm": 43.82129669189453,
      "learning_rate": 5.913325032727323e-06,
      "loss": 8.921,
      "mean_token_accuracy": 0.5748174145817757,
      "step": 170
    },
    {
      "epoch": 0.09889799378355468,
      "grad_norm": 43.662750244140625,
      "learning_rate": 5.906720234501286e-06,
      "loss": 8.329,
      "mean_token_accuracy": 0.5832746580243111,
      "step": 175
    },
    {
      "epoch": 0.10172365074879909,
      "grad_norm": 63.78599548339844,
      "learning_rate": 5.899876906921388e-06,
      "loss": 9.403,
      "mean_token_accuracy": 0.5746660903096199,
      "step": 180
    },
    {
      "epoch": 0.10454930771404351,
      "grad_norm": 38.50173568725586,
      "learning_rate": 5.892795611560252e-06,
      "loss": 7.9569,
      "mean_token_accuracy": 0.6175401106476783,
      "step": 185
    },
    {
      "epoch": 0.10737496467928793,
      "grad_norm": 46.76047134399414,
      "learning_rate": 5.885476929518457e-06,
      "loss": 8.8664,
      "mean_token_accuracy": 0.5698649421334266,
      "step": 190
    },
    {
      "epoch": 0.11020062164453236,
      "grad_norm": 34.551719665527344,
      "learning_rate": 5.877921461376848e-06,
      "loss": 8.9507,
      "mean_token_accuracy": 0.5774942457675933,
      "step": 195
    },
    {
      "epoch": 0.11302627860977678,
      "grad_norm": 46.14909362792969,
      "learning_rate": 5.8701298271472565e-06,
      "loss": 8.3937,
      "mean_token_accuracy": 0.5869078159332275,
      "step": 200
    },
    {
      "epoch": 0.11585193557502119,
      "grad_norm": 38.04121017456055,
      "learning_rate": 5.862102666221617e-06,
      "loss": 9.1566,
      "mean_token_accuracy": 0.5589849069714546,
      "step": 205
    },
    {
      "epoch": 0.11867759254026561,
      "grad_norm": 47.98865509033203,
      "learning_rate": 5.853840637319504e-06,
      "loss": 8.3272,
      "mean_token_accuracy": 0.613035187125206,
      "step": 210
    },
    {
      "epoch": 0.12150324950551003,
      "grad_norm": 52.02131271362305,
      "learning_rate": 5.845344418434068e-06,
      "loss": 9.2413,
      "mean_token_accuracy": 0.5741102159023285,
      "step": 215
    },
    {
      "epoch": 0.12432890647075445,
      "grad_norm": 46.88343048095703,
      "learning_rate": 5.8366147067764056e-06,
      "loss": 8.468,
      "mean_token_accuracy": 0.5816206842660904,
      "step": 220
    },
    {
      "epoch": 0.12715456343599887,
      "grad_norm": 43.369300842285156,
      "learning_rate": 5.8276522187183435e-06,
      "loss": 7.4452,
      "mean_token_accuracy": 0.6464217156171799,
      "step": 225
    },
    {
      "epoch": 0.1299802204012433,
      "grad_norm": 46.008766174316406,
      "learning_rate": 5.818457689733649e-06,
      "loss": 8.2276,
      "mean_token_accuracy": 0.6063334688544273,
      "step": 230
    },
    {
      "epoch": 0.13280587736648772,
      "grad_norm": 38.97382736206055,
      "learning_rate": 5.809031874337681e-06,
      "loss": 8.029,
      "mean_token_accuracy": 0.6226910144090653,
      "step": 235
    },
    {
      "epoch": 0.13563153433173214,
      "grad_norm": 44.3059196472168,
      "learning_rate": 5.7993755460254685e-06,
      "loss": 8.4051,
      "mean_token_accuracy": 0.5975374907255173,
      "step": 240
    },
    {
      "epoch": 0.13845719129697653,
      "grad_norm": 56.00782012939453,
      "learning_rate": 5.789489497208243e-06,
      "loss": 8.6075,
      "mean_token_accuracy": 0.5798447385430336,
      "step": 245
    },
    {
      "epoch": 0.14128284826222096,
      "grad_norm": 39.610755920410156,
      "learning_rate": 5.779374539148403e-06,
      "loss": 7.9846,
      "mean_token_accuracy": 0.593911099433899,
      "step": 250
    },
    {
      "epoch": 0.14410850522746538,
      "grad_norm": 40.99591827392578,
      "learning_rate": 5.769031501892949e-06,
      "loss": 8.3259,
      "mean_token_accuracy": 0.5966995969414711,
      "step": 255
    },
    {
      "epoch": 0.1469341621927098,
      "grad_norm": 37.0196533203125,
      "learning_rate": 5.7584612342053655e-06,
      "loss": 7.7289,
      "mean_token_accuracy": 0.6313483536243438,
      "step": 260
    },
    {
      "epoch": 0.14975981915795422,
      "grad_norm": 36.436134338378906,
      "learning_rate": 5.7476646034959705e-06,
      "loss": 9.042,
      "mean_token_accuracy": 0.5726025938987732,
      "step": 265
    },
    {
      "epoch": 0.15258547612319864,
      "grad_norm": 49.964351654052734,
      "learning_rate": 5.736642495750733e-06,
      "loss": 7.8111,
      "mean_token_accuracy": 0.6032327204942703,
      "step": 270
    },
    {
      "epoch": 0.15541113308844307,
      "grad_norm": 37.209232330322266,
      "learning_rate": 5.725395815458571e-06,
      "loss": 6.2546,
      "mean_token_accuracy": 0.6550982385873795,
      "step": 275
    },
    {
      "epoch": 0.1582367900536875,
      "grad_norm": 39.0145149230957,
      "learning_rate": 5.713925485537126e-06,
      "loss": 8.2466,
      "mean_token_accuracy": 0.5892508149147033,
      "step": 280
    },
    {
      "epoch": 0.1610624470189319,
      "grad_norm": 46.74445724487305,
      "learning_rate": 5.702232447257029e-06,
      "loss": 8.1135,
      "mean_token_accuracy": 0.6003284469246865,
      "step": 285
    },
    {
      "epoch": 0.16388810398417633,
      "grad_norm": 61.96944046020508,
      "learning_rate": 5.6903176601646535e-06,
      "loss": 8.0814,
      "mean_token_accuracy": 0.6058224648237228,
      "step": 290
    },
    {
      "epoch": 0.16671376094942075,
      "grad_norm": 41.85062026977539,
      "learning_rate": 5.6781821020033794e-06,
      "loss": 7.7756,
      "mean_token_accuracy": 0.6040422543883324,
      "step": 295
    },
    {
      "epoch": 0.16953941791466515,
      "grad_norm": 37.07870864868164,
      "learning_rate": 5.665826768633358e-06,
      "loss": 7.4973,
      "mean_token_accuracy": 0.6139729157090187,
      "step": 300
    },
    {
      "epoch": 0.16953941791466515,
      "eval_loss": 1.9660394191741943,
      "eval_mean_token_accuracy": 0.6081940480295172,
      "eval_runtime": 60.361,
      "eval_samples_per_second": 26.06,
      "eval_steps_per_second": 3.264,
      "step": 300
    },
    {
      "epoch": 0.17236507487990957,
      "grad_norm": 36.64670181274414,
      "learning_rate": 5.6532526739497834e-06,
      "loss": 7.6396,
      "mean_token_accuracy": 0.6107118725776672,
      "step": 305
    },
    {
      "epoch": 0.175190731845154,
      "grad_norm": 54.716243743896484,
      "learning_rate": 5.640460849799702e-06,
      "loss": 7.2263,
      "mean_token_accuracy": 0.6144478976726532,
      "step": 310
    },
    {
      "epoch": 0.1780163888103984,
      "grad_norm": 55.097808837890625,
      "learning_rate": 5.627452345897328e-06,
      "loss": 7.0887,
      "mean_token_accuracy": 0.6283742040395737,
      "step": 315
    },
    {
      "epoch": 0.18084204577564283,
      "grad_norm": 61.68784713745117,
      "learning_rate": 5.614228229737906e-06,
      "loss": 7.8888,
      "mean_token_accuracy": 0.6202467530965805,
      "step": 320
    },
    {
      "epoch": 0.18366770274088726,
      "grad_norm": 50.16268539428711,
      "learning_rate": 5.600789586510113e-06,
      "loss": 8.1697,
      "mean_token_accuracy": 0.6052482485771179,
      "step": 325
    },
    {
      "epoch": 0.18649335970613168,
      "grad_norm": 34.98442840576172,
      "learning_rate": 5.587137519007004e-06,
      "loss": 7.6766,
      "mean_token_accuracy": 0.610385374724865,
      "step": 330
    },
    {
      "epoch": 0.1893190166713761,
      "grad_norm": 54.69295120239258,
      "learning_rate": 5.5732731475355135e-06,
      "loss": 7.4116,
      "mean_token_accuracy": 0.6147442162036896,
      "step": 335
    },
    {
      "epoch": 0.19214467363662052,
      "grad_norm": 36.03273391723633,
      "learning_rate": 5.559197609824526e-06,
      "loss": 7.7704,
      "mean_token_accuracy": 0.6110788837075234,
      "step": 340
    },
    {
      "epoch": 0.19497033060186494,
      "grad_norm": 45.89215087890625,
      "learning_rate": 5.544912060931511e-06,
      "loss": 6.0621,
      "mean_token_accuracy": 0.6817868202924728,
      "step": 345
    },
    {
      "epoch": 0.19779598756710937,
      "grad_norm": 43.32875442504883,
      "learning_rate": 5.530417673147736e-06,
      "loss": 7.5385,
      "mean_token_accuracy": 0.6150149628520012,
      "step": 350
    },
    {
      "epoch": 0.20062164453235376,
      "grad_norm": 35.37779998779297,
      "learning_rate": 5.515715635902064e-06,
      "loss": 6.6067,
      "mean_token_accuracy": 0.6687664300203323,
      "step": 355
    },
    {
      "epoch": 0.20344730149759818,
      "grad_norm": 44.46392822265625,
      "learning_rate": 5.500807155663359e-06,
      "loss": 6.2822,
      "mean_token_accuracy": 0.6637924790382386,
      "step": 360
    },
    {
      "epoch": 0.2062729584628426,
      "grad_norm": 36.08877182006836,
      "learning_rate": 5.485693455841464e-06,
      "loss": 6.894,
      "mean_token_accuracy": 0.6358136102557183,
      "step": 365
    },
    {
      "epoch": 0.20909861542808703,
      "grad_norm": 34.14577865600586,
      "learning_rate": 5.470375776686822e-06,
      "loss": 7.4445,
      "mean_token_accuracy": 0.6168029010295868,
      "step": 370
    },
    {
      "epoch": 0.21192427239333145,
      "grad_norm": 40.84408950805664,
      "learning_rate": 5.454855375188691e-06,
      "loss": 7.0881,
      "mean_token_accuracy": 0.6331988245248794,
      "step": 375
    },
    {
      "epoch": 0.21474992935857587,
      "grad_norm": 47.68820571899414,
      "learning_rate": 5.439133524971994e-06,
      "loss": 7.1198,
      "mean_token_accuracy": 0.6500597685575485,
      "step": 380
    },
    {
      "epoch": 0.2175755863238203,
      "grad_norm": 62.971771240234375,
      "learning_rate": 5.4232115161928125e-06,
      "loss": 7.3537,
      "mean_token_accuracy": 0.6475826740264893,
      "step": 385
    },
    {
      "epoch": 0.2204012432890647,
      "grad_norm": 47.17545700073242,
      "learning_rate": 5.407090655432498e-06,
      "loss": 6.484,
      "mean_token_accuracy": 0.6483543753623963,
      "step": 390
    },
    {
      "epoch": 0.22322690025430914,
      "grad_norm": 47.76310348510742,
      "learning_rate": 5.390772265590469e-06,
      "loss": 7.1721,
      "mean_token_accuracy": 0.6379205271601677,
      "step": 395
    },
    {
      "epoch": 0.22605255721955356,
      "grad_norm": 41.001304626464844,
      "learning_rate": 5.374257685775642e-06,
      "loss": 7.3882,
      "mean_token_accuracy": 0.6161196917295456,
      "step": 400
    },
    {
      "epoch": 0.22887821418479798,
      "grad_norm": 41.1621208190918,
      "learning_rate": 5.357548271196545e-06,
      "loss": 6.6539,
      "mean_token_accuracy": 0.6448143661022187,
      "step": 405
    },
    {
      "epoch": 0.23170387115004237,
      "grad_norm": 56.77714920043945,
      "learning_rate": 5.34064539305011e-06,
      "loss": 6.7501,
      "mean_token_accuracy": 0.6442455291748047,
      "step": 410
    },
    {
      "epoch": 0.2345295281152868,
      "grad_norm": 34.29147720336914,
      "learning_rate": 5.323550438409145e-06,
      "loss": 7.1571,
      "mean_token_accuracy": 0.6502243876457214,
      "step": 415
    },
    {
      "epoch": 0.23735518508053122,
      "grad_norm": 38.84526824951172,
      "learning_rate": 5.306264810108515e-06,
      "loss": 7.4697,
      "mean_token_accuracy": 0.6136257261037826,
      "step": 420
    },
    {
      "epoch": 0.24018084204577564,
      "grad_norm": 39.21842575073242,
      "learning_rate": 5.288789926630018e-06,
      "loss": 5.6676,
      "mean_token_accuracy": 0.6803564548492431,
      "step": 425
    },
    {
      "epoch": 0.24300649901102006,
      "grad_norm": 43.55912780761719,
      "learning_rate": 5.27112722198599e-06,
      "loss": 7.6674,
      "mean_token_accuracy": 0.6143711119890213,
      "step": 430
    },
    {
      "epoch": 0.24583215597626448,
      "grad_norm": 47.082237243652344,
      "learning_rate": 5.253278145601618e-06,
      "loss": 6.9372,
      "mean_token_accuracy": 0.6543397754430771,
      "step": 435
    },
    {
      "epoch": 0.2486578129415089,
      "grad_norm": 71.78733825683594,
      "learning_rate": 5.235244162196007e-06,
      "loss": 6.3831,
      "mean_token_accuracy": 0.6731083989143372,
      "step": 440
    },
    {
      "epoch": 0.2514834699067533,
      "grad_norm": 41.5792121887207,
      "learning_rate": 5.217026751661978e-06,
      "loss": 5.9193,
      "mean_token_accuracy": 0.6984533488750457,
      "step": 445
    },
    {
      "epoch": 0.25430912687199775,
      "grad_norm": 32.45619201660156,
      "learning_rate": 5.198627408944628e-06,
      "loss": 6.1931,
      "mean_token_accuracy": 0.6726161792874337,
      "step": 450
    },
    {
      "epoch": 0.25713478383724214,
      "grad_norm": 54.69667434692383,
      "learning_rate": 5.180047643918653e-06,
      "loss": 5.6802,
      "mean_token_accuracy": 0.700816172361374,
      "step": 455
    },
    {
      "epoch": 0.2599604408024866,
      "grad_norm": 52.139469146728516,
      "learning_rate": 5.161288981264445e-06,
      "loss": 7.4401,
      "mean_token_accuracy": 0.6105516791343689,
      "step": 460
    },
    {
      "epoch": 0.262786097767731,
      "grad_norm": 38.770294189453125,
      "learning_rate": 5.142352960342976e-06,
      "loss": 6.3299,
      "mean_token_accuracy": 0.6778733760118485,
      "step": 465
    },
    {
      "epoch": 0.26561175473297544,
      "grad_norm": 44.44423294067383,
      "learning_rate": 5.123241135069471e-06,
      "loss": 6.3576,
      "mean_token_accuracy": 0.6793156564235687,
      "step": 470
    },
    {
      "epoch": 0.26843741169821983,
      "grad_norm": 52.27141189575195,
      "learning_rate": 5.103955073785902e-06,
      "loss": 5.4571,
      "mean_token_accuracy": 0.7010635808110237,
      "step": 475
    },
    {
      "epoch": 0.2712630686634643,
      "grad_norm": 41.901485443115234,
      "learning_rate": 5.084496359132275e-06,
      "loss": 6.2462,
      "mean_token_accuracy": 0.6626246273517609,
      "step": 480
    },
    {
      "epoch": 0.2740887256287087,
      "grad_norm": 39.49611282348633,
      "learning_rate": 5.064866587916764e-06,
      "loss": 6.3324,
      "mean_token_accuracy": 0.6691112801432609,
      "step": 485
    },
    {
      "epoch": 0.27691438259395307,
      "grad_norm": 41.7744140625,
      "learning_rate": 5.045067370984676e-06,
      "loss": 7.2696,
      "mean_token_accuracy": 0.6365453451871872,
      "step": 490
    },
    {
      "epoch": 0.2797400395591975,
      "grad_norm": 49.72770690917969,
      "learning_rate": 5.02510033308626e-06,
      "loss": 6.5657,
      "mean_token_accuracy": 0.6508561789989471,
      "step": 495
    },
    {
      "epoch": 0.2825656965244419,
      "grad_norm": 32.61491394042969,
      "learning_rate": 5.004967112743376e-06,
      "loss": 6.6128,
      "mean_token_accuracy": 0.6579498335719108,
      "step": 500
    },
    {
      "epoch": 0.28539135348968636,
      "grad_norm": 49.389400482177734,
      "learning_rate": 4.984669362115039e-06,
      "loss": 6.7253,
      "mean_token_accuracy": 0.6457798436284066,
      "step": 505
    },
    {
      "epoch": 0.28821701045493076,
      "grad_norm": 51.6247673034668,
      "learning_rate": 4.964208746861841e-06,
      "loss": 6.2031,
      "mean_token_accuracy": 0.6751710534095764,
      "step": 510
    },
    {
      "epoch": 0.2910426674201752,
      "grad_norm": 28.600732803344727,
      "learning_rate": 4.943586946009259e-06,
      "loss": 6.6904,
      "mean_token_accuracy": 0.6619319871068001,
      "step": 515
    },
    {
      "epoch": 0.2938683243854196,
      "grad_norm": 43.22124481201172,
      "learning_rate": 4.92280565180988e-06,
      "loss": 6.7604,
      "mean_token_accuracy": 0.6478939458727837,
      "step": 520
    },
    {
      "epoch": 0.29669398135066405,
      "grad_norm": 41.86116027832031,
      "learning_rate": 4.901866569604527e-06,
      "loss": 6.0308,
      "mean_token_accuracy": 0.6734458118677139,
      "step": 525
    },
    {
      "epoch": 0.29951963831590844,
      "grad_norm": 41.02956771850586,
      "learning_rate": 4.8807714176823205e-06,
      "loss": 7.0681,
      "mean_token_accuracy": 0.6355025738477706,
      "step": 530
    },
    {
      "epoch": 0.3023452952811529,
      "grad_norm": 44.11521530151367,
      "learning_rate": 4.859521927139664e-06,
      "loss": 6.1855,
      "mean_token_accuracy": 0.6703523576259613,
      "step": 535
    },
    {
      "epoch": 0.3051709522463973,
      "grad_norm": 32.89723587036133,
      "learning_rate": 4.838119841738205e-06,
      "loss": 6.0888,
      "mean_token_accuracy": 0.6834497556090355,
      "step": 540
    },
    {
      "epoch": 0.3079966092116417,
      "grad_norm": 32.18568420410156,
      "learning_rate": 4.816566917761719e-06,
      "loss": 6.9014,
      "mean_token_accuracy": 0.6485872358083725,
      "step": 545
    },
    {
      "epoch": 0.31082226617688613,
      "grad_norm": 46.209686279296875,
      "learning_rate": 4.794864923872006e-06,
      "loss": 6.1183,
      "mean_token_accuracy": 0.6952649801969528,
      "step": 550
    },
    {
      "epoch": 0.3136479231421305,
      "grad_norm": 42.59083938598633,
      "learning_rate": 4.773015640963735e-06,
      "loss": 6.1898,
      "mean_token_accuracy": 0.6966498523950577,
      "step": 555
    },
    {
      "epoch": 0.316473580107375,
      "grad_norm": 34.99408721923828,
      "learning_rate": 4.751020862018315e-06,
      "loss": 6.4017,
      "mean_token_accuracy": 0.6635166749358177,
      "step": 560
    },
    {
      "epoch": 0.31929923707261937,
      "grad_norm": 44.781253814697266,
      "learning_rate": 4.728882391956751e-06,
      "loss": 6.294,
      "mean_token_accuracy": 0.6908501267433167,
      "step": 565
    },
    {
      "epoch": 0.3221248940378638,
      "grad_norm": 38.54107666015625,
      "learning_rate": 4.706602047491535e-06,
      "loss": 6.3961,
      "mean_token_accuracy": 0.6642886430025101,
      "step": 570
    },
    {
      "epoch": 0.3249505510031082,
      "grad_norm": 39.30413055419922,
      "learning_rate": 4.68418165697756e-06,
      "loss": 7.0987,
      "mean_token_accuracy": 0.6421632379293442,
      "step": 575
    },
    {
      "epoch": 0.32777620796835266,
      "grad_norm": 32.5236930847168,
      "learning_rate": 4.66162306026209e-06,
      "loss": 6.3601,
      "mean_token_accuracy": 0.6777586549520492,
      "step": 580
    },
    {
      "epoch": 0.33060186493359706,
      "grad_norm": 44.02008056640625,
      "learning_rate": 4.638928108533771e-06,
      "loss": 6.7745,
      "mean_token_accuracy": 0.6339758485555649,
      "step": 585
    },
    {
      "epoch": 0.3334275218988415,
      "grad_norm": 42.38660430908203,
      "learning_rate": 4.616098664170726e-06,
      "loss": 6.7977,
      "mean_token_accuracy": 0.6368318185210228,
      "step": 590
    },
    {
      "epoch": 0.3362531788640859,
      "grad_norm": 56.86142349243164,
      "learning_rate": 4.5931366005877205e-06,
      "loss": 6.5369,
      "mean_token_accuracy": 0.6513374149799347,
      "step": 595
    },
    {
      "epoch": 0.3390788358293303,
      "grad_norm": 46.34036636352539,
      "learning_rate": 4.570043802082435e-06,
      "loss": 6.975,
      "mean_token_accuracy": 0.6324821501970291,
      "step": 600
    },
    {
      "epoch": 0.3390788358293303,
      "eval_loss": 1.6515789031982422,
      "eval_mean_token_accuracy": 0.6573993170321896,
      "eval_runtime": 60.1378,
      "eval_samples_per_second": 26.157,
      "eval_steps_per_second": 3.276,
      "step": 600
    },
    {
      "epoch": 0.34190449279457474,
      "grad_norm": 34.469337463378906,
      "learning_rate": 4.546822163680829e-06,
      "loss": 5.6408,
      "mean_token_accuracy": 0.6926419764757157,
      "step": 605
    },
    {
      "epoch": 0.34473014975981914,
      "grad_norm": 54.577877044677734,
      "learning_rate": 4.523473590981639e-06,
      "loss": 5.1717,
      "mean_token_accuracy": 0.708769902586937,
      "step": 610
    },
    {
      "epoch": 0.3475558067250636,
      "grad_norm": 42.55693817138672,
      "learning_rate": 4.5e-06,
      "loss": 6.131,
      "mean_token_accuracy": 0.6689537853002548,
      "step": 615
    },
    {
      "epoch": 0.350381463690308,
      "grad_norm": 32.82284927368164,
      "learning_rate": 4.476403317010212e-06,
      "loss": 6.4724,
      "mean_token_accuracy": 0.6572571873664856,
      "step": 620
    },
    {
      "epoch": 0.35320712065555243,
      "grad_norm": 40.628170013427734,
      "learning_rate": 4.452685478387672e-06,
      "loss": 7.2712,
      "mean_token_accuracy": 0.6308314383029938,
      "step": 625
    },
    {
      "epoch": 0.3560327776207968,
      "grad_norm": 32.64014434814453,
      "learning_rate": 4.4288484304499706e-06,
      "loss": 5.8603,
      "mean_token_accuracy": 0.6960221052169799,
      "step": 630
    },
    {
      "epoch": 0.3588584345860413,
      "grad_norm": 45.46329116821289,
      "learning_rate": 4.404894129297172e-06,
      "loss": 6.1867,
      "mean_token_accuracy": 0.6657388493418693,
      "step": 635
    },
    {
      "epoch": 0.36168409155128567,
      "grad_norm": 105.43537139892578,
      "learning_rate": 4.380824540651301e-06,
      "loss": 6.4694,
      "mean_token_accuracy": 0.6702811747789383,
      "step": 640
    },
    {
      "epoch": 0.3645097485165301,
      "grad_norm": 41.170921325683594,
      "learning_rate": 4.356641639695022e-06,
      "loss": 6.4786,
      "mean_token_accuracy": 0.6519061028957367,
      "step": 645
    },
    {
      "epoch": 0.3673354054817745,
      "grad_norm": 33.53129959106445,
      "learning_rate": 4.332347410909566e-06,
      "loss": 6.4479,
      "mean_token_accuracy": 0.6696308821439743,
      "step": 650
    },
    {
      "epoch": 0.3701610624470189,
      "grad_norm": 44.768699645996094,
      "learning_rate": 4.307943847911868e-06,
      "loss": 6.6492,
      "mean_token_accuracy": 0.6501506567001343,
      "step": 655
    },
    {
      "epoch": 0.37298671941226336,
      "grad_norm": 38.888816833496094,
      "learning_rate": 4.283432953290981e-06,
      "loss": 6.7759,
      "mean_token_accuracy": 0.6582424193620682,
      "step": 660
    },
    {
      "epoch": 0.37581237637750775,
      "grad_norm": 42.18849563598633,
      "learning_rate": 4.258816738443731e-06,
      "loss": 6.0352,
      "mean_token_accuracy": 0.672022745013237,
      "step": 665
    },
    {
      "epoch": 0.3786380333427522,
      "grad_norm": 32.39656066894531,
      "learning_rate": 4.234097223409664e-06,
      "loss": 6.2633,
      "mean_token_accuracy": 0.6928743287920952,
      "step": 670
    },
    {
      "epoch": 0.3814636903079966,
      "grad_norm": 38.409385681152344,
      "learning_rate": 4.209276436705276e-06,
      "loss": 6.2866,
      "mean_token_accuracy": 0.6808415204286575,
      "step": 675
    },
    {
      "epoch": 0.38428934727324104,
      "grad_norm": 41.57419204711914,
      "learning_rate": 4.184356415157556e-06,
      "loss": 5.9778,
      "mean_token_accuracy": 0.7001727074384689,
      "step": 680
    },
    {
      "epoch": 0.38711500423848544,
      "grad_norm": 34.73065185546875,
      "learning_rate": 4.159339203736831e-06,
      "loss": 6.0301,
      "mean_token_accuracy": 0.6711145430803299,
      "step": 685
    },
    {
      "epoch": 0.3899406612037299,
      "grad_norm": 40.23607635498047,
      "learning_rate": 4.134226855388963e-06,
      "loss": 5.7109,
      "mean_token_accuracy": 0.7057820171117782,
      "step": 690
    },
    {
      "epoch": 0.3927663181689743,
      "grad_norm": 38.31148910522461,
      "learning_rate": 4.10902143086688e-06,
      "loss": 6.6394,
      "mean_token_accuracy": 0.6580009430646896,
      "step": 695
    },
    {
      "epoch": 0.39559197513421873,
      "grad_norm": 50.16755676269531,
      "learning_rate": 4.08372499856146e-06,
      "loss": 5.611,
      "mean_token_accuracy": 0.7013431131839752,
      "step": 700
    },
    {
      "epoch": 0.3984176320994631,
      "grad_norm": 30.385122299194336,
      "learning_rate": 4.0583396343318025e-06,
      "loss": 6.5846,
      "mean_token_accuracy": 0.6525479450821876,
      "step": 705
    },
    {
      "epoch": 0.4012432890647075,
      "grad_norm": 40.52418518066406,
      "learning_rate": 4.032867421334884e-06,
      "loss": 5.4611,
      "mean_token_accuracy": 0.698312160372734,
      "step": 710
    },
    {
      "epoch": 0.40406894602995197,
      "grad_norm": 39.72053146362305,
      "learning_rate": 4.0073104498546036e-06,
      "loss": 5.9016,
      "mean_token_accuracy": 0.6850436985492706,
      "step": 715
    },
    {
      "epoch": 0.40689460299519636,
      "grad_norm": 38.679527282714844,
      "learning_rate": 3.981670817130255e-06,
      "loss": 6.0392,
      "mean_token_accuracy": 0.6699395298957824,
      "step": 720
    },
    {
      "epoch": 0.4097202599604408,
      "grad_norm": 42.41149139404297,
      "learning_rate": 3.955950627184426e-06,
      "loss": 6.2423,
      "mean_token_accuracy": 0.6726677268743515,
      "step": 725
    },
    {
      "epoch": 0.4125459169256852,
      "grad_norm": 36.73404312133789,
      "learning_rate": 3.930151990650336e-06,
      "loss": 5.0402,
      "mean_token_accuracy": 0.7204348385334015,
      "step": 730
    },
    {
      "epoch": 0.41537157389092966,
      "grad_norm": 33.893760681152344,
      "learning_rate": 3.904277024598638e-06,
      "loss": 5.5147,
      "mean_token_accuracy": 0.6979331076145172,
      "step": 735
    },
    {
      "epoch": 0.41819723085617405,
      "grad_norm": 47.56764602661133,
      "learning_rate": 3.878327852363686e-06,
      "loss": 5.7995,
      "mean_token_accuracy": 0.7078070282936096,
      "step": 740
    },
    {
      "epoch": 0.4210228878214185,
      "grad_norm": 31.648059844970703,
      "learning_rate": 3.852306603369294e-06,
      "loss": 6.8761,
      "mean_token_accuracy": 0.665014611184597,
      "step": 745
    },
    {
      "epoch": 0.4238485447866629,
      "grad_norm": 38.39750289916992,
      "learning_rate": 3.826215412953991e-06,
      "loss": 6.0653,
      "mean_token_accuracy": 0.6770342886447906,
      "step": 750
    },
    {
      "epoch": 0.42667420175190734,
      "grad_norm": 30.88237190246582,
      "learning_rate": 3.800056422195792e-06,
      "loss": 6.535,
      "mean_token_accuracy": 0.6590037375688553,
      "step": 755
    },
    {
      "epoch": 0.42949985871715174,
      "grad_norm": 63.2603874206543,
      "learning_rate": 3.773831777736499e-06,
      "loss": 6.5161,
      "mean_token_accuracy": 0.6474016666412353,
      "step": 760
    },
    {
      "epoch": 0.43232551568239613,
      "grad_norm": 50.654457092285156,
      "learning_rate": 3.747543631605547e-06,
      "loss": 6.7369,
      "mean_token_accuracy": 0.6452984467148781,
      "step": 765
    },
    {
      "epoch": 0.4351511726476406,
      "grad_norm": 37.10031509399414,
      "learning_rate": 3.721194141043398e-06,
      "loss": 6.2939,
      "mean_token_accuracy": 0.6634088665246963,
      "step": 770
    },
    {
      "epoch": 0.437976829612885,
      "grad_norm": 32.4448356628418,
      "learning_rate": 3.694785468324526e-06,
      "loss": 5.3857,
      "mean_token_accuracy": 0.7086734473705292,
      "step": 775
    },
    {
      "epoch": 0.4408024865781294,
      "grad_norm": 37.90336990356445,
      "learning_rate": 3.6683197805799684e-06,
      "loss": 5.5692,
      "mean_token_accuracy": 0.691333469748497,
      "step": 780
    },
    {
      "epoch": 0.4436281435433738,
      "grad_norm": 33.28772735595703,
      "learning_rate": 3.641799249619492e-06,
      "loss": 5.7555,
      "mean_token_accuracy": 0.6938249558210373,
      "step": 785
    },
    {
      "epoch": 0.44645380050861827,
      "grad_norm": 40.561336517333984,
      "learning_rate": 3.6152260517533743e-06,
      "loss": 6.5292,
      "mean_token_accuracy": 0.6575401365756989,
      "step": 790
    },
    {
      "epoch": 0.44927945747386266,
      "grad_norm": 34.72788619995117,
      "learning_rate": 3.588602367613805e-06,
      "loss": 5.6275,
      "mean_token_accuracy": 0.6937674105167388,
      "step": 795
    },
    {
      "epoch": 0.4521051144391071,
      "grad_norm": 51.351802825927734,
      "learning_rate": 3.56193038197595e-06,
      "loss": 5.8965,
      "mean_token_accuracy": 0.7013622283935547,
      "step": 800
    },
    {
      "epoch": 0.4549307714043515,
      "grad_norm": 34.20261001586914,
      "learning_rate": 3.5352122835786555e-06,
      "loss": 6.2313,
      "mean_token_accuracy": 0.6691349744796753,
      "step": 805
    },
    {
      "epoch": 0.45775642836959596,
      "grad_norm": 32.29352951049805,
      "learning_rate": 3.508450264944848e-06,
      "loss": 6.0912,
      "mean_token_accuracy": 0.6840269297361374,
      "step": 810
    },
    {
      "epoch": 0.46058208533484035,
      "grad_norm": 39.749229431152344,
      "learning_rate": 3.481646522201602e-06,
      "loss": 5.9559,
      "mean_token_accuracy": 0.6829979822039605,
      "step": 815
    },
    {
      "epoch": 0.46340774230008475,
      "grad_norm": 39.798587799072266,
      "learning_rate": 3.4548032548999336e-06,
      "loss": 6.161,
      "mean_token_accuracy": 0.6791020795702934,
      "step": 820
    },
    {
      "epoch": 0.4662333992653292,
      "grad_norm": 51.893211364746094,
      "learning_rate": 3.4279226658342925e-06,
      "loss": 6.3016,
      "mean_token_accuracy": 0.6657601609826088,
      "step": 825
    },
    {
      "epoch": 0.4690590562305736,
      "grad_norm": 50.02108383178711,
      "learning_rate": 3.4010069608618056e-06,
      "loss": 6.0286,
      "mean_token_accuracy": 0.6940437912940979,
      "step": 830
    },
    {
      "epoch": 0.47188471319581804,
      "grad_norm": 55.37822723388672,
      "learning_rate": 3.374058348721255e-06,
      "loss": 6.2305,
      "mean_token_accuracy": 0.6752733439207077,
      "step": 835
    },
    {
      "epoch": 0.47471037016106243,
      "grad_norm": 39.274688720703125,
      "learning_rate": 3.347079040851833e-06,
      "loss": 6.4344,
      "mean_token_accuracy": 0.6463314086198807,
      "step": 840
    },
    {
      "epoch": 0.4775360271263069,
      "grad_norm": 33.772884368896484,
      "learning_rate": 3.3200712512116598e-06,
      "loss": 4.5399,
      "mean_token_accuracy": 0.7319628089666367,
      "step": 845
    },
    {
      "epoch": 0.4803616840915513,
      "grad_norm": 47.047767639160156,
      "learning_rate": 3.293037196096113e-06,
      "loss": 6.0456,
      "mean_token_accuracy": 0.664868313074112,
      "step": 850
    },
    {
      "epoch": 0.4831873410567957,
      "grad_norm": 41.05727767944336,
      "learning_rate": 3.2659790939559453e-06,
      "loss": 5.7682,
      "mean_token_accuracy": 0.7045676440000535,
      "step": 855
    },
    {
      "epoch": 0.4860129980220401,
      "grad_norm": 37.37389373779297,
      "learning_rate": 3.238899165215245e-06,
      "loss": 5.2326,
      "mean_token_accuracy": 0.7269378632307053,
      "step": 860
    },
    {
      "epoch": 0.48883865498728457,
      "grad_norm": 39.5576286315918,
      "learning_rate": 3.211799632089216e-06,
      "loss": 5.1324,
      "mean_token_accuracy": 0.7152336061000824,
      "step": 865
    },
    {
      "epoch": 0.49166431195252897,
      "grad_norm": 41.0853157043457,
      "learning_rate": 3.1846827184018294e-06,
      "loss": 5.7599,
      "mean_token_accuracy": 0.6923278480768204,
      "step": 870
    },
    {
      "epoch": 0.49448996891777336,
      "grad_norm": 51.03715896606445,
      "learning_rate": 3.157550649403322e-06,
      "loss": 4.9395,
      "mean_token_accuracy": 0.7514464080333709,
      "step": 875
    },
    {
      "epoch": 0.4973156258830178,
      "grad_norm": 37.161231994628906,
      "learning_rate": 3.1304056515876024e-06,
      "loss": 7.0247,
      "mean_token_accuracy": 0.6437601447105408,
      "step": 880
    },
    {
      "epoch": 0.5001412828482622,
      "grad_norm": 47.0338134765625,
      "learning_rate": 3.1032499525095303e-06,
      "loss": 5.647,
      "mean_token_accuracy": 0.6982032418251037,
      "step": 885
    },
    {
      "epoch": 0.5029669398135066,
      "grad_norm": 32.64208221435547,
      "learning_rate": 3.076085780602128e-06,
      "loss": 5.6704,
      "mean_token_accuracy": 0.6939798533916474,
      "step": 890
    },
    {
      "epoch": 0.5057925967787511,
      "grad_norm": 32.635005950927734,
      "learning_rate": 3.048915364993708e-06,
      "loss": 5.2166,
      "mean_token_accuracy": 0.7231873899698258,
      "step": 895
    },
    {
      "epoch": 0.5086182537439955,
      "grad_norm": 29.856887817382812,
      "learning_rate": 3.0217409353249512e-06,
      "loss": 5.9994,
      "mean_token_accuracy": 0.6912487387657166,
      "step": 900
    },
    {
      "epoch": 0.5086182537439955,
      "eval_loss": 1.5281765460968018,
      "eval_mean_token_accuracy": 0.6797472610691477,
      "eval_runtime": 60.4506,
      "eval_samples_per_second": 26.021,
      "eval_steps_per_second": 3.259,
      "step": 900
    },
    {
      "epoch": 0.5114439107092399,
      "grad_norm": 46.50059509277344,
      "learning_rate": 2.994564721565935e-06,
      "loss": 5.3769,
      "mean_token_accuracy": 0.7131396651268005,
      "step": 905
    },
    {
      "epoch": 0.5142695676744843,
      "grad_norm": 36.98368453979492,
      "learning_rate": 2.9673889538331435e-06,
      "loss": 5.5554,
      "mean_token_accuracy": 0.7053171962499618,
      "step": 910
    },
    {
      "epoch": 0.5170952246397288,
      "grad_norm": 40.522071838378906,
      "learning_rate": 2.94021586220646e-06,
      "loss": 5.9756,
      "mean_token_accuracy": 0.6839839160442353,
      "step": 915
    },
    {
      "epoch": 0.5199208816049732,
      "grad_norm": 39.4980583190918,
      "learning_rate": 2.9130476765461605e-06,
      "loss": 5.4328,
      "mean_token_accuracy": 0.7064492374658584,
      "step": 920
    },
    {
      "epoch": 0.5227465385702176,
      "grad_norm": 57.04869842529297,
      "learning_rate": 2.8858866263099325e-06,
      "loss": 6.114,
      "mean_token_accuracy": 0.6875434190034866,
      "step": 925
    },
    {
      "epoch": 0.525572195535462,
      "grad_norm": 31.3112850189209,
      "learning_rate": 2.858734940369919e-06,
      "loss": 5.63,
      "mean_token_accuracy": 0.7200439631938934,
      "step": 930
    },
    {
      "epoch": 0.5283978525007064,
      "grad_norm": 47.432926177978516,
      "learning_rate": 2.831594846829821e-06,
      "loss": 5.7099,
      "mean_token_accuracy": 0.695030590891838,
      "step": 935
    },
    {
      "epoch": 0.5312235094659509,
      "grad_norm": 35.99347686767578,
      "learning_rate": 2.8044685728420472e-06,
      "loss": 4.4271,
      "mean_token_accuracy": 0.7551506340503693,
      "step": 940
    },
    {
      "epoch": 0.5340491664311953,
      "grad_norm": 38.81227111816406,
      "learning_rate": 2.777358344424957e-06,
      "loss": 4.8282,
      "mean_token_accuracy": 0.7347202837467194,
      "step": 945
    },
    {
      "epoch": 0.5368748233964397,
      "grad_norm": 39.581241607666016,
      "learning_rate": 2.7502663862801866e-06,
      "loss": 5.3346,
      "mean_token_accuracy": 0.7030239164829254,
      "step": 950
    },
    {
      "epoch": 0.539700480361684,
      "grad_norm": 45.003013610839844,
      "learning_rate": 2.7231949216100943e-06,
      "loss": 6.2008,
      "mean_token_accuracy": 0.6608472660183906,
      "step": 955
    },
    {
      "epoch": 0.5425261373269286,
      "grad_norm": 45.456016540527344,
      "learning_rate": 2.696146171935312e-06,
      "loss": 4.8497,
      "mean_token_accuracy": 0.7226766556501388,
      "step": 960
    },
    {
      "epoch": 0.545351794292173,
      "grad_norm": 44.264671325683594,
      "learning_rate": 2.6691223569124495e-06,
      "loss": 5.5343,
      "mean_token_accuracy": 0.7043332427740097,
      "step": 965
    },
    {
      "epoch": 0.5481774512574173,
      "grad_norm": 43.33969497680664,
      "learning_rate": 2.6421256941519453e-06,
      "loss": 5.0521,
      "mean_token_accuracy": 0.7183876752853393,
      "step": 970
    },
    {
      "epoch": 0.5510031082226617,
      "grad_norm": 69.75106048583984,
      "learning_rate": 2.61515839903609e-06,
      "loss": 5.2742,
      "mean_token_accuracy": 0.7265293389558792,
      "step": 975
    },
    {
      "epoch": 0.5538287651879061,
      "grad_norm": 45.96234130859375,
      "learning_rate": 2.588222684537222e-06,
      "loss": 5.7874,
      "mean_token_accuracy": 0.6878435671329498,
      "step": 980
    },
    {
      "epoch": 0.5566544221531506,
      "grad_norm": 32.10597229003906,
      "learning_rate": 2.5613207610361338e-06,
      "loss": 5.682,
      "mean_token_accuracy": 0.6860069572925568,
      "step": 985
    },
    {
      "epoch": 0.559480079118395,
      "grad_norm": 49.05516815185547,
      "learning_rate": 2.5344548361406842e-06,
      "loss": 5.7007,
      "mean_token_accuracy": 0.6855567038059235,
      "step": 990
    },
    {
      "epoch": 0.5623057360836394,
      "grad_norm": 74.12281799316406,
      "learning_rate": 2.507627114504637e-06,
      "loss": 6.9595,
      "mean_token_accuracy": 0.6414749681949615,
      "step": 995
    },
    {
      "epoch": 0.5651313930488838,
      "grad_norm": 47.54744338989258,
      "learning_rate": 2.480839797646746e-06,
      "loss": 5.8039,
      "mean_token_accuracy": 0.7000325888395309,
      "step": 1000
    },
    {
      "epoch": 0.5679570500141283,
      "grad_norm": 38.07027053833008,
      "learning_rate": 2.4540950837700923e-06,
      "loss": 6.192,
      "mean_token_accuracy": 0.702258163690567,
      "step": 1005
    },
    {
      "epoch": 0.5707827069793727,
      "grad_norm": 47.82304382324219,
      "learning_rate": 2.4273951675817043e-06,
      "loss": 5.2627,
      "mean_token_accuracy": 0.704213073849678,
      "step": 1010
    },
    {
      "epoch": 0.5736083639446171,
      "grad_norm": 33.01102066040039,
      "learning_rate": 2.4007422401124488e-06,
      "loss": 5.9233,
      "mean_token_accuracy": 0.6820877581834793,
      "step": 1015
    },
    {
      "epoch": 0.5764340209098615,
      "grad_norm": 28.866247177124023,
      "learning_rate": 2.3741384885372346e-06,
      "loss": 5.5267,
      "mean_token_accuracy": 0.705855768918991,
      "step": 1020
    },
    {
      "epoch": 0.579259677875106,
      "grad_norm": 46.4747428894043,
      "learning_rate": 2.347586095995532e-06,
      "loss": 6.0163,
      "mean_token_accuracy": 0.6707334235310555,
      "step": 1025
    },
    {
      "epoch": 0.5820853348403504,
      "grad_norm": 41.4154052734375,
      "learning_rate": 2.3210872414122224e-06,
      "loss": 5.1814,
      "mean_token_accuracy": 0.729784882068634,
      "step": 1030
    },
    {
      "epoch": 0.5849109918055948,
      "grad_norm": 36.9775505065918,
      "learning_rate": 2.2946440993187876e-06,
      "loss": 5.3629,
      "mean_token_accuracy": 0.6963009983301163,
      "step": 1035
    },
    {
      "epoch": 0.5877366487708392,
      "grad_norm": 33.48902893066406,
      "learning_rate": 2.2682588396748687e-06,
      "loss": 6.5289,
      "mean_token_accuracy": 0.6563118815422058,
      "step": 1040
    },
    {
      "epoch": 0.5905623057360836,
      "grad_norm": 41.8765983581543,
      "learning_rate": 2.241933627690196e-06,
      "loss": 6.1125,
      "mean_token_accuracy": 0.6872710019350052,
      "step": 1045
    },
    {
      "epoch": 0.5933879627013281,
      "grad_norm": 33.268428802490234,
      "learning_rate": 2.2156706236469088e-06,
      "loss": 6.1175,
      "mean_token_accuracy": 0.6717882409691811,
      "step": 1050
    },
    {
      "epoch": 0.5962136196665725,
      "grad_norm": 39.825626373291016,
      "learning_rate": 2.1894719827222783e-06,
      "loss": 5.5307,
      "mean_token_accuracy": 0.7009034663438797,
      "step": 1055
    },
    {
      "epoch": 0.5990392766318169,
      "grad_norm": 44.565643310546875,
      "learning_rate": 2.1633398548118515e-06,
      "loss": 4.8155,
      "mean_token_accuracy": 0.7297946393489838,
      "step": 1060
    },
    {
      "epoch": 0.6018649335970613,
      "grad_norm": 39.603511810302734,
      "learning_rate": 2.137276384353032e-06,
      "loss": 4.6931,
      "mean_token_accuracy": 0.7337764650583267,
      "step": 1065
    },
    {
      "epoch": 0.6046905905623058,
      "grad_norm": 42.91708755493164,
      "learning_rate": 2.111283710149097e-06,
      "loss": 5.3974,
      "mean_token_accuracy": 0.7125359356403351,
      "step": 1070
    },
    {
      "epoch": 0.6075162475275502,
      "grad_norm": 57.45164108276367,
      "learning_rate": 2.08536396519369e-06,
      "loss": 5.4134,
      "mean_token_accuracy": 0.6924123004078865,
      "step": 1075
    },
    {
      "epoch": 0.6103419044927946,
      "grad_norm": 32.598880767822266,
      "learning_rate": 2.0595192764957815e-06,
      "loss": 5.6697,
      "mean_token_accuracy": 0.6915164411067962,
      "step": 1080
    },
    {
      "epoch": 0.613167561458039,
      "grad_norm": 44.280662536621094,
      "learning_rate": 2.0337517649051282e-06,
      "loss": 5.7898,
      "mean_token_accuracy": 0.7014181435108184,
      "step": 1085
    },
    {
      "epoch": 0.6159932184232834,
      "grad_norm": 50.706207275390625,
      "learning_rate": 2.008063544938227e-06,
      "loss": 6.3557,
      "mean_token_accuracy": 0.6665431886911393,
      "step": 1090
    },
    {
      "epoch": 0.6188188753885279,
      "grad_norm": 34.798702239990234,
      "learning_rate": 1.982456724604798e-06,
      "loss": 5.8921,
      "mean_token_accuracy": 0.6692013502120971,
      "step": 1095
    },
    {
      "epoch": 0.6216445323537723,
      "grad_norm": 43.494693756103516,
      "learning_rate": 1.956933405234799e-06,
      "loss": 5.0179,
      "mean_token_accuracy": 0.7225543946027756,
      "step": 1100
    },
    {
      "epoch": 0.6244701893190167,
      "grad_norm": 30.238845825195312,
      "learning_rate": 1.9314956813059893e-06,
      "loss": 5.3437,
      "mean_token_accuracy": 0.7133805066347122,
      "step": 1105
    },
    {
      "epoch": 0.627295846284261,
      "grad_norm": 58.761962890625,
      "learning_rate": 1.906145640272049e-06,
      "loss": 5.6678,
      "mean_token_accuracy": 0.6920988261699677,
      "step": 1110
    },
    {
      "epoch": 0.6301215032495056,
      "grad_norm": 36.33903884887695,
      "learning_rate": 1.8808853623912808e-06,
      "loss": 4.9883,
      "mean_token_accuracy": 0.7231852769851684,
      "step": 1115
    },
    {
      "epoch": 0.63294716021475,
      "grad_norm": 29.945993423461914,
      "learning_rate": 1.8557169205559086e-06,
      "loss": 5.3157,
      "mean_token_accuracy": 0.706680515408516,
      "step": 1120
    },
    {
      "epoch": 0.6357728171799943,
      "grad_norm": 28.975589752197266,
      "learning_rate": 1.830642380121962e-06,
      "loss": 5.2056,
      "mean_token_accuracy": 0.7050720751285553,
      "step": 1125
    },
    {
      "epoch": 0.6385984741452387,
      "grad_norm": 44.97232437133789,
      "learning_rate": 1.8056637987397989e-06,
      "loss": 6.0927,
      "mean_token_accuracy": 0.6807661324739456,
      "step": 1130
    },
    {
      "epoch": 0.6414241311104832,
      "grad_norm": 34.31592559814453,
      "learning_rate": 1.7807832261852462e-06,
      "loss": 5.5395,
      "mean_token_accuracy": 0.6893488377332687,
      "step": 1135
    },
    {
      "epoch": 0.6442497880757276,
      "grad_norm": 39.61166763305664,
      "learning_rate": 1.7560027041913992e-06,
      "loss": 5.0804,
      "mean_token_accuracy": 0.7302613139152527,
      "step": 1140
    },
    {
      "epoch": 0.647075445040972,
      "grad_norm": 42.036598205566406,
      "learning_rate": 1.7313242662810682e-06,
      "loss": 5.247,
      "mean_token_accuracy": 0.6992710053920745,
      "step": 1145
    },
    {
      "epoch": 0.6499011020062164,
      "grad_norm": 45.033103942871094,
      "learning_rate": 1.7067499375999042e-06,
      "loss": 5.6366,
      "mean_token_accuracy": 0.6878565683960914,
      "step": 1150
    },
    {
      "epoch": 0.6527267589714608,
      "grad_norm": 45.477500915527344,
      "learning_rate": 1.6822817347502192e-06,
      "loss": 5.1352,
      "mean_token_accuracy": 0.7178653836250305,
      "step": 1155
    },
    {
      "epoch": 0.6555524159367053,
      "grad_norm": 30.850934982299805,
      "learning_rate": 1.657921665625497e-06,
      "loss": 5.3625,
      "mean_token_accuracy": 0.6953006356954574,
      "step": 1160
    },
    {
      "epoch": 0.6583780729019497,
      "grad_norm": 44.302703857421875,
      "learning_rate": 1.6336717292456232e-06,
      "loss": 4.9628,
      "mean_token_accuracy": 0.7283646464347839,
      "step": 1165
    },
    {
      "epoch": 0.6612037298671941,
      "grad_norm": 36.562110900878906,
      "learning_rate": 1.6095339155928395e-06,
      "loss": 6.379,
      "mean_token_accuracy": 0.6473036587238312,
      "step": 1170
    },
    {
      "epoch": 0.6640293868324385,
      "grad_norm": 45.95467758178711,
      "learning_rate": 1.5855102054484505e-06,
      "loss": 5.2969,
      "mean_token_accuracy": 0.7192613005638122,
      "step": 1175
    },
    {
      "epoch": 0.666855043797683,
      "grad_norm": 39.13850021362305,
      "learning_rate": 1.5616025702302725e-06,
      "loss": 5.9703,
      "mean_token_accuracy": 0.6769262015819549,
      "step": 1180
    },
    {
      "epoch": 0.6696807007629274,
      "grad_norm": 51.65994644165039,
      "learning_rate": 1.53781297183086e-06,
      "loss": 5.5809,
      "mean_token_accuracy": 0.7111178368330002,
      "step": 1185
    },
    {
      "epoch": 0.6725063577281718,
      "grad_norm": 43.34739303588867,
      "learning_rate": 1.5141433624565027e-06,
      "loss": 5.735,
      "mean_token_accuracy": 0.7082278728485107,
      "step": 1190
    },
    {
      "epoch": 0.6753320146934162,
      "grad_norm": 40.05953598022461,
      "learning_rate": 1.490595684467038e-06,
      "loss": 5.2052,
      "mean_token_accuracy": 0.7205279141664505,
      "step": 1195
    },
    {
      "epoch": 0.6781576716586606,
      "grad_norm": 33.837886810302734,
      "learning_rate": 1.4671718702164435e-06,
      "loss": 5.9794,
      "mean_token_accuracy": 0.6879066616296768,
      "step": 1200
    },
    {
      "epoch": 0.6781576716586606,
      "eval_loss": 1.4738432168960571,
      "eval_mean_token_accuracy": 0.6890995811084806,
      "eval_runtime": 60.3641,
      "eval_samples_per_second": 26.059,
      "eval_steps_per_second": 3.264,
      "step": 1200
    }
  ],
  "logging_steps": 5,
  "max_steps": 1770,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.53372742236814e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}