{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9979530403371464,
  "eval_steps": 100,
  "global_step": 3114,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00481637567730283,
      "grad_norm": 15.24969482421875,
      "learning_rate": 5.60486511952635e-06,
      "loss": 1.7081,
      "mean_token_accuracy": 0.6025852240622044,
      "step": 5
    },
    {
      "epoch": 0.00963275135460566,
      "grad_norm": 12.314075469970703,
      "learning_rate": 8.018749137669579e-06,
      "loss": 1.7998,
      "mean_token_accuracy": 0.5892860271036625,
      "step": 10
    },
    {
      "epoch": 0.014449127031908489,
      "grad_norm": 3.993947982788086,
      "learning_rate": 9.430780769373472e-06,
      "loss": 1.5099,
      "mean_token_accuracy": 0.6336932629346848,
      "step": 15
    },
    {
      "epoch": 0.01926550270921132,
      "grad_norm": 1.7531483173370361,
      "learning_rate": 1.0432633155812801e-05,
      "loss": 1.4455,
      "mean_token_accuracy": 0.6432643696665764,
      "step": 20
    },
    {
      "epoch": 0.024081878386514148,
      "grad_norm": 0.5668932795524597,
      "learning_rate": 1.12097302390527e-05,
      "loss": 1.351,
      "mean_token_accuracy": 0.6605924591422081,
      "step": 25
    },
    {
      "epoch": 0.028898254063816978,
      "grad_norm": 0.4905467927455902,
      "learning_rate": 1.1844664787516697e-05,
      "loss": 1.2968,
      "mean_token_accuracy": 0.6679659530520439,
      "step": 30
    },
    {
      "epoch": 0.03371462974111981,
      "grad_norm": 0.4706348776817322,
      "learning_rate": 1.2381494299136923e-05,
      "loss": 1.2571,
      "mean_token_accuracy": 0.6737238138914108,
      "step": 35
    },
    {
      "epoch": 0.03853100541842264,
      "grad_norm": 0.43812254071235657,
      "learning_rate": 1.2846517173956028e-05,
      "loss": 1.186,
      "mean_token_accuracy": 0.6864958420395851,
      "step": 40
    },
    {
      "epoch": 0.04334738109572547,
      "grad_norm": 0.47561436891555786,
      "learning_rate": 1.3256696419220589e-05,
      "loss": 1.2086,
      "mean_token_accuracy": 0.67629723995924,
      "step": 45
    },
    {
      "epoch": 0.048163756773028296,
      "grad_norm": 0.4023856222629547,
      "learning_rate": 1.3623614257195928e-05,
      "loss": 1.1375,
      "mean_token_accuracy": 0.6874296680092812,
      "step": 50
    },
    {
      "epoch": 0.052980132450331126,
      "grad_norm": 0.39125627279281616,
      "learning_rate": 1.3955531815614275e-05,
      "loss": 1.1751,
      "mean_token_accuracy": 0.6815471544861793,
      "step": 55
    },
    {
      "epoch": 0.057796508127633955,
      "grad_norm": 0.410149484872818,
      "learning_rate": 1.4258548805659922e-05,
      "loss": 1.1622,
      "mean_token_accuracy": 0.6843628376722336,
      "step": 60
    },
    {
      "epoch": 0.06261288380493679,
      "grad_norm": 0.3972622752189636,
      "learning_rate": 1.4537297415249556e-05,
      "loss": 1.1487,
      "mean_token_accuracy": 0.6880501955747604,
      "step": 65
    },
    {
      "epoch": 0.06742925948223961,
      "grad_norm": 0.39933261275291443,
      "learning_rate": 1.4795378317280148e-05,
      "loss": 1.1235,
      "mean_token_accuracy": 0.6893462091684341,
      "step": 70
    },
    {
      "epoch": 0.07224563515954245,
      "grad_norm": 0.36374545097351074,
      "learning_rate": 1.5035645888899821e-05,
      "loss": 1.1015,
      "mean_token_accuracy": 0.691944369673729,
      "step": 75
    },
    {
      "epoch": 0.07706201083684527,
      "grad_norm": 0.3634852468967438,
      "learning_rate": 1.5260401192099252e-05,
      "loss": 1.1078,
      "mean_token_accuracy": 0.6953662529587745,
      "step": 80
    },
    {
      "epoch": 0.08187838651414811,
      "grad_norm": 0.4331696927547455,
      "learning_rate": 1.547152634677485e-05,
      "loss": 1.0918,
      "mean_token_accuracy": 0.7018570601940155,
      "step": 85
    },
    {
      "epoch": 0.08669476219145093,
      "grad_norm": 0.4048088490962982,
      "learning_rate": 1.5670580437363816e-05,
      "loss": 1.0828,
      "mean_token_accuracy": 0.699838899075985,
      "step": 90
    },
    {
      "epoch": 0.09151113786875377,
      "grad_norm": 0.42811787128448486,
      "learning_rate": 1.5858869454458713e-05,
      "loss": 1.1066,
      "mean_token_accuracy": 0.6937131121754646,
      "step": 95
    },
    {
      "epoch": 0.09632751354605659,
      "grad_norm": 0.39595216512680054,
      "learning_rate": 1.6037498275339157e-05,
      "loss": 1.1185,
      "mean_token_accuracy": 0.6915625333786011,
      "step": 100
    },
    {
      "epoch": 0.09632751354605659,
      "eval_loss": 1.120937466621399,
      "eval_mean_token_accuracy": 0.6942809590926537,
      "eval_runtime": 23.7545,
      "eval_samples_per_second": 4.21,
      "eval_steps_per_second": 0.547,
      "step": 100
    },
    {
      "epoch": 0.10114388922335943,
      "grad_norm": 0.3679651618003845,
      "learning_rate": 1.620740994898404e-05,
      "loss": 1.0867,
      "mean_token_accuracy": 0.695780485868454,
      "step": 105
    },
    {
      "epoch": 0.10596026490066225,
      "grad_norm": 0.3855838179588318,
      "learning_rate": 1.6369415833757502e-05,
      "loss": 1.1044,
      "mean_token_accuracy": 0.6917166858911514,
      "step": 110
    },
    {
      "epoch": 0.11077664057796509,
      "grad_norm": 0.4066142737865448,
      "learning_rate": 1.6524219030333085e-05,
      "loss": 1.0656,
      "mean_token_accuracy": 0.7033417671918869,
      "step": 115
    },
    {
      "epoch": 0.11559301625526791,
      "grad_norm": 0.41730743646621704,
      "learning_rate": 1.6672432823803147e-05,
      "loss": 1.0478,
      "mean_token_accuracy": 0.7045514702796936,
      "step": 120
    },
    {
      "epoch": 0.12040939193257075,
      "grad_norm": 0.41672202944755554,
      "learning_rate": 1.6814595358579055e-05,
      "loss": 1.0736,
      "mean_token_accuracy": 0.701400151848793,
      "step": 125
    },
    {
      "epoch": 0.12522576760987358,
      "grad_norm": 0.3911355137825012,
      "learning_rate": 1.695118143339278e-05,
      "loss": 1.0484,
      "mean_token_accuracy": 0.70690358877182,
      "step": 130
    },
    {
      "epoch": 0.1300421432871764,
      "grad_norm": 0.4797114431858063,
      "learning_rate": 1.708261206906771e-05,
      "loss": 1.0275,
      "mean_token_accuracy": 0.7159404128789901,
      "step": 135
    },
    {
      "epoch": 0.13485851896447923,
      "grad_norm": 0.4215666353702545,
      "learning_rate": 1.7209262335423374e-05,
      "loss": 1.0461,
      "mean_token_accuracy": 0.7075429305434227,
      "step": 140
    },
    {
      "epoch": 0.13967489464178207,
      "grad_norm": 0.39611467719078064,
      "learning_rate": 1.733146780410832e-05,
      "loss": 1.0515,
      "mean_token_accuracy": 0.7017971694469451,
      "step": 145
    },
    {
      "epoch": 0.1444912703190849,
      "grad_norm": 0.40612080693244934,
      "learning_rate": 1.7449529907043045e-05,
      "loss": 1.0619,
      "mean_token_accuracy": 0.7035504311323166,
      "step": 150
    },
    {
      "epoch": 0.1493076459963877,
      "grad_norm": 0.4210834801197052,
      "learning_rate": 1.7563720415913364e-05,
      "loss": 1.0407,
      "mean_token_accuracy": 0.7101730972528457,
      "step": 155
    },
    {
      "epoch": 0.15412402167369055,
      "grad_norm": 0.39545348286628723,
      "learning_rate": 1.7674285210242478e-05,
      "loss": 1.0184,
      "mean_token_accuracy": 0.7077491611242295,
      "step": 160
    },
    {
      "epoch": 0.15894039735099338,
      "grad_norm": 0.38933709263801575,
      "learning_rate": 1.7781447465461393e-05,
      "loss": 1.0496,
      "mean_token_accuracy": 0.7077953517436981,
      "step": 165
    },
    {
      "epoch": 0.16375677302829622,
      "grad_norm": 0.4460107386112213,
      "learning_rate": 1.788541036491807e-05,
      "loss": 1.0444,
      "mean_token_accuracy": 0.7059019073843956,
      "step": 170
    },
    {
      "epoch": 0.16857314870559903,
      "grad_norm": 0.43760132789611816,
      "learning_rate": 1.7986359418663275e-05,
      "loss": 1.0397,
      "mean_token_accuracy": 0.7088375821709633,
      "step": 175
    },
    {
      "epoch": 0.17338952438290187,
      "grad_norm": 0.39786314964294434,
      "learning_rate": 1.8084464455507042e-05,
      "loss": 1.0493,
      "mean_token_accuracy": 0.7046118810772896,
      "step": 180
    },
    {
      "epoch": 0.1782059000602047,
      "grad_norm": 0.36780059337615967,
      "learning_rate": 1.817988134208051e-05,
      "loss": 1.0067,
      "mean_token_accuracy": 0.7162643566727638,
      "step": 185
    },
    {
      "epoch": 0.18302227573750754,
      "grad_norm": 0.3834674656391144,
      "learning_rate": 1.8272753472601942e-05,
      "loss": 1.0074,
      "mean_token_accuracy": 0.7104792430996895,
      "step": 190
    },
    {
      "epoch": 0.18783865141481035,
      "grad_norm": 0.3949407935142517,
      "learning_rate": 1.8363213065096677e-05,
      "loss": 1.0135,
      "mean_token_accuracy": 0.715023210644722,
      "step": 195
    },
    {
      "epoch": 0.19265502709211318,
      "grad_norm": 0.38337242603302,
      "learning_rate": 1.845138229348238e-05,
      "loss": 1.0288,
      "mean_token_accuracy": 0.7040736198425293,
      "step": 200
    },
    {
      "epoch": 0.19265502709211318,
      "eval_loss": 1.064062476158142,
      "eval_mean_token_accuracy": 0.7043925065260667,
      "eval_runtime": 23.1434,
      "eval_samples_per_second": 4.321,
      "eval_steps_per_second": 0.562,
      "step": 200
    },
    {
      "epoch": 0.19747140276941602,
      "grad_norm": 0.39040884375572205,
      "learning_rate": 1.8537374279845146e-05,
      "loss": 0.9799,
      "mean_token_accuracy": 0.7189632415771484,
      "step": 205
    },
    {
      "epoch": 0.20228777844671886,
      "grad_norm": 0.4173654019832611,
      "learning_rate": 1.862129396712727e-05,
      "loss": 1.0342,
      "mean_token_accuracy": 0.7094724953174592,
      "step": 210
    },
    {
      "epoch": 0.20710415412402167,
      "grad_norm": 0.3901558518409729,
      "learning_rate": 1.8703238889115616e-05,
      "loss": 0.9908,
      "mean_token_accuracy": 0.7175863072276115,
      "step": 215
    },
    {
      "epoch": 0.2119205298013245,
      "grad_norm": 0.4490678906440735,
      "learning_rate": 1.8783299851900728e-05,
      "loss": 1.0204,
      "mean_token_accuracy": 0.7106468200683593,
      "step": 220
    },
    {
      "epoch": 0.21673690547862734,
      "grad_norm": 0.43239694833755493,
      "learning_rate": 1.8861561538746943e-05,
      "loss": 1.0393,
      "mean_token_accuracy": 0.7069905534386635,
      "step": 225
    },
    {
      "epoch": 0.22155328115593018,
      "grad_norm": 0.4308166801929474,
      "learning_rate": 1.8938103048476308e-05,
      "loss": 1.0158,
      "mean_token_accuracy": 0.7123534351587295,
      "step": 230
    },
    {
      "epoch": 0.22636965683323299,
      "grad_norm": 0.3894931972026825,
      "learning_rate": 1.901299837594753e-05,
      "loss": 0.9873,
      "mean_token_accuracy": 0.7187181740999222,
      "step": 235
    },
    {
      "epoch": 0.23118603251053582,
      "grad_norm": 0.42207640409469604,
      "learning_rate": 1.9086316841946373e-05,
      "loss": 0.9893,
      "mean_token_accuracy": 0.7170199275016784,
      "step": 240
    },
    {
      "epoch": 0.23600240818783866,
      "grad_norm": 0.45573151111602783,
      "learning_rate": 1.9158123478747496e-05,
      "loss": 1.0173,
      "mean_token_accuracy": 0.7128939390182495,
      "step": 245
    },
    {
      "epoch": 0.2408187838651415,
      "grad_norm": 0.44542551040649414,
      "learning_rate": 1.9228479376722277e-05,
      "loss": 1.0264,
      "mean_token_accuracy": 0.7066132187843323,
      "step": 250
    },
    {
      "epoch": 0.2456351595424443,
      "grad_norm": 0.42006808519363403,
      "learning_rate": 1.9297441996621962e-05,
      "loss": 1.0169,
      "mean_token_accuracy": 0.7104522138834,
      "step": 255
    },
    {
      "epoch": 0.25045153521974717,
      "grad_norm": 0.45401668548583984,
      "learning_rate": 1.9365065451536008e-05,
      "loss": 0.9878,
      "mean_token_accuracy": 0.7117918729782104,
      "step": 260
    },
    {
      "epoch": 0.25526791089705,
      "grad_norm": 0.40076637268066406,
      "learning_rate": 1.9431400761992135e-05,
      "loss": 0.9812,
      "mean_token_accuracy": 0.7238912284374237,
      "step": 265
    },
    {
      "epoch": 0.2600842865743528,
      "grad_norm": 0.39845559000968933,
      "learning_rate": 1.9496496087210937e-05,
      "loss": 0.9924,
      "mean_token_accuracy": 0.7154505327343941,
      "step": 270
    },
    {
      "epoch": 0.26490066225165565,
      "grad_norm": 0.38575807213783264,
      "learning_rate": 1.9560396935140626e-05,
      "loss": 0.9987,
      "mean_token_accuracy": 0.7184735506772995,
      "step": 275
    },
    {
      "epoch": 0.26971703792895846,
      "grad_norm": 0.37453049421310425,
      "learning_rate": 1.9623146353566597e-05,
      "loss": 1.0008,
      "mean_token_accuracy": 0.7148436263203621,
      "step": 280
    },
    {
      "epoch": 0.27453341360626127,
      "grad_norm": 0.379604309797287,
      "learning_rate": 1.9684785104305833e-05,
      "loss": 1.0257,
      "mean_token_accuracy": 0.707456587255001,
      "step": 285
    },
    {
      "epoch": 0.27934978928356413,
      "grad_norm": 0.41674622893333435,
      "learning_rate": 1.9745351822251547e-05,
      "loss": 1.0104,
      "mean_token_accuracy": 0.7121455416083335,
      "step": 290
    },
    {
      "epoch": 0.28416616496086694,
      "grad_norm": 0.36543646454811096,
      "learning_rate": 1.9804883160822233e-05,
      "loss": 1.0018,
      "mean_token_accuracy": 0.7111613750457764,
      "step": 295
    },
    {
      "epoch": 0.2889825406381698,
      "grad_norm": 0.3864189684391022,
      "learning_rate": 1.986341392518627e-05,
      "loss": 0.9896,
      "mean_token_accuracy": 0.7227002561092377,
      "step": 300
    },
    {
      "epoch": 0.2889825406381698,
      "eval_loss": 1.0334374904632568,
      "eval_mean_token_accuracy": 0.7100509542685288,
      "eval_runtime": 23.2119,
      "eval_samples_per_second": 4.308,
      "eval_steps_per_second": 0.56,
      "step": 300
    },
    {
      "epoch": 0.2937989163154726,
      "grad_norm": 0.39042481780052185,
      "learning_rate": 1.9920977194474707e-05,
      "loss": 0.9747,
      "mean_token_accuracy": 0.7194000944495201,
      "step": 305
    },
    {
      "epoch": 0.2986152919927754,
      "grad_norm": 0.41932472586631775,
      "learning_rate": 1.997760443405659e-05,
      "loss": 1.013,
      "mean_token_accuracy": 0.7132839515805245,
      "step": 310
    },
    {
      "epoch": 0.3034316676700783,
      "grad_norm": 0.3739113509654999,
      "learning_rate": 2e-05,
      "loss": 0.998,
      "mean_token_accuracy": 0.7116592064499855,
      "step": 315
    },
    {
      "epoch": 0.3082480433473811,
      "grad_norm": 0.3870009779930115,
      "learning_rate": 2e-05,
      "loss": 1.0472,
      "mean_token_accuracy": 0.7014497891068459,
      "step": 320
    },
    {
      "epoch": 0.3130644190246839,
      "grad_norm": 0.39197883009910583,
      "learning_rate": 2e-05,
      "loss": 0.9896,
      "mean_token_accuracy": 0.7157701522111892,
      "step": 325
    },
    {
      "epoch": 0.31788079470198677,
      "grad_norm": 0.4166513681411743,
      "learning_rate": 2e-05,
      "loss": 0.9832,
      "mean_token_accuracy": 0.7184643030166626,
      "step": 330
    },
    {
      "epoch": 0.3226971703792896,
      "grad_norm": 0.42934778332710266,
      "learning_rate": 2e-05,
      "loss": 0.9779,
      "mean_token_accuracy": 0.7207571104168892,
      "step": 335
    },
    {
      "epoch": 0.32751354605659244,
      "grad_norm": 0.3707175850868225,
      "learning_rate": 2e-05,
      "loss": 1.0075,
      "mean_token_accuracy": 0.7121270015835762,
      "step": 340
    },
    {
      "epoch": 0.33232992173389525,
      "grad_norm": 0.41228577494621277,
      "learning_rate": 2e-05,
      "loss": 0.9755,
      "mean_token_accuracy": 0.7168642222881317,
      "step": 345
    },
    {
      "epoch": 0.33714629741119806,
      "grad_norm": 0.3472833037376404,
      "learning_rate": 2e-05,
      "loss": 0.9505,
      "mean_token_accuracy": 0.7238100618124008,
      "step": 350
    },
    {
      "epoch": 0.3419626730885009,
      "grad_norm": 0.3869970142841339,
      "learning_rate": 2e-05,
      "loss": 0.9567,
      "mean_token_accuracy": 0.7265875309705734,
      "step": 355
    },
    {
      "epoch": 0.34677904876580373,
      "grad_norm": 0.422489196062088,
      "learning_rate": 2e-05,
      "loss": 0.9701,
      "mean_token_accuracy": 0.7184868469834328,
      "step": 360
    },
    {
      "epoch": 0.35159542444310654,
      "grad_norm": 0.43342992663383484,
      "learning_rate": 2e-05,
      "loss": 0.9979,
      "mean_token_accuracy": 0.7126397833228111,
      "step": 365
    },
    {
      "epoch": 0.3564118001204094,
      "grad_norm": 0.3574235737323761,
      "learning_rate": 2e-05,
      "loss": 0.9506,
      "mean_token_accuracy": 0.7254954233765603,
      "step": 370
    },
    {
      "epoch": 0.3612281757977122,
      "grad_norm": 0.4248846173286438,
      "learning_rate": 2e-05,
      "loss": 0.9657,
      "mean_token_accuracy": 0.7162819489836693,
      "step": 375
    },
    {
      "epoch": 0.3660445514750151,
      "grad_norm": 0.4223816692829132,
      "learning_rate": 2e-05,
      "loss": 0.9546,
      "mean_token_accuracy": 0.7249079883098603,
      "step": 380
    },
    {
      "epoch": 0.3708609271523179,
      "grad_norm": 0.45861685276031494,
      "learning_rate": 2e-05,
      "loss": 0.9349,
      "mean_token_accuracy": 0.7322170153260231,
      "step": 385
    },
    {
      "epoch": 0.3756773028296207,
      "grad_norm": 0.39118438959121704,
      "learning_rate": 2e-05,
      "loss": 0.9674,
      "mean_token_accuracy": 0.7160528540611267,
      "step": 390
    },
    {
      "epoch": 0.38049367850692356,
      "grad_norm": 0.3915044069290161,
      "learning_rate": 2e-05,
      "loss": 0.9661,
      "mean_token_accuracy": 0.7225557699799537,
      "step": 395
    },
    {
      "epoch": 0.38531005418422637,
      "grad_norm": 0.41501498222351074,
      "learning_rate": 2e-05,
      "loss": 0.9475,
      "mean_token_accuracy": 0.7268393576145172,
      "step": 400
    },
    {
      "epoch": 0.38531005418422637,
      "eval_loss": 1.0129687786102295,
      "eval_mean_token_accuracy": 0.7142733885691717,
      "eval_runtime": 23.1853,
      "eval_samples_per_second": 4.313,
      "eval_steps_per_second": 0.561,
      "step": 400
    },
    {
      "epoch": 0.3901264298615292,
      "grad_norm": 0.48722606897354126,
      "learning_rate": 2e-05,
      "loss": 0.9985,
      "mean_token_accuracy": 0.712544234097004,
      "step": 405
    },
    {
      "epoch": 0.39494280553883204,
      "grad_norm": 0.42091795802116394,
      "learning_rate": 2e-05,
      "loss": 0.9292,
      "mean_token_accuracy": 0.7308942764997483,
      "step": 410
    },
    {
      "epoch": 0.39975918121613485,
      "grad_norm": 0.4148479998111725,
      "learning_rate": 2e-05,
      "loss": 0.998,
      "mean_token_accuracy": 0.7112752854824066,
      "step": 415
    },
    {
      "epoch": 0.4045755568934377,
      "grad_norm": 0.3901733160018921,
      "learning_rate": 2e-05,
      "loss": 0.9559,
      "mean_token_accuracy": 0.7231112748384476,
      "step": 420
    },
    {
      "epoch": 0.4093919325707405,
      "grad_norm": 0.4191341996192932,
      "learning_rate": 2e-05,
      "loss": 0.9793,
      "mean_token_accuracy": 0.7161881670355796,
      "step": 425
    },
    {
      "epoch": 0.41420830824804333,
      "grad_norm": 0.3945569097995758,
      "learning_rate": 2e-05,
      "loss": 0.9623,
      "mean_token_accuracy": 0.7208192080259324,
      "step": 430
    },
    {
      "epoch": 0.4190246839253462,
      "grad_norm": 0.44316938519477844,
      "learning_rate": 2e-05,
      "loss": 0.9391,
      "mean_token_accuracy": 0.7293342381715775,
      "step": 435
    },
    {
      "epoch": 0.423841059602649,
      "grad_norm": 0.4176236391067505,
      "learning_rate": 2e-05,
      "loss": 0.9879,
      "mean_token_accuracy": 0.7138759195804596,
      "step": 440
    },
    {
      "epoch": 0.4286574352799518,
      "grad_norm": 0.38419532775878906,
      "learning_rate": 2e-05,
      "loss": 0.9332,
      "mean_token_accuracy": 0.7278907805681228,
      "step": 445
    },
    {
      "epoch": 0.4334738109572547,
      "grad_norm": 0.3864535093307495,
      "learning_rate": 2e-05,
      "loss": 0.9838,
      "mean_token_accuracy": 0.7188102528452873,
      "step": 450
    },
    {
      "epoch": 0.4382901866345575,
      "grad_norm": 0.40301385521888733,
      "learning_rate": 2e-05,
      "loss": 0.9727,
      "mean_token_accuracy": 0.7179606407880783,
      "step": 455
    },
    {
      "epoch": 0.44310656231186035,
      "grad_norm": 0.41534721851348877,
      "learning_rate": 2e-05,
      "loss": 0.9444,
      "mean_token_accuracy": 0.7267005681991577,
      "step": 460
    },
    {
      "epoch": 0.44792293798916316,
      "grad_norm": 0.3811696767807007,
      "learning_rate": 2e-05,
      "loss": 0.931,
      "mean_token_accuracy": 0.7301761016249657,
      "step": 465
    },
    {
      "epoch": 0.45273931366646597,
      "grad_norm": 0.38206103444099426,
      "learning_rate": 2e-05,
      "loss": 0.9205,
      "mean_token_accuracy": 0.7324171543121338,
      "step": 470
    },
    {
      "epoch": 0.45755568934376883,
      "grad_norm": 0.36854949593544006,
      "learning_rate": 2e-05,
      "loss": 0.933,
      "mean_token_accuracy": 0.7262160122394562,
      "step": 475
    },
    {
      "epoch": 0.46237206502107164,
      "grad_norm": 0.36893048882484436,
      "learning_rate": 2e-05,
      "loss": 0.9566,
      "mean_token_accuracy": 0.7235715687274933,
      "step": 480
    },
    {
      "epoch": 0.46718844069837445,
      "grad_norm": 0.3726271092891693,
      "learning_rate": 2e-05,
      "loss": 0.9731,
      "mean_token_accuracy": 0.7182789981365204,
      "step": 485
    },
    {
      "epoch": 0.4720048163756773,
      "grad_norm": 0.36003515124320984,
      "learning_rate": 2e-05,
      "loss": 0.9331,
      "mean_token_accuracy": 0.7282133370637893,
      "step": 490
    },
    {
      "epoch": 0.4768211920529801,
      "grad_norm": 0.3744361698627472,
      "learning_rate": 2e-05,
      "loss": 0.943,
      "mean_token_accuracy": 0.727964948117733,
      "step": 495
    },
    {
      "epoch": 0.481637567730283,
      "grad_norm": 0.40966275334358215,
      "learning_rate": 2e-05,
      "loss": 0.9205,
      "mean_token_accuracy": 0.7308084771037102,
      "step": 500
    },
    {
      "epoch": 0.481637567730283,
      "eval_loss": 0.9973437786102295,
      "eval_mean_token_accuracy": 0.7170403462189895,
      "eval_runtime": 23.158,
      "eval_samples_per_second": 4.318,
      "eval_steps_per_second": 0.561,
      "step": 500
    },
    {
      "epoch": 0.4864539434075858,
      "grad_norm": 0.38875406980514526,
      "learning_rate": 2e-05,
      "loss": 0.9812,
      "mean_token_accuracy": 0.7161158278584481,
      "step": 505
    },
    {
      "epoch": 0.4912703190848886,
      "grad_norm": 0.39988937973976135,
      "learning_rate": 2e-05,
      "loss": 0.9406,
      "mean_token_accuracy": 0.7260874435305595,
      "step": 510
    },
    {
      "epoch": 0.49608669476219147,
      "grad_norm": 0.40625178813934326,
      "learning_rate": 2e-05,
      "loss": 0.9645,
      "mean_token_accuracy": 0.7189718931913376,
      "step": 515
    },
    {
      "epoch": 0.5009030704394943,
      "grad_norm": 0.4269265830516815,
      "learning_rate": 2e-05,
      "loss": 0.9302,
      "mean_token_accuracy": 0.731168407201767,
      "step": 520
    },
    {
      "epoch": 0.5057194461167971,
      "grad_norm": 0.39293113350868225,
      "learning_rate": 2e-05,
      "loss": 0.9231,
      "mean_token_accuracy": 0.735957583785057,
      "step": 525
    },
    {
      "epoch": 0.5105358217941,
      "grad_norm": 0.4136344790458679,
      "learning_rate": 2e-05,
      "loss": 0.9396,
      "mean_token_accuracy": 0.7235330045223236,
      "step": 530
    },
    {
      "epoch": 0.5153521974714028,
      "grad_norm": 0.3826082944869995,
      "learning_rate": 2e-05,
      "loss": 0.9618,
      "mean_token_accuracy": 0.7200583890080452,
      "step": 535
    },
    {
      "epoch": 0.5201685731487056,
      "grad_norm": 0.40097910165786743,
      "learning_rate": 2e-05,
      "loss": 0.9442,
      "mean_token_accuracy": 0.72747682929039,
      "step": 540
    },
    {
      "epoch": 0.5249849488260084,
      "grad_norm": 0.40495404601097107,
      "learning_rate": 2e-05,
      "loss": 0.9654,
      "mean_token_accuracy": 0.7136755734682083,
      "step": 545
    },
    {
      "epoch": 0.5298013245033113,
      "grad_norm": 0.4751788079738617,
      "learning_rate": 2e-05,
      "loss": 0.9431,
      "mean_token_accuracy": 0.7281810432672501,
      "step": 550
    },
    {
      "epoch": 0.534617700180614,
      "grad_norm": 0.3656327724456787,
      "learning_rate": 2e-05,
      "loss": 0.9285,
      "mean_token_accuracy": 0.7332020610570907,
      "step": 555
    },
    {
      "epoch": 0.5394340758579169,
      "grad_norm": 0.3560369312763214,
      "learning_rate": 2e-05,
      "loss": 0.9382,
      "mean_token_accuracy": 0.7254460051655769,
      "step": 560
    },
    {
      "epoch": 0.5442504515352198,
      "grad_norm": 0.38857781887054443,
      "learning_rate": 2e-05,
      "loss": 0.9634,
      "mean_token_accuracy": 0.7230386212468147,
      "step": 565
    },
    {
      "epoch": 0.5490668272125225,
      "grad_norm": 0.3445562422275543,
      "learning_rate": 2e-05,
      "loss": 0.932,
      "mean_token_accuracy": 0.7287012577056885,
      "step": 570
    },
    {
      "epoch": 0.5538832028898254,
      "grad_norm": 0.4291544258594513,
      "learning_rate": 2e-05,
      "loss": 0.9771,
      "mean_token_accuracy": 0.7165321439504624,
      "step": 575
    },
    {
      "epoch": 0.5586995785671283,
      "grad_norm": 0.4487311542034149,
      "learning_rate": 2e-05,
      "loss": 0.9168,
      "mean_token_accuracy": 0.7317699804902077,
      "step": 580
    },
    {
      "epoch": 0.563515954244431,
      "grad_norm": 0.40127262473106384,
      "learning_rate": 2e-05,
      "loss": 0.9564,
      "mean_token_accuracy": 0.7205055937170982,
      "step": 585
    },
    {
      "epoch": 0.5683323299217339,
      "grad_norm": 0.3540896475315094,
      "learning_rate": 2e-05,
      "loss": 0.9416,
      "mean_token_accuracy": 0.7258080333471298,
      "step": 590
    },
    {
      "epoch": 0.5731487055990367,
      "grad_norm": 0.36789658665657043,
      "learning_rate": 2e-05,
      "loss": 0.9602,
      "mean_token_accuracy": 0.7191953271627426,
      "step": 595
    },
    {
      "epoch": 0.5779650812763396,
      "grad_norm": 0.42882364988327026,
      "learning_rate": 2e-05,
      "loss": 0.9277,
      "mean_token_accuracy": 0.7274743899703026,
      "step": 600
    },
    {
      "epoch": 0.5779650812763396,
      "eval_loss": 0.9842187762260437,
      "eval_mean_token_accuracy": 0.7194240184930655,
      "eval_runtime": 23.1812,
      "eval_samples_per_second": 4.314,
      "eval_steps_per_second": 0.561,
      "step": 600
    },
    {
      "epoch": 0.5827814569536424,
      "grad_norm": 0.3456374406814575,
      "learning_rate": 2e-05,
      "loss": 0.9583,
      "mean_token_accuracy": 0.7262210890650749,
      "step": 605
    },
    {
      "epoch": 0.5875978326309452,
      "grad_norm": 0.39239296317100525,
      "learning_rate": 2e-05,
      "loss": 0.9355,
      "mean_token_accuracy": 0.7298878788948059,
      "step": 610
    },
    {
      "epoch": 0.5924142083082481,
      "grad_norm": 0.3922767639160156,
      "learning_rate": 2e-05,
      "loss": 0.9119,
      "mean_token_accuracy": 0.731063374876976,
      "step": 615
    },
    {
      "epoch": 0.5972305839855508,
      "grad_norm": 0.4066627025604248,
      "learning_rate": 2e-05,
      "loss": 0.9721,
      "mean_token_accuracy": 0.7155601590871811,
      "step": 620
    },
    {
      "epoch": 0.6020469596628537,
      "grad_norm": 0.3650158643722534,
      "learning_rate": 2e-05,
      "loss": 0.9183,
      "mean_token_accuracy": 0.7321714773774147,
      "step": 625
    },
    {
      "epoch": 0.6068633353401566,
      "grad_norm": 0.3967447876930237,
      "learning_rate": 2e-05,
      "loss": 0.9125,
      "mean_token_accuracy": 0.7360529005527496,
      "step": 630
    },
    {
      "epoch": 0.6116797110174593,
      "grad_norm": 0.4131582975387573,
      "learning_rate": 2e-05,
      "loss": 0.9537,
      "mean_token_accuracy": 0.7238716900348663,
      "step": 635
    },
    {
      "epoch": 0.6164960866947622,
      "grad_norm": 0.3441373109817505,
      "learning_rate": 2e-05,
      "loss": 0.942,
      "mean_token_accuracy": 0.7241286799311638,
      "step": 640
    },
    {
      "epoch": 0.621312462372065,
      "grad_norm": 0.4480727016925812,
      "learning_rate": 2e-05,
      "loss": 0.9481,
      "mean_token_accuracy": 0.7221115916967392,
      "step": 645
    },
    {
      "epoch": 0.6261288380493678,
      "grad_norm": 0.3634999394416809,
      "learning_rate": 2e-05,
      "loss": 0.9463,
      "mean_token_accuracy": 0.7219712048768997,
      "step": 650
    },
    {
      "epoch": 0.6309452137266707,
      "grad_norm": 0.3939569890499115,
      "learning_rate": 2e-05,
      "loss": 0.9309,
      "mean_token_accuracy": 0.7307459726929665,
      "step": 655
    },
    {
      "epoch": 0.6357615894039735,
      "grad_norm": 0.3766266405582428,
      "learning_rate": 2e-05,
      "loss": 0.9568,
      "mean_token_accuracy": 0.7186618536710739,
      "step": 660
    },
    {
      "epoch": 0.6405779650812763,
      "grad_norm": 0.40739813446998596,
      "learning_rate": 2e-05,
      "loss": 0.9012,
      "mean_token_accuracy": 0.7370730206370354,
      "step": 665
    },
    {
      "epoch": 0.6453943407585792,
      "grad_norm": 0.4568629860877991,
      "learning_rate": 2e-05,
      "loss": 0.934,
      "mean_token_accuracy": 0.7295185983181,
      "step": 670
    },
    {
      "epoch": 0.650210716435882,
      "grad_norm": 0.4078063666820526,
      "learning_rate": 2e-05,
      "loss": 0.8946,
      "mean_token_accuracy": 0.7382076278328895,
      "step": 675
    },
    {
      "epoch": 0.6550270921131849,
      "grad_norm": 0.39687684178352356,
      "learning_rate": 2e-05,
      "loss": 0.9299,
      "mean_token_accuracy": 0.726612477004528,
      "step": 680
    },
    {
      "epoch": 0.6598434677904876,
      "grad_norm": 0.42360830307006836,
      "learning_rate": 2e-05,
      "loss": 0.9168,
      "mean_token_accuracy": 0.7292766153812409,
      "step": 685
    },
    {
      "epoch": 0.6646598434677905,
      "grad_norm": 0.391550213098526,
      "learning_rate": 2e-05,
      "loss": 0.9434,
      "mean_token_accuracy": 0.7261136472225189,
      "step": 690
    },
    {
      "epoch": 0.6694762191450934,
      "grad_norm": 0.380113422870636,
      "learning_rate": 2e-05,
      "loss": 0.9304,
      "mean_token_accuracy": 0.7198660045862197,
      "step": 695
    },
    {
      "epoch": 0.6742925948223961,
      "grad_norm": 0.37536484003067017,
      "learning_rate": 2e-05,
      "loss": 0.9371,
      "mean_token_accuracy": 0.7235382974147797,
      "step": 700
    },
    {
      "epoch": 0.6742925948223961,
      "eval_loss": 0.973437488079071,
      "eval_mean_token_accuracy": 0.7215940081156217,
      "eval_runtime": 23.1338,
      "eval_samples_per_second": 4.323,
      "eval_steps_per_second": 0.562,
      "step": 700
    },
    {
      "epoch": 0.679108970499699,
      "grad_norm": 0.39021074771881104,
      "learning_rate": 2e-05,
      "loss": 0.9437,
      "mean_token_accuracy": 0.7243467301130295,
      "step": 705
    },
    {
      "epoch": 0.6839253461770018,
      "grad_norm": 0.391563355922699,
      "learning_rate": 2e-05,
      "loss": 0.9232,
      "mean_token_accuracy": 0.7265035390853882,
      "step": 710
    },
    {
      "epoch": 0.6887417218543046,
      "grad_norm": 0.3613850772380829,
      "learning_rate": 2e-05,
      "loss": 0.9233,
      "mean_token_accuracy": 0.7278270050883293,
      "step": 715
    },
    {
      "epoch": 0.6935580975316075,
      "grad_norm": 0.4116956293582916,
      "learning_rate": 2e-05,
      "loss": 0.9495,
      "mean_token_accuracy": 0.7207832634449005,
      "step": 720
    },
    {
      "epoch": 0.6983744732089103,
      "grad_norm": 0.3616946339607239,
      "learning_rate": 2e-05,
      "loss": 0.9772,
      "mean_token_accuracy": 0.7127095699310303,
      "step": 725
    },
    {
      "epoch": 0.7031908488862131,
      "grad_norm": 0.40902280807495117,
      "learning_rate": 2e-05,
      "loss": 0.9146,
      "mean_token_accuracy": 0.7307567968964577,
      "step": 730
    },
    {
      "epoch": 0.708007224563516,
      "grad_norm": 0.3533954918384552,
      "learning_rate": 2e-05,
      "loss": 0.9187,
      "mean_token_accuracy": 0.7295649662613869,
      "step": 735
    },
    {
      "epoch": 0.7128236002408188,
      "grad_norm": 0.3901711702346802,
      "learning_rate": 2e-05,
      "loss": 0.9299,
      "mean_token_accuracy": 0.7287529841065407,
      "step": 740
    },
    {
      "epoch": 0.7176399759181216,
      "grad_norm": 0.45047813653945923,
      "learning_rate": 2e-05,
      "loss": 0.9104,
      "mean_token_accuracy": 0.7317241802811623,
      "step": 745
    },
    {
      "epoch": 0.7224563515954244,
      "grad_norm": 0.38531169295310974,
      "learning_rate": 2e-05,
      "loss": 0.9146,
      "mean_token_accuracy": 0.728726564347744,
      "step": 750
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.437835693359375,
      "learning_rate": 2e-05,
      "loss": 0.9102,
      "mean_token_accuracy": 0.7315579026937484,
      "step": 755
    },
    {
      "epoch": 0.7320891029500302,
      "grad_norm": 0.3801443874835968,
      "learning_rate": 2e-05,
      "loss": 0.9416,
      "mean_token_accuracy": 0.725105169415474,
      "step": 760
    },
    {
      "epoch": 0.7369054786273329,
      "grad_norm": 0.39513495564460754,
      "learning_rate": 2e-05,
      "loss": 0.8933,
      "mean_token_accuracy": 0.7324695363640785,
      "step": 765
    },
    {
      "epoch": 0.7417218543046358,
      "grad_norm": 0.4051715135574341,
      "learning_rate": 2e-05,
      "loss": 0.9193,
      "mean_token_accuracy": 0.727542620897293,
      "step": 770
    },
    {
      "epoch": 0.7465382299819386,
      "grad_norm": 0.37806564569473267,
      "learning_rate": 2e-05,
      "loss": 0.9447,
      "mean_token_accuracy": 0.7283599302172661,
      "step": 775
    },
    {
      "epoch": 0.7513546056592414,
      "grad_norm": 0.4053668975830078,
      "learning_rate": 2e-05,
      "loss": 0.8943,
      "mean_token_accuracy": 0.7407664448022843,
      "step": 780
    },
    {
      "epoch": 0.7561709813365443,
      "grad_norm": 0.377989798784256,
      "learning_rate": 2e-05,
      "loss": 0.897,
      "mean_token_accuracy": 0.7331721663475037,
      "step": 785
    },
    {
      "epoch": 0.7609873570138471,
      "grad_norm": 0.3637182414531708,
      "learning_rate": 2e-05,
      "loss": 0.9274,
      "mean_token_accuracy": 0.7238499134778976,
      "step": 790
    },
    {
      "epoch": 0.7658037326911499,
      "grad_norm": 0.3860657513141632,
      "learning_rate": 2e-05,
      "loss": 0.9038,
      "mean_token_accuracy": 0.7350373581051827,
      "step": 795
    },
    {
      "epoch": 0.7706201083684527,
      "grad_norm": 0.3994176387786865,
      "learning_rate": 2e-05,
      "loss": 0.9671,
      "mean_token_accuracy": 0.7155893579125404,
      "step": 800
    },
    {
      "epoch": 0.7706201083684527,
      "eval_loss": 0.964062511920929,
      "eval_mean_token_accuracy": 0.7233101450479947,
      "eval_runtime": 23.129,
      "eval_samples_per_second": 4.324,
      "eval_steps_per_second": 0.562,
      "step": 800
    },
    {
      "epoch": 0.7754364840457556,
      "grad_norm": 0.4492134153842926,
      "learning_rate": 2e-05,
      "loss": 0.9353,
      "mean_token_accuracy": 0.7261847347021103,
      "step": 805
    },
    {
      "epoch": 0.7802528597230584,
      "grad_norm": 0.45549172163009644,
      "learning_rate": 2e-05,
      "loss": 0.9376,
      "mean_token_accuracy": 0.7261715829372406,
      "step": 810
    },
    {
      "epoch": 0.7850692354003612,
      "grad_norm": 0.378998726606369,
      "learning_rate": 2e-05,
      "loss": 0.9256,
      "mean_token_accuracy": 0.7321177124977112,
      "step": 815
    },
    {
      "epoch": 0.7898856110776641,
      "grad_norm": 0.35385045409202576,
      "learning_rate": 2e-05,
      "loss": 0.908,
      "mean_token_accuracy": 0.7342436522245407,
      "step": 820
    },
    {
      "epoch": 0.7947019867549668,
      "grad_norm": 0.3778769373893738,
      "learning_rate": 2e-05,
      "loss": 0.919,
      "mean_token_accuracy": 0.7232446476817131,
      "step": 825
    },
    {
      "epoch": 0.7995183624322697,
      "grad_norm": 0.36614373326301575,
      "learning_rate": 2e-05,
      "loss": 0.9074,
      "mean_token_accuracy": 0.7344337999820709,
      "step": 830
    },
    {
      "epoch": 0.8043347381095726,
      "grad_norm": 0.36225926876068115,
      "learning_rate": 2e-05,
      "loss": 0.8927,
      "mean_token_accuracy": 0.7368348643183709,
      "step": 835
    },
    {
      "epoch": 0.8091511137868754,
      "grad_norm": 0.3926050662994385,
      "learning_rate": 2e-05,
      "loss": 0.8856,
      "mean_token_accuracy": 0.7389758110046387,
      "step": 840
    },
    {
      "epoch": 0.8139674894641782,
      "grad_norm": 0.3587627708911896,
      "learning_rate": 2e-05,
      "loss": 0.9458,
      "mean_token_accuracy": 0.7253442853689194,
      "step": 845
    },
    {
      "epoch": 0.818783865141481,
      "grad_norm": 0.33780330419540405,
      "learning_rate": 2e-05,
      "loss": 0.9201,
      "mean_token_accuracy": 0.7290245488286018,
      "step": 850
    },
    {
      "epoch": 0.8236002408187839,
      "grad_norm": 0.36326298117637634,
      "learning_rate": 2e-05,
      "loss": 0.8672,
      "mean_token_accuracy": 0.7469315692782402,
      "step": 855
    },
    {
      "epoch": 0.8284166164960867,
      "grad_norm": 0.3772122859954834,
      "learning_rate": 2e-05,
      "loss": 0.9153,
      "mean_token_accuracy": 0.7269368454813957,
      "step": 860
    },
    {
      "epoch": 0.8332329921733895,
      "grad_norm": 0.3824109137058258,
      "learning_rate": 2e-05,
      "loss": 0.922,
      "mean_token_accuracy": 0.7278964444994926,
      "step": 865
    },
    {
      "epoch": 0.8380493678506924,
      "grad_norm": 0.42278799414634705,
      "learning_rate": 2e-05,
      "loss": 0.921,
      "mean_token_accuracy": 0.730242757499218,
      "step": 870
    },
    {
      "epoch": 0.8428657435279951,
      "grad_norm": 0.37090981006622314,
      "learning_rate": 2e-05,
      "loss": 0.8888,
      "mean_token_accuracy": 0.7369868889451027,
      "step": 875
    },
    {
      "epoch": 0.847682119205298,
      "grad_norm": 0.3563065230846405,
      "learning_rate": 2e-05,
      "loss": 0.88,
      "mean_token_accuracy": 0.7384928792715073,
      "step": 880
    },
    {
      "epoch": 0.8524984948826009,
      "grad_norm": 0.3902587592601776,
      "learning_rate": 2e-05,
      "loss": 0.9187,
      "mean_token_accuracy": 0.7276976093649864,
      "step": 885
    },
    {
      "epoch": 0.8573148705599036,
      "grad_norm": 0.4171347916126251,
      "learning_rate": 2e-05,
      "loss": 0.9003,
      "mean_token_accuracy": 0.7332590997219086,
      "step": 890
    },
    {
      "epoch": 0.8621312462372065,
      "grad_norm": 0.39231064915657043,
      "learning_rate": 2e-05,
      "loss": 0.9098,
      "mean_token_accuracy": 0.7335718736052513,
      "step": 895
    },
    {
      "epoch": 0.8669476219145094,
      "grad_norm": 0.37322694063186646,
      "learning_rate": 2e-05,
      "loss": 0.8997,
      "mean_token_accuracy": 0.7329329386353493,
      "step": 900
    },
    {
      "epoch": 0.8669476219145094,
      "eval_loss": 0.9554687738418579,
      "eval_mean_token_accuracy": 0.7253300134952252,
      "eval_runtime": 23.1858,
      "eval_samples_per_second": 4.313,
      "eval_steps_per_second": 0.561,
      "step": 900
    },
    {
      "epoch": 0.8717639975918121,
      "grad_norm": 0.4675639271736145,
      "learning_rate": 2e-05,
      "loss": 0.9078,
      "mean_token_accuracy": 0.7307937040925025,
      "step": 905
    },
    {
      "epoch": 0.876580373269115,
      "grad_norm": 0.37796643376350403,
      "learning_rate": 2e-05,
      "loss": 0.915,
      "mean_token_accuracy": 0.7300151839852334,
      "step": 910
    },
    {
      "epoch": 0.8813967489464178,
      "grad_norm": 0.3831245005130768,
      "learning_rate": 2e-05,
      "loss": 0.8853,
      "mean_token_accuracy": 0.7340099573135376,
      "step": 915
    },
    {
      "epoch": 0.8862131246237207,
      "grad_norm": 0.4262712299823761,
      "learning_rate": 2e-05,
      "loss": 0.8898,
      "mean_token_accuracy": 0.7373559743165969,
      "step": 920
    },
    {
      "epoch": 0.8910295003010235,
      "grad_norm": 0.3827621638774872,
      "learning_rate": 2e-05,
      "loss": 0.9313,
      "mean_token_accuracy": 0.721035298705101,
      "step": 925
    },
    {
      "epoch": 0.8958458759783263,
      "grad_norm": 0.358642041683197,
      "learning_rate": 2e-05,
      "loss": 0.8993,
      "mean_token_accuracy": 0.7342952102422714,
      "step": 930
    },
    {
      "epoch": 0.9006622516556292,
      "grad_norm": 0.45269307494163513,
      "learning_rate": 2e-05,
      "loss": 0.9201,
      "mean_token_accuracy": 0.7350223436951637,
      "step": 935
    },
    {
      "epoch": 0.9054786273329319,
      "grad_norm": 0.4106045663356781,
      "learning_rate": 2e-05,
      "loss": 0.8975,
      "mean_token_accuracy": 0.7355057403445244,
      "step": 940
    },
    {
      "epoch": 0.9102950030102348,
      "grad_norm": 0.43229761719703674,
      "learning_rate": 2e-05,
      "loss": 0.9174,
      "mean_token_accuracy": 0.7289028167724609,
      "step": 945
    },
    {
      "epoch": 0.9151113786875377,
      "grad_norm": 0.4135943353176117,
      "learning_rate": 2e-05,
      "loss": 0.8568,
      "mean_token_accuracy": 0.7484120547771453,
      "step": 950
    },
    {
      "epoch": 0.9199277543648404,
      "grad_norm": 0.3772605359554291,
      "learning_rate": 2e-05,
      "loss": 0.8858,
      "mean_token_accuracy": 0.7402070254087448,
      "step": 955
    },
    {
      "epoch": 0.9247441300421433,
      "grad_norm": 0.38806506991386414,
      "learning_rate": 2e-05,
      "loss": 0.8839,
      "mean_token_accuracy": 0.738246065378189,
      "step": 960
    },
    {
      "epoch": 0.9295605057194462,
      "grad_norm": 0.37721186876296997,
      "learning_rate": 2e-05,
      "loss": 0.9446,
      "mean_token_accuracy": 0.7229430004954338,
      "step": 965
    },
    {
      "epoch": 0.9343768813967489,
      "grad_norm": 0.37863266468048096,
      "learning_rate": 2e-05,
      "loss": 0.8968,
      "mean_token_accuracy": 0.735011525452137,
      "step": 970
    },
    {
      "epoch": 0.9391932570740518,
      "grad_norm": 0.35967886447906494,
      "learning_rate": 2e-05,
      "loss": 0.9053,
      "mean_token_accuracy": 0.7334525480866432,
      "step": 975
    },
    {
      "epoch": 0.9440096327513546,
      "grad_norm": 0.36415719985961914,
      "learning_rate": 2e-05,
      "loss": 0.8855,
      "mean_token_accuracy": 0.7396774977445603,
      "step": 980
    },
    {
      "epoch": 0.9488260084286574,
      "grad_norm": 0.3698769509792328,
      "learning_rate": 2e-05,
      "loss": 0.8916,
      "mean_token_accuracy": 0.7383552804589272,
      "step": 985
    },
    {
      "epoch": 0.9536423841059603,
      "grad_norm": 0.38771921396255493,
      "learning_rate": 2e-05,
      "loss": 0.9274,
      "mean_token_accuracy": 0.7202894672751426,
      "step": 990
    },
    {
      "epoch": 0.9584587597832631,
      "grad_norm": 0.42747798562049866,
      "learning_rate": 2e-05,
      "loss": 0.9301,
      "mean_token_accuracy": 0.7242205336689949,
      "step": 995
    },
    {
      "epoch": 0.963275135460566,
      "grad_norm": 0.36320194602012634,
      "learning_rate": 2e-05,
      "loss": 0.9062,
      "mean_token_accuracy": 0.7277265936136246,
      "step": 1000
    },
    {
      "epoch": 0.963275135460566,
      "eval_loss": 0.948437511920929,
      "eval_mean_token_accuracy": 0.7268086809378403,
      "eval_runtime": 23.2366,
      "eval_samples_per_second": 4.304,
      "eval_steps_per_second": 0.559,
      "step": 1000
    },
    {
      "epoch": 0.9680915111378687,
      "grad_norm": 0.38512277603149414,
      "learning_rate": 2e-05,
      "loss": 0.8589,
      "mean_token_accuracy": 0.7454344347119332,
      "step": 1005
    },
    {
      "epoch": 0.9729078868151716,
      "grad_norm": 0.35500526428222656,
      "learning_rate": 2e-05,
      "loss": 0.8943,
      "mean_token_accuracy": 0.7319369062781333,
      "step": 1010
    },
    {
      "epoch": 0.9777242624924745,
      "grad_norm": 0.4057998061180115,
      "learning_rate": 2e-05,
      "loss": 0.8865,
      "mean_token_accuracy": 0.7375865921378135,
      "step": 1015
    },
    {
      "epoch": 0.9825406381697772,
      "grad_norm": 0.40733131766319275,
      "learning_rate": 2e-05,
      "loss": 0.9033,
      "mean_token_accuracy": 0.7339003443717956,
      "step": 1020
    },
    {
      "epoch": 0.9873570138470801,
      "grad_norm": 0.3689517676830292,
      "learning_rate": 2e-05,
      "loss": 0.9072,
      "mean_token_accuracy": 0.7332539647817612,
      "step": 1025
    },
    {
      "epoch": 0.9921733895243829,
      "grad_norm": 0.3920786678791046,
      "learning_rate": 2e-05,
      "loss": 0.8831,
      "mean_token_accuracy": 0.7399213343858719,
      "step": 1030
    },
    {
      "epoch": 0.9969897652016857,
      "grad_norm": 0.3332076072692871,
      "learning_rate": 2e-05,
      "loss": 0.8808,
      "mean_token_accuracy": 0.7407150834798812,
      "step": 1035
    },
    {
      "epoch": 1.0009632751354605,
      "grad_norm": 0.43358659744262695,
      "learning_rate": 2e-05,
      "loss": 0.7348,
      "mean_token_accuracy": 0.7383806271986528,
      "step": 1040
    },
    {
      "epoch": 1.0057796508127634,
      "grad_norm": 0.4309595823287964,
      "learning_rate": 2e-05,
      "loss": 0.7893,
      "mean_token_accuracy": 0.7559069380164146,
      "step": 1045
    },
    {
      "epoch": 1.0105960264900662,
      "grad_norm": 0.4139677584171295,
      "learning_rate": 2e-05,
      "loss": 0.8363,
      "mean_token_accuracy": 0.7487693518400192,
      "step": 1050
    },
    {
      "epoch": 1.0154124021673692,
      "grad_norm": 0.37942108511924744,
      "learning_rate": 2e-05,
      "loss": 0.8459,
      "mean_token_accuracy": 0.7475774019956589,
      "step": 1055
    },
    {
      "epoch": 1.020228777844672,
      "grad_norm": 0.42199230194091797,
      "learning_rate": 2e-05,
      "loss": 0.8702,
      "mean_token_accuracy": 0.7373737841844559,
      "step": 1060
    },
    {
      "epoch": 1.0250451535219747,
      "grad_norm": 0.36780673265457153,
      "learning_rate": 2e-05,
      "loss": 0.8534,
      "mean_token_accuracy": 0.7421116903424263,
      "step": 1065
    },
    {
      "epoch": 1.0298615291992776,
      "grad_norm": 0.3655712306499481,
      "learning_rate": 2e-05,
      "loss": 0.8861,
      "mean_token_accuracy": 0.7346468567848206,
      "step": 1070
    },
    {
      "epoch": 1.0346779048765804,
      "grad_norm": 0.3763968348503113,
      "learning_rate": 2e-05,
      "loss": 0.869,
      "mean_token_accuracy": 0.7416014015674591,
      "step": 1075
    },
    {
      "epoch": 1.0394942805538832,
      "grad_norm": 0.3777642548084259,
      "learning_rate": 2e-05,
      "loss": 0.882,
      "mean_token_accuracy": 0.7347766742110252,
      "step": 1080
    },
    {
      "epoch": 1.0443106562311861,
      "grad_norm": 0.37617817521095276,
      "learning_rate": 2e-05,
      "loss": 0.8808,
      "mean_token_accuracy": 0.7349648147821426,
      "step": 1085
    },
    {
      "epoch": 1.0491270319084889,
      "grad_norm": 0.37556421756744385,
      "learning_rate": 2e-05,
      "loss": 0.8426,
      "mean_token_accuracy": 0.7498120263218879,
      "step": 1090
    },
    {
      "epoch": 1.0539434075857916,
      "grad_norm": 0.3898829221725464,
      "learning_rate": 2e-05,
      "loss": 0.8365,
      "mean_token_accuracy": 0.746685053408146,
      "step": 1095
    },
    {
      "epoch": 1.0587597832630946,
      "grad_norm": 0.3657301068305969,
      "learning_rate": 2e-05,
      "loss": 0.865,
      "mean_token_accuracy": 0.7422873020172119,
      "step": 1100
    },
    {
      "epoch": 1.0587597832630946,
      "eval_loss": 0.9439062476158142,
      "eval_mean_token_accuracy": 0.7276775607695947,
      "eval_runtime": 23.2077,
      "eval_samples_per_second": 4.309,
      "eval_steps_per_second": 0.56,
      "step": 1100
    },
    {
      "epoch": 1.0635761589403974,
      "grad_norm": 0.3528231084346771,
      "learning_rate": 2e-05,
      "loss": 0.8491,
      "mean_token_accuracy": 0.7472317442297935,
      "step": 1105
    },
    {
      "epoch": 1.0683925346177001,
      "grad_norm": 0.3855115473270416,
      "learning_rate": 2e-05,
      "loss": 0.8343,
      "mean_token_accuracy": 0.7537892758846283,
      "step": 1110
    },
    {
      "epoch": 1.073208910295003,
      "grad_norm": 0.37757667899131775,
      "learning_rate": 2e-05,
      "loss": 0.8523,
      "mean_token_accuracy": 0.7377029240131379,
      "step": 1115
    },
    {
      "epoch": 1.0780252859723058,
      "grad_norm": 0.363437682390213,
      "learning_rate": 2e-05,
      "loss": 0.8576,
      "mean_token_accuracy": 0.7391376078128815,
      "step": 1120
    },
    {
      "epoch": 1.0828416616496086,
      "grad_norm": 0.366401344537735,
      "learning_rate": 2e-05,
      "loss": 0.854,
      "mean_token_accuracy": 0.7467326611280442,
      "step": 1125
    },
    {
      "epoch": 1.0876580373269116,
      "grad_norm": 0.3566599488258362,
      "learning_rate": 2e-05,
      "loss": 0.8801,
      "mean_token_accuracy": 0.7326125755906105,
      "step": 1130
    },
    {
      "epoch": 1.0924744130042143,
      "grad_norm": 0.34054407477378845,
      "learning_rate": 2e-05,
      "loss": 0.8527,
      "mean_token_accuracy": 0.7444597333669662,
      "step": 1135
    },
    {
      "epoch": 1.097290788681517,
      "grad_norm": 0.4111313223838806,
      "learning_rate": 2e-05,
      "loss": 0.8497,
      "mean_token_accuracy": 0.7466902017593384,
      "step": 1140
    },
    {
      "epoch": 1.10210716435882,
      "grad_norm": 0.3806932270526886,
      "learning_rate": 2e-05,
      "loss": 0.8485,
      "mean_token_accuracy": 0.7481859296560287,
      "step": 1145
    },
    {
      "epoch": 1.1069235400361228,
      "grad_norm": 0.3866956830024719,
      "learning_rate": 2e-05,
      "loss": 0.8138,
      "mean_token_accuracy": 0.7499373897910118,
      "step": 1150
    },
    {
      "epoch": 1.1117399157134256,
      "grad_norm": 0.37800461053848267,
      "learning_rate": 2e-05,
      "loss": 0.8537,
      "mean_token_accuracy": 0.7427974909543991,
      "step": 1155
    },
    {
      "epoch": 1.1165562913907285,
      "grad_norm": 0.3657369911670685,
      "learning_rate": 2e-05,
      "loss": 0.8643,
      "mean_token_accuracy": 0.7425652265548706,
      "step": 1160
    },
    {
      "epoch": 1.1213726670680313,
      "grad_norm": 0.34589388966560364,
      "learning_rate": 2e-05,
      "loss": 0.8236,
      "mean_token_accuracy": 0.746645887196064,
      "step": 1165
    },
    {
      "epoch": 1.126189042745334,
      "grad_norm": 0.3889903426170349,
      "learning_rate": 2e-05,
      "loss": 0.8502,
      "mean_token_accuracy": 0.7441315606236458,
      "step": 1170
    },
    {
      "epoch": 1.131005418422637,
      "grad_norm": 0.3654171824455261,
      "learning_rate": 2e-05,
      "loss": 0.8639,
      "mean_token_accuracy": 0.7370901137590409,
      "step": 1175
    },
    {
      "epoch": 1.1358217940999398,
      "grad_norm": 0.3497660756111145,
      "learning_rate": 2e-05,
      "loss": 0.8617,
      "mean_token_accuracy": 0.741532389819622,
      "step": 1180
    },
    {
      "epoch": 1.1406381697772425,
      "grad_norm": 0.36609187722206116,
      "learning_rate": 2e-05,
      "loss": 0.8618,
      "mean_token_accuracy": 0.7446050852537155,
      "step": 1185
    },
    {
      "epoch": 1.1454545454545455,
      "grad_norm": 0.4400254786014557,
      "learning_rate": 2e-05,
      "loss": 0.8326,
      "mean_token_accuracy": 0.7501450702548027,
      "step": 1190
    },
    {
      "epoch": 1.1502709211318483,
      "grad_norm": 0.3326510190963745,
      "learning_rate": 2e-05,
      "loss": 0.8418,
      "mean_token_accuracy": 0.744844700396061,
      "step": 1195
    },
    {
      "epoch": 1.155087296809151,
      "grad_norm": 0.37238839268684387,
      "learning_rate": 2e-05,
      "loss": 0.8586,
      "mean_token_accuracy": 0.7447129786014557,
      "step": 1200
    },
    {
      "epoch": 1.155087296809151,
      "eval_loss": 0.9375,
      "eval_mean_token_accuracy": 0.7290752346699054,
      "eval_runtime": 23.1543,
      "eval_samples_per_second": 4.319,
      "eval_steps_per_second": 0.561,
      "step": 1200
    },
    {
      "epoch": 1.159903672486454,
      "grad_norm": 0.3780873119831085,
      "learning_rate": 2e-05,
      "loss": 0.8274,
      "mean_token_accuracy": 0.7483693122863769,
      "step": 1205
    },
    {
      "epoch": 1.1647200481637567,
      "grad_norm": 0.404081255197525,
      "learning_rate": 2e-05,
      "loss": 0.8249,
      "mean_token_accuracy": 0.7522385701537132,
      "step": 1210
    },
    {
      "epoch": 1.1695364238410595,
      "grad_norm": 0.3591543734073639,
      "learning_rate": 2e-05,
      "loss": 0.8128,
      "mean_token_accuracy": 0.7550233393907547,
      "step": 1215
    },
    {
      "epoch": 1.1743527995183625,
      "grad_norm": 0.3817455768585205,
      "learning_rate": 2e-05,
      "loss": 0.8393,
      "mean_token_accuracy": 0.7501140117645264,
      "step": 1220
    },
    {
      "epoch": 1.1791691751956652,
      "grad_norm": 0.3948347270488739,
      "learning_rate": 2e-05,
      "loss": 0.8172,
      "mean_token_accuracy": 0.7537783578038215,
      "step": 1225
    },
    {
      "epoch": 1.183985550872968,
      "grad_norm": 0.39961639046669006,
      "learning_rate": 2e-05,
      "loss": 0.8531,
      "mean_token_accuracy": 0.7440052881836892,
      "step": 1230
    },
    {
      "epoch": 1.188801926550271,
      "grad_norm": 0.3994446098804474,
      "learning_rate": 2e-05,
      "loss": 0.8193,
      "mean_token_accuracy": 0.7571675822138786,
      "step": 1235
    },
    {
      "epoch": 1.1936183022275737,
      "grad_norm": 0.3635159730911255,
      "learning_rate": 2e-05,
      "loss": 0.8042,
      "mean_token_accuracy": 0.7555163532495499,
      "step": 1240
    },
    {
      "epoch": 1.1984346779048767,
      "grad_norm": 0.38725772500038147,
      "learning_rate": 2e-05,
      "loss": 0.867,
      "mean_token_accuracy": 0.7381075561046601,
      "step": 1245
    },
    {
      "epoch": 1.2032510535821794,
      "grad_norm": 0.4206209182739258,
      "learning_rate": 2e-05,
      "loss": 0.9091,
      "mean_token_accuracy": 0.727660009264946,
      "step": 1250
    },
    {
      "epoch": 1.2080674292594822,
      "grad_norm": 0.3780755400657654,
      "learning_rate": 2e-05,
      "loss": 0.8501,
      "mean_token_accuracy": 0.740609385073185,
      "step": 1255
    },
    {
      "epoch": 1.2128838049367852,
      "grad_norm": 0.3919096887111664,
      "learning_rate": 2e-05,
      "loss": 0.8262,
      "mean_token_accuracy": 0.7502545565366745,
      "step": 1260
    },
    {
      "epoch": 1.217700180614088,
      "grad_norm": 0.4055858850479126,
      "learning_rate": 2e-05,
      "loss": 0.8326,
      "mean_token_accuracy": 0.746037882566452,
      "step": 1265
    },
    {
      "epoch": 1.2225165562913907,
      "grad_norm": 0.3605825901031494,
      "learning_rate": 2e-05,
      "loss": 0.8352,
      "mean_token_accuracy": 0.751497520506382,
      "step": 1270
    },
    {
      "epoch": 1.2273329319686936,
      "grad_norm": 0.3745580017566681,
      "learning_rate": 2e-05,
      "loss": 0.9102,
      "mean_token_accuracy": 0.7277455866336823,
      "step": 1275
    },
    {
      "epoch": 1.2321493076459964,
      "grad_norm": 0.4068564772605896,
      "learning_rate": 2e-05,
      "loss": 0.8439,
      "mean_token_accuracy": 0.7488078013062477,
      "step": 1280
    },
    {
      "epoch": 1.2369656833232991,
      "grad_norm": 0.33656877279281616,
      "learning_rate": 2e-05,
      "loss": 0.853,
      "mean_token_accuracy": 0.7449796929955482,
      "step": 1285
    },
    {
      "epoch": 1.2417820590006021,
      "grad_norm": 0.38442090153694153,
      "learning_rate": 2e-05,
      "loss": 0.8105,
      "mean_token_accuracy": 0.7490809202194214,
      "step": 1290
    },
    {
      "epoch": 1.2465984346779049,
      "grad_norm": 0.4118853211402893,
      "learning_rate": 2e-05,
      "loss": 0.8409,
      "mean_token_accuracy": 0.7445864990353585,
      "step": 1295
    },
    {
      "epoch": 1.2514148103552076,
      "grad_norm": 0.40977755188941956,
      "learning_rate": 2e-05,
      "loss": 0.8131,
      "mean_token_accuracy": 0.7540942341089248,
      "step": 1300
    },
    {
      "epoch": 1.2514148103552076,
      "eval_loss": 0.9334375262260437,
      "eval_mean_token_accuracy": 0.7299059858688941,
      "eval_runtime": 23.2243,
      "eval_samples_per_second": 4.306,
      "eval_steps_per_second": 0.56,
      "step": 1300
    },
    {
      "epoch": 1.2562311860325106,
      "grad_norm": 0.37511029839515686,
      "learning_rate": 2e-05,
      "loss": 0.8374,
      "mean_token_accuracy": 0.7419778451323509,
      "step": 1305
    },
    {
      "epoch": 1.2610475617098134,
      "grad_norm": 0.3589267134666443,
      "learning_rate": 2e-05,
      "loss": 0.8379,
      "mean_token_accuracy": 0.7468772605061531,
      "step": 1310
    },
    {
      "epoch": 1.2658639373871163,
      "grad_norm": 0.39658141136169434,
      "learning_rate": 2e-05,
      "loss": 0.8297,
      "mean_token_accuracy": 0.7469786286354065,
      "step": 1315
    },
    {
      "epoch": 1.270680313064419,
      "grad_norm": 0.42367202043533325,
      "learning_rate": 2e-05,
      "loss": 0.8337,
      "mean_token_accuracy": 0.7481065571308136,
      "step": 1320
    },
    {
      "epoch": 1.2754966887417218,
| "grad_norm": 0.3799798786640167, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8699, | |
| "mean_token_accuracy": 0.7395881712436676, | |
| "step": 1325 | |
| }, | |
| { | |
| "epoch": 1.2803130644190248, | |
| "grad_norm": 0.35513991117477417, | |
| "learning_rate": 2e-05, | |
| "loss": 0.842, | |
| "mean_token_accuracy": 0.7494134768843651, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 1.2851294400963276, | |
| "grad_norm": 0.3369906544685364, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8505, | |
| "mean_token_accuracy": 0.7420820742845535, | |
| "step": 1335 | |
| }, | |
| { | |
| "epoch": 1.2899458157736303, | |
| "grad_norm": 0.37939414381980896, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8575, | |
| "mean_token_accuracy": 0.7391051918268203, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 1.2947621914509333, | |
| "grad_norm": 0.4896661043167114, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8597, | |
| "mean_token_accuracy": 0.7398741409182549, | |
| "step": 1345 | |
| }, | |
| { | |
| "epoch": 1.299578567128236, | |
| "grad_norm": 0.36100226640701294, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8282, | |
| "mean_token_accuracy": 0.7494728401303291, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 1.3043949428055388, | |
| "grad_norm": 0.35935041308403015, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8384, | |
| "mean_token_accuracy": 0.7416606619954109, | |
| "step": 1355 | |
| }, | |
| { | |
| "epoch": 1.3092113184828418, | |
| "grad_norm": 0.3769769072532654, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8533, | |
| "mean_token_accuracy": 0.7399677246809006, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 1.3140276941601445, | |
| "grad_norm": 0.3857510983943939, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8071, | |
| "mean_token_accuracy": 0.7550437197089195, | |
| "step": 1365 | |
| }, | |
| { | |
| "epoch": 1.3188440698374473, | |
| "grad_norm": 0.3967873752117157, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8172, | |
| "mean_token_accuracy": 0.7505990505218506, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 1.3236604455147503, | |
| "grad_norm": 0.36245691776275635, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8492, | |
| "mean_token_accuracy": 0.7389700740575791, | |
| "step": 1375 | |
| }, | |
| { | |
| "epoch": 1.328476821192053, | |
| "grad_norm": 0.35738521814346313, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8273, | |
| "mean_token_accuracy": 0.74720598757267, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 1.3332931968693558, | |
| "grad_norm": 0.3659331202507019, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8284, | |
| "mean_token_accuracy": 0.7490911051630974, | |
| "step": 1385 | |
| }, | |
| { | |
| "epoch": 1.3381095725466587, | |
| "grad_norm": 0.4177457392215729, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8674, | |
| "mean_token_accuracy": 0.7377855911850929, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 1.3429259482239615, | |
| "grad_norm": 0.35989993810653687, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8047, | |
| "mean_token_accuracy": 0.7577745035290718, | |
| "step": 1395 | |
| }, | |
| { | |
| "epoch": 1.3477423239012642, | |
| "grad_norm": 0.3731432557106018, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8361, | |
| "mean_token_accuracy": 0.7453808888792992, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 1.3477423239012642, | |
| "eval_loss": 0.9284374713897705, | |
| "eval_mean_token_accuracy": 0.7310354984723605, | |
| "eval_runtime": 23.1671, | |
| "eval_samples_per_second": 4.316, | |
| "eval_steps_per_second": 0.561, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 1.3525586995785672, | |
| "grad_norm": 0.3571363389492035, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8256, | |
| "mean_token_accuracy": 0.7432364627718926, | |
| "step": 1405 | |
| }, | |
| { | |
| "epoch": 1.35737507525587, | |
| "grad_norm": 0.3314119279384613, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8363, | |
| "mean_token_accuracy": 0.7436547055840492, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 1.3621914509331727, | |
| "grad_norm": 0.3417344093322754, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8472, | |
| "mean_token_accuracy": 0.7448557123541832, | |
| "step": 1415 | |
| }, | |
| { | |
| "epoch": 1.3670078266104757, | |
| "grad_norm": 0.3343771994113922, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8758, | |
| "mean_token_accuracy": 0.7367025226354599, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 1.3718242022877785, | |
| "grad_norm": 0.40026524662971497, | |
| "learning_rate": 2e-05, | |
| "loss": 0.847, | |
| "mean_token_accuracy": 0.7458574742078781, | |
| "step": 1425 | |
| }, | |
| { | |
| "epoch": 1.3766405779650812, | |
| "grad_norm": 0.3898317813873291, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8698, | |
| "mean_token_accuracy": 0.7364508762955666, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 1.3814569536423842, | |
| "grad_norm": 0.38850364089012146, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8153, | |
| "mean_token_accuracy": 0.7526135891675949, | |
| "step": 1435 | |
| }, | |
| { | |
| "epoch": 1.386273329319687, | |
| "grad_norm": 0.3574685752391815, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8464, | |
| "mean_token_accuracy": 0.7421124458312989, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 1.3910897049969897, | |
| "grad_norm": 0.3952126204967499, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8529, | |
| "mean_token_accuracy": 0.7436041012406349, | |
| "step": 1445 | |
| }, | |
| { | |
| "epoch": 1.3959060806742927, | |
| "grad_norm": 0.3442822992801666, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8072, | |
| "mean_token_accuracy": 0.7531652361154556, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 1.4007224563515954, | |
| "grad_norm": 0.39355242252349854, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8571, | |
| "mean_token_accuracy": 0.7427126541733742, | |
| "step": 1455 | |
| }, | |
| { | |
| "epoch": 1.4055388320288982, | |
| "grad_norm": 0.37595421075820923, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8479, | |
| "mean_token_accuracy": 0.742530232667923, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 1.4103552077062012, | |
| "grad_norm": 0.37319841980934143, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8995, | |
| "mean_token_accuracy": 0.7270940363407135, | |
| "step": 1465 | |
| }, | |
| { | |
| "epoch": 1.415171583383504, | |
| "grad_norm": 0.4596470892429352, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8115, | |
| "mean_token_accuracy": 0.7522259518504143, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 1.4199879590608067, | |
| "grad_norm": 0.3937012553215027, | |
| "learning_rate": 2e-05, | |
| "loss": 0.818, | |
| "mean_token_accuracy": 0.7493147507309914, | |
| "step": 1475 | |
| }, | |
| { | |
| "epoch": 1.4248043347381096, | |
| "grad_norm": 0.37263306975364685, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8398, | |
| "mean_token_accuracy": 0.7486390605568886, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 1.4296207104154124, | |
| "grad_norm": 0.3784118592739105, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8002, | |
| "mean_token_accuracy": 0.7569744929671287, | |
| "step": 1485 | |
| }, | |
| { | |
| "epoch": 1.4344370860927151, | |
| "grad_norm": 0.36756840348243713, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8808, | |
| "mean_token_accuracy": 0.735991969704628, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 1.4392534617700181, | |
| "grad_norm": 0.37854087352752686, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8555, | |
| "mean_token_accuracy": 0.7391902863979339, | |
| "step": 1495 | |
| }, | |
| { | |
| "epoch": 1.4440698374473209, | |
| "grad_norm": 0.37972742319107056, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8449, | |
| "mean_token_accuracy": 0.7450771510601044, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.4440698374473209, | |
| "eval_loss": 0.9237499833106995, | |
| "eval_mean_token_accuracy": 0.7318991560202378, | |
| "eval_runtime": 23.0893, | |
| "eval_samples_per_second": 4.331, | |
| "eval_steps_per_second": 0.563, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.4488862131246236, | |
| "grad_norm": 0.37376001477241516, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8485, | |
| "mean_token_accuracy": 0.7402021259069442, | |
| "step": 1505 | |
| }, | |
| { | |
| "epoch": 1.4537025888019266, | |
| "grad_norm": 0.38736093044281006, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8693, | |
| "mean_token_accuracy": 0.7362229198217392, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 1.4585189644792294, | |
| "grad_norm": 0.3877595067024231, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8424, | |
| "mean_token_accuracy": 0.745487704873085, | |
| "step": 1515 | |
| }, | |
| { | |
| "epoch": 1.463335340156532, | |
| "grad_norm": 0.35137495398521423, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8278, | |
| "mean_token_accuracy": 0.7506787940859795, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 1.468151715833835, | |
| "grad_norm": 0.37618595361709595, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8311, | |
| "mean_token_accuracy": 0.7513900756835937, | |
| "step": 1525 | |
| }, | |
| { | |
| "epoch": 1.4729680915111378, | |
| "grad_norm": 0.3877982795238495, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8406, | |
| "mean_token_accuracy": 0.746095435321331, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 1.4777844671884406, | |
| "grad_norm": 0.3873893618583679, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8465, | |
| "mean_token_accuracy": 0.7434879496693612, | |
| "step": 1535 | |
| }, | |
| { | |
| "epoch": 1.4826008428657436, | |
| "grad_norm": 0.37615063786506653, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8214, | |
| "mean_token_accuracy": 0.7459251716732979, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 1.4874172185430463, | |
| "grad_norm": 0.3867892324924469, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8237, | |
| "mean_token_accuracy": 0.7544758155941963, | |
| "step": 1545 | |
| }, | |
| { | |
| "epoch": 1.492233594220349, | |
| "grad_norm": 0.3590559959411621, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8196, | |
| "mean_token_accuracy": 0.7523276194930076, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 1.497049969897652, | |
| "grad_norm": 0.33671319484710693, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8477, | |
| "mean_token_accuracy": 0.7436845555901528, | |
| "step": 1555 | |
| }, | |
| { | |
| "epoch": 1.5018663455749548, | |
| "grad_norm": 0.3445674180984497, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8554, | |
| "mean_token_accuracy": 0.7416912838816643, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 1.5066827212522576, | |
| "grad_norm": 0.38447070121765137, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8339, | |
| "mean_token_accuracy": 0.7485029339790344, | |
| "step": 1565 | |
| }, | |
| { | |
| "epoch": 1.5114990969295605, | |
| "grad_norm": 0.40366995334625244, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8211, | |
| "mean_token_accuracy": 0.7500607624650002, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 1.5163154726068635, | |
| "grad_norm": 0.39034920930862427, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8145, | |
| "mean_token_accuracy": 0.7497797414660454, | |
| "step": 1575 | |
| }, | |
| { | |
| "epoch": 1.521131848284166, | |
| "grad_norm": 0.35405203700065613, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8527, | |
| "mean_token_accuracy": 0.7429638877511024, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 1.525948223961469, | |
| "grad_norm": 0.3475797474384308, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8116, | |
| "mean_token_accuracy": 0.7482677146792411, | |
| "step": 1585 | |
| }, | |
| { | |
| "epoch": 1.530764599638772, | |
| "grad_norm": 0.38322359323501587, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8077, | |
| "mean_token_accuracy": 0.7507022231817245, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 1.5355809753160745, | |
| "grad_norm": 0.4077315032482147, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8108, | |
| "mean_token_accuracy": 0.7523728922009468, | |
| "step": 1595 | |
| }, | |
| { | |
| "epoch": 1.5403973509933775, | |
| "grad_norm": 0.41062474250793457, | |
| "learning_rate": 2e-05, | |
| "loss": 0.816, | |
| "mean_token_accuracy": 0.7487197533249855, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 1.5403973509933775, | |
| "eval_loss": 0.9193750023841858, | |
| "eval_mean_token_accuracy": 0.7336924580427316, | |
| "eval_runtime": 23.1661, | |
| "eval_samples_per_second": 4.317, | |
| "eval_steps_per_second": 0.561, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 1.5452137266706805, | |
| "grad_norm": 0.3489004671573639, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8252, | |
| "mean_token_accuracy": 0.7542433932423591, | |
| "step": 1605 | |
| }, | |
| { | |
| "epoch": 1.550030102347983, | |
| "grad_norm": 0.42114558815956116, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8513, | |
| "mean_token_accuracy": 0.7369220793247223, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 1.554846478025286, | |
| "grad_norm": 0.40832996368408203, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8532, | |
| "mean_token_accuracy": 0.7461734786629677, | |
| "step": 1615 | |
| }, | |
| { | |
| "epoch": 1.559662853702589, | |
| "grad_norm": 0.369779497385025, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8416, | |
| "mean_token_accuracy": 0.7418405711650848, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 1.5644792293798915, | |
| "grad_norm": 0.36488446593284607, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8424, | |
| "mean_token_accuracy": 0.7447330921888351, | |
| "step": 1625 | |
| }, | |
| { | |
| "epoch": 1.5692956050571945, | |
| "grad_norm": 0.4260449707508087, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8483, | |
| "mean_token_accuracy": 0.7396710678935051, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 1.5741119807344974, | |
| "grad_norm": 0.44844576716423035, | |
| "learning_rate": 2e-05, | |
| "loss": 0.832, | |
| "mean_token_accuracy": 0.7461599200963974, | |
| "step": 1635 | |
| }, | |
| { | |
| "epoch": 1.5789283564118, | |
| "grad_norm": 0.3739905059337616, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7956, | |
| "mean_token_accuracy": 0.7564267441630363, | |
| "step": 1640 | |
| }, | |
| { | |
| "epoch": 1.583744732089103, | |
| "grad_norm": 0.3647367060184479, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8269, | |
| "mean_token_accuracy": 0.7473939165472985, | |
| "step": 1645 | |
| }, | |
| { | |
| "epoch": 1.588561107766406, | |
| "grad_norm": 0.32268261909484863, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8051, | |
| "mean_token_accuracy": 0.7524646311998368, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 1.5933774834437087, | |
| "grad_norm": 0.38236913084983826, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8193, | |
| "mean_token_accuracy": 0.7506327718496323, | |
| "step": 1655 | |
| }, | |
| { | |
| "epoch": 1.5981938591210114, | |
| "grad_norm": 0.33767271041870117, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8471, | |
| "mean_token_accuracy": 0.7457294464111328, | |
| "step": 1660 | |
| }, | |
| { | |
| "epoch": 1.6030102347983144, | |
| "grad_norm": 0.36715444922447205, | |
| "learning_rate": 2e-05, | |
| "loss": 0.827, | |
| "mean_token_accuracy": 0.7489886403083801, | |
| "step": 1665 | |
| }, | |
| { | |
| "epoch": 1.6078266104756171, | |
| "grad_norm": 0.3837079107761383, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8183, | |
| "mean_token_accuracy": 0.750606831908226, | |
| "step": 1670 | |
| }, | |
| { | |
| "epoch": 1.61264298615292, | |
| "grad_norm": 0.4105582535266876, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7927, | |
| "mean_token_accuracy": 0.7599633246660232, | |
| "step": 1675 | |
| }, | |
| { | |
| "epoch": 1.6174593618302229, | |
| "grad_norm": 0.328166127204895, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8538, | |
| "mean_token_accuracy": 0.7361954420804977, | |
| "step": 1680 | |
| }, | |
| { | |
| "epoch": 1.6222757375075256, | |
| "grad_norm": 0.34064626693725586, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8232, | |
| "mean_token_accuracy": 0.7506048232316971, | |
| "step": 1685 | |
| }, | |
| { | |
| "epoch": 1.6270921131848284, | |
| "grad_norm": 0.3536415100097656, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8128, | |
| "mean_token_accuracy": 0.7551938936114311, | |
| "step": 1690 | |
| }, | |
| { | |
| "epoch": 1.6319084888621314, | |
| "grad_norm": 0.3961397707462311, | |
| "learning_rate": 2e-05, | |
| "loss": 0.845, | |
| "mean_token_accuracy": 0.743786184489727, | |
| "step": 1695 | |
| }, | |
| { | |
| "epoch": 1.636724864539434, | |
| "grad_norm": 0.3730252981185913, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8177, | |
| "mean_token_accuracy": 0.7538556814193725, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 1.636724864539434, | |
| "eval_loss": 0.9150000214576721, | |
| "eval_mean_token_accuracy": 0.7336668784801776, | |
| "eval_runtime": 23.1715, | |
| "eval_samples_per_second": 4.316, | |
| "eval_steps_per_second": 0.561, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 1.6415412402167369, | |
| "grad_norm": 0.40690943598747253, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8164, | |
| "mean_token_accuracy": 0.7524816766381264, | |
| "step": 1705 | |
| }, | |
| { | |
| "epoch": 1.6463576158940398, | |
| "grad_norm": 0.40426430106163025, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8629, | |
| "mean_token_accuracy": 0.7397387072443962, | |
| "step": 1710 | |
| }, | |
| { | |
| "epoch": 1.6511739915713426, | |
| "grad_norm": 0.3870543837547302, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8344, | |
| "mean_token_accuracy": 0.7449035495519638, | |
| "step": 1715 | |
| }, | |
| { | |
| "epoch": 1.6559903672486453, | |
| "grad_norm": 0.3909133970737457, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8416, | |
| "mean_token_accuracy": 0.7425220400094986, | |
| "step": 1720 | |
| }, | |
| { | |
| "epoch": 1.6608067429259483, | |
| "grad_norm": 0.3573339581489563, | |
| "learning_rate": 2e-05, | |
| "loss": 0.831, | |
| "mean_token_accuracy": 0.749519394338131, | |
| "step": 1725 | |
| }, | |
| { | |
| "epoch": 1.665623118603251, | |
| "grad_norm": 0.4339994788169861, | |
| "learning_rate": 2e-05, | |
| "loss": 0.83, | |
| "mean_token_accuracy": 0.7467664986848831, | |
| "step": 1730 | |
| }, | |
| { | |
| "epoch": 1.6704394942805538, | |
| "grad_norm": 0.3833540081977844, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8376, | |
| "mean_token_accuracy": 0.7486771330237388, | |
| "step": 1735 | |
| }, | |
| { | |
| "epoch": 1.6752558699578568, | |
| "grad_norm": 0.4031335711479187, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7827, | |
| "mean_token_accuracy": 0.7610711231827736, | |
| "step": 1740 | |
| }, | |
| { | |
| "epoch": 1.6800722456351596, | |
| "grad_norm": 0.40794068574905396, | |
| "learning_rate": 2e-05, | |
| "loss": 0.824, | |
| "mean_token_accuracy": 0.7416556209325791, | |
| "step": 1745 | |
| }, | |
| { | |
| "epoch": 1.6848886213124623, | |
| "grad_norm": 0.3737964332103729, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8187, | |
| "mean_token_accuracy": 0.7503210604190826, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 1.6897049969897653, | |
| "grad_norm": 0.33517488837242126, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8333, | |
| "mean_token_accuracy": 0.7439797177910805, | |
| "step": 1755 | |
| }, | |
| { | |
| "epoch": 1.694521372667068, | |
| "grad_norm": 0.41663119196891785, | |
| "learning_rate": 2e-05, | |
| "loss": 0.888, | |
| "mean_token_accuracy": 0.7334117487072944, | |
| "step": 1760 | |
| }, | |
| { | |
| "epoch": 1.6993377483443708, | |
| "grad_norm": 0.339233934879303, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8314, | |
| "mean_token_accuracy": 0.7454211816191674, | |
| "step": 1765 | |
| }, | |
| { | |
| "epoch": 1.7041541240216738, | |
| "grad_norm": 0.36906835436820984, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8427, | |
| "mean_token_accuracy": 0.7440360963344574, | |
| "step": 1770 | |
| }, | |
| { | |
| "epoch": 1.7089704996989765, | |
| "grad_norm": 0.38175931572914124, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8298, | |
| "mean_token_accuracy": 0.7456939503550529, | |
| "step": 1775 | |
| }, | |
| { | |
| "epoch": 1.7137868753762793, | |
| "grad_norm": 0.3851398527622223, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8381, | |
| "mean_token_accuracy": 0.7432308256626129, | |
| "step": 1780 | |
| }, | |
| { | |
| "epoch": 1.7186032510535822, | |
| "grad_norm": 0.4011867046356201, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8172, | |
| "mean_token_accuracy": 0.7504382818937302, | |
| "step": 1785 | |
| }, | |
| { | |
| "epoch": 1.723419626730885, | |
| "grad_norm": 0.4010428786277771, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8278, | |
| "mean_token_accuracy": 0.7499980315566063, | |
| "step": 1790 | |
| }, | |
| { | |
| "epoch": 1.7282360024081878, | |
| "grad_norm": 0.35039839148521423, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8456, | |
| "mean_token_accuracy": 0.7438850581645966, | |
| "step": 1795 | |
| }, | |
| { | |
| "epoch": 1.7330523780854907, | |
| "grad_norm": 0.37959855794906616, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8414, | |
| "mean_token_accuracy": 0.748087365925312, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 1.7330523780854907, | |
| "eval_loss": 0.9110937714576721, | |
| "eval_mean_token_accuracy": 0.7353770962128272, | |
| "eval_runtime": 23.1605, | |
| "eval_samples_per_second": 4.318, | |
| "eval_steps_per_second": 0.561, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 1.7378687537627935, | |
| "grad_norm": 0.33767440915107727, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8569, | |
| "mean_token_accuracy": 0.7423548147082328, | |
| "step": 1805 | |
| }, | |
| { | |
| "epoch": 1.7426851294400962, | |
| "grad_norm": 0.3997369408607483, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8201, | |
| "mean_token_accuracy": 0.7456977978348732, | |
| "step": 1810 | |
| }, | |
| { | |
| "epoch": 1.7475015051173992, | |
| "grad_norm": 0.38806620240211487, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8488, | |
| "mean_token_accuracy": 0.7421660631895065, | |
| "step": 1815 | |
| }, | |
| { | |
| "epoch": 1.752317880794702, | |
| "grad_norm": 0.3360239863395691, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8266, | |
| "mean_token_accuracy": 0.7481269389390945, | |
| "step": 1820 | |
| }, | |
| { | |
| "epoch": 1.7571342564720047, | |
| "grad_norm": 0.41784462332725525, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8148, | |
| "mean_token_accuracy": 0.7510803028941154, | |
| "step": 1825 | |
| }, | |
| { | |
| "epoch": 1.7619506321493077, | |
| "grad_norm": 0.3966754078865051, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8139, | |
| "mean_token_accuracy": 0.7494689762592316, | |
| "step": 1830 | |
| }, | |
| { | |
| "epoch": 1.7667670078266104, | |
| "grad_norm": 0.36949893832206726, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8528, | |
| "mean_token_accuracy": 0.7427579507231712, | |
| "step": 1835 | |
| }, | |
| { | |
| "epoch": 1.7715833835039132, | |
| "grad_norm": 0.37594905495643616, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8478, | |
| "mean_token_accuracy": 0.746378231048584, | |
| "step": 1840 | |
| }, | |
| { | |
| "epoch": 1.7763997591812162, | |
| "grad_norm": 0.3548242151737213, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8191, | |
| "mean_token_accuracy": 0.7453361362218857, | |
| "step": 1845 | |
| }, | |
| { | |
| "epoch": 1.781216134858519, | |
| "grad_norm": 0.3962383270263672, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8207, | |
| "mean_token_accuracy": 0.7496239960193634, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 1.7860325105358217, | |
| "grad_norm": 0.3505515158176422, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8609, | |
| "mean_token_accuracy": 0.7414278611540794, | |
| "step": 1855 | |
| }, | |
| { | |
| "epoch": 1.7908488862131247, | |
| "grad_norm": 0.3898848295211792, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7912, | |
| "mean_token_accuracy": 0.755648897588253, | |
| "step": 1860 | |
| }, | |
| { | |
| "epoch": 1.7956652618904274, | |
| "grad_norm": 0.3252900242805481, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8272, | |
| "mean_token_accuracy": 0.7487868279218673, | |
| "step": 1865 | |
| }, | |
| { | |
| "epoch": 1.8004816375677302, | |
| "grad_norm": 0.3451252281665802, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8074, | |
| "mean_token_accuracy": 0.7537914827466011, | |
| "step": 1870 | |
| }, | |
| { | |
| "epoch": 1.8052980132450331, | |
| "grad_norm": 0.388134241104126, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8195, | |
| "mean_token_accuracy": 0.7505655080080033, | |
| "step": 1875 | |
| }, | |
| { | |
| "epoch": 1.810114388922336, | |
| "grad_norm": 0.3933774530887604, | |
| "learning_rate": 2e-05, | |
| "loss": 0.828, | |
| "mean_token_accuracy": 0.7499305367469787, | |
| "step": 1880 | |
| }, | |
| { | |
| "epoch": 1.8149307645996386, | |
| "grad_norm": 0.41381701827049255, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8403, | |
| "mean_token_accuracy": 0.74359240680933, | |
| "step": 1885 | |
| }, | |
| { | |
| "epoch": 1.8197471402769416, | |
| "grad_norm": 0.3744955062866211, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8285, | |
| "mean_token_accuracy": 0.7447529405355453, | |
| "step": 1890 | |
| }, | |
| { | |
| "epoch": 1.8245635159542446, | |
| "grad_norm": 0.37023985385894775, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8395, | |
| "mean_token_accuracy": 0.7450712293386459, | |
| "step": 1895 | |
| }, | |
| { | |
| "epoch": 1.8293798916315471, | |
| "grad_norm": 0.3904387354850769, | |
| "learning_rate": 2e-05, | |
| "loss": 0.806, | |
| "mean_token_accuracy": 0.7523078992962837, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 1.8293798916315471, | |
| "eval_loss": 0.9075000286102295, | |
| "eval_mean_token_accuracy": 0.7357059946426978, | |
| "eval_runtime": 23.1147, | |
| "eval_samples_per_second": 4.326, | |
| "eval_steps_per_second": 0.562, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 1.83419626730885, | |
| "grad_norm": 0.36936506628990173, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8049, | |
| "mean_token_accuracy": 0.7516879752278328, | |
| "step": 1905 | |
| }, | |
| { | |
| "epoch": 1.839012642986153, | |
| "grad_norm": 0.37847042083740234, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8447, | |
| "mean_token_accuracy": 0.7475872814655304, | |
| "step": 1910 | |
| }, | |
| { | |
| "epoch": 1.8438290186634556, | |
| "grad_norm": 0.3323959708213806, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8357, | |
| "mean_token_accuracy": 0.7477746129035949, | |
| "step": 1915 | |
| }, | |
| { | |
| "epoch": 1.8486453943407586, | |
| "grad_norm": 0.39311644434928894, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8016, | |
| "mean_token_accuracy": 0.7572365298867225, | |
| "step": 1920 | |
| }, | |
| { | |
| "epoch": 1.8534617700180616, | |
| "grad_norm": 0.4057285487651825, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8245, | |
| "mean_token_accuracy": 0.7479765564203262, | |
| "step": 1925 | |
| }, | |
| { | |
| "epoch": 1.858278145695364, | |
| "grad_norm": 0.35648080706596375, | |
| "learning_rate": 2e-05, | |
| "loss": 0.828, | |
| "mean_token_accuracy": 0.7472658976912498, | |
| "step": 1930 | |
| }, | |
| { | |
| "epoch": 1.863094521372667, | |
| "grad_norm": 0.4065404236316681, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8149, | |
| "mean_token_accuracy": 0.7524289473891258, | |
| "step": 1935 | |
| }, | |
| { | |
| "epoch": 1.86791089704997, | |
| "grad_norm": 0.3594423234462738, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8431, | |
| "mean_token_accuracy": 0.7439246565103531, | |
| "step": 1940 | |
| }, | |
| { | |
| "epoch": 1.8727272727272726, | |
| "grad_norm": 0.34858575463294983, | |
| "learning_rate": 2e-05, | |
| "loss": 0.833, | |
| "mean_token_accuracy": 0.749679408967495, | |
| "step": 1945 | |
| }, | |
| { | |
| "epoch": 1.8775436484045755, | |
| "grad_norm": 0.37988269329071045, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7625, | |
| "mean_token_accuracy": 0.7687516495585441, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 1.8823600240818785, | |
| "grad_norm": 0.36146003007888794, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8401, | |
| "mean_token_accuracy": 0.743234421312809, | |
| "step": 1955 | |
| }, | |
| { | |
| "epoch": 1.887176399759181, | |
| "grad_norm": 0.3551631569862366, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8378, | |
| "mean_token_accuracy": 0.7408182680606842, | |
| "step": 1960 | |
| }, | |
| { | |
| "epoch": 1.891992775436484, | |
| "grad_norm": 0.3982977569103241, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8227, | |
| "mean_token_accuracy": 0.751612164080143, | |
| "step": 1965 | |
| }, | |
| { | |
| "epoch": 1.896809151113787, | |
| "grad_norm": 0.3863884508609772, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8463, | |
| "mean_token_accuracy": 0.7427176803350448, | |
| "step": 1970 | |
| }, | |
| { | |
| "epoch": 1.9016255267910898, | |
| "grad_norm": 0.3617970645427704, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8255, | |
| "mean_token_accuracy": 0.7477703839540482, | |
| "step": 1975 | |
| }, | |
| { | |
| "epoch": 1.9064419024683925, | |
| "grad_norm": 0.41495469212532043, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8384, | |
| "mean_token_accuracy": 0.7448007896542549, | |
| "step": 1980 | |
| }, | |
| { | |
| "epoch": 1.9112582781456955, | |
| "grad_norm": 0.3848714828491211, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8407, | |
| "mean_token_accuracy": 0.7430221542716027, | |
| "step": 1985 | |
| }, | |
| { | |
| "epoch": 1.9160746538229982, | |
| "grad_norm": 0.36609163880348206, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8299, | |
| "mean_token_accuracy": 0.7459353134036064, | |
| "step": 1990 | |
| }, | |
| { | |
| "epoch": 1.920891029500301, | |
| "grad_norm": 0.3701276183128357, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8325, | |
| "mean_token_accuracy": 0.7488315016031265, | |
| "step": 1995 | |
| }, | |
| { | |
| "epoch": 1.925707405177604, | |
| "grad_norm": 0.3757995367050171, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8018, | |
| "mean_token_accuracy": 0.7553416565060616, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.925707405177604, | |
| "eval_loss": 0.9051562547683716, | |
| "eval_mean_token_accuracy": 0.7356420801236079, | |
| "eval_runtime": 23.1611, | |
| "eval_samples_per_second": 4.318, | |
| "eval_steps_per_second": 0.561, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.9305237808549067, | |
| "grad_norm": 0.36982470750808716, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8334, | |
| "mean_token_accuracy": 0.7408973425626755, | |
| "step": 2005 | |
| }, | |
| { | |
| "epoch": 1.9353401565322095, | |
| "grad_norm": 0.40300092101097107, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8294, | |
| "mean_token_accuracy": 0.748587854206562, | |
| "step": 2010 | |
| }, | |
| { | |
| "epoch": 1.9401565322095125, | |
| "grad_norm": 0.3860663175582886, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7914, | |
| "mean_token_accuracy": 0.7596546769142151, | |
| "step": 2015 | |
| }, | |
| { | |
| "epoch": 1.9449729078868152, | |
| "grad_norm": 0.4126531481742859, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8144, | |
| "mean_token_accuracy": 0.7499192729592323, | |
| "step": 2020 | |
| }, | |
| { | |
| "epoch": 1.949789283564118, | |
| "grad_norm": 0.36783406138420105, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8136, | |
| "mean_token_accuracy": 0.7480936273932457, | |
| "step": 2025 | |
| }, | |
| { | |
| "epoch": 1.954605659241421, | |
| "grad_norm": 0.3961862623691559, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8175, | |
| "mean_token_accuracy": 0.7519208177924156, | |
| "step": 2030 | |
| }, | |
| { | |
| "epoch": 1.9594220349187237, | |
| "grad_norm": 0.39503687620162964, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7986, | |
| "mean_token_accuracy": 0.7552447572350502, | |
| "step": 2035 | |
| }, | |
| { | |
| "epoch": 1.9642384105960264, | |
| "grad_norm": 0.383649080991745, | |
| "learning_rate": 2e-05, | |
| "loss": 0.816, | |
| "mean_token_accuracy": 0.7495980471372604, | |
| "step": 2040 | |
| }, | |
| { | |
| "epoch": 1.9690547862733294, | |
| "grad_norm": 0.36074724793434143, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8157, | |
| "mean_token_accuracy": 0.7535919129848481, | |
| "step": 2045 | |
| }, | |
| { | |
| "epoch": 1.9738711619506322, | |
| "grad_norm": 0.38681527972221375, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8404, | |
| "mean_token_accuracy": 0.7456748083233833, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 1.978687537627935, | |
| "grad_norm": 0.36571046710014343, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8167, | |
| "mean_token_accuracy": 0.7556032404303551, | |
| "step": 2055 | |
| }, | |
| { | |
| "epoch": 1.983503913305238, | |
| "grad_norm": 0.43187224864959717, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8022, | |
| "mean_token_accuracy": 0.7503258436918259, | |
| "step": 2060 | |
| }, | |
| { | |
| "epoch": 1.9883202889825407, | |
| "grad_norm": 0.3371817469596863, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8379, | |
| "mean_token_accuracy": 0.7471990913152695, | |
| "step": 2065 | |
| }, | |
| { | |
| "epoch": 1.9931366646598434, | |
| "grad_norm": 0.3704403042793274, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8468, | |
| "mean_token_accuracy": 0.7404090151190758, | |
| "step": 2070 | |
| }, | |
| { | |
| "epoch": 1.9979530403371464, | |
| "grad_norm": 0.37344101071357727, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8149, | |
| "mean_token_accuracy": 0.7522780746221542, | |
| "step": 2075 | |
| }, | |
| { | |
| "epoch": 2.001926550270921, | |
| "grad_norm": 0.4436754882335663, | |
| "learning_rate": 2e-05, | |
| "loss": 0.6846, | |
| "mean_token_accuracy": 0.7422799016490127, | |
| "step": 2080 | |
| }, | |
| { | |
| "epoch": 2.006742925948224, | |
| "grad_norm": 0.3635530471801758, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7807, | |
| "mean_token_accuracy": 0.7589672565460205, | |
| "step": 2085 | |
| }, | |
| { | |
| "epoch": 2.011559301625527, | |
| "grad_norm": 0.3698291778564453, | |
| "learning_rate": 2e-05, | |
| "loss": 0.778, | |
| "mean_token_accuracy": 0.7537376001477242, | |
| "step": 2090 | |
| }, | |
| { | |
| "epoch": 2.0163756773028294, | |
| "grad_norm": 0.3593323230743408, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7656, | |
| "mean_token_accuracy": 0.7640043929219246, | |
| "step": 2095 | |
| }, | |
| { | |
| "epoch": 2.0211920529801324, | |
| "grad_norm": 0.38869941234588623, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7958, | |
| "mean_token_accuracy": 0.7520812705159188, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 2.0211920529801324, | |
| "eval_loss": 0.9059374928474426, | |
| "eval_mean_token_accuracy": 0.7361722084192129, | |
| "eval_runtime": 23.1994, | |
| "eval_samples_per_second": 4.31, | |
| "eval_steps_per_second": 0.56, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 2.0260084286574354, | |
| "grad_norm": 0.324433833360672, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7856, | |
| "mean_token_accuracy": 0.7576806485652924, | |
| "step": 2105 | |
| }, | |
| { | |
| "epoch": 2.0308248043347383, | |
| "grad_norm": 0.35579246282577515, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7895, | |
| "mean_token_accuracy": 0.7542310237884522, | |
| "step": 2110 | |
| }, | |
| { | |
| "epoch": 2.035641180012041, | |
| "grad_norm": 0.4416930377483368, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7471, | |
| "mean_token_accuracy": 0.7662236809730529, | |
| "step": 2115 | |
| }, | |
| { | |
| "epoch": 2.040457555689344, | |
| "grad_norm": 0.3690590262413025, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7557, | |
| "mean_token_accuracy": 0.7577535837888718, | |
| "step": 2120 | |
| }, | |
| { | |
| "epoch": 2.045273931366647, | |
| "grad_norm": 0.3864422142505646, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7441, | |
| "mean_token_accuracy": 0.7661188945174218, | |
| "step": 2125 | |
| }, | |
| { | |
| "epoch": 2.0500903070439493, | |
| "grad_norm": 0.3759057819843292, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7791, | |
| "mean_token_accuracy": 0.7549151331186295, | |
| "step": 2130 | |
| }, | |
| { | |
| "epoch": 2.0549066827212523, | |
| "grad_norm": 0.35572880506515503, | |
| "learning_rate": 2e-05, | |
| "loss": 0.795, | |
| "mean_token_accuracy": 0.7541061162948608, | |
| "step": 2135 | |
| }, | |
| { | |
| "epoch": 2.0597230583985553, | |
| "grad_norm": 0.3675136864185333, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7965, | |
| "mean_token_accuracy": 0.7506997480988502, | |
| "step": 2140 | |
| }, | |
| { | |
| "epoch": 2.064539434075858, | |
| "grad_norm": 0.3544418215751648, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8036, | |
| "mean_token_accuracy": 0.7521514996886254, | |
| "step": 2145 | |
| }, | |
| { | |
| "epoch": 2.069355809753161, | |
| "grad_norm": 0.3543994426727295, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7788, | |
| "mean_token_accuracy": 0.7620324581861496, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 2.0741721854304638, | |
| "grad_norm": 0.35432854294776917, | |
| "learning_rate": 2e-05, | |
| "loss": 0.774, | |
| "mean_token_accuracy": 0.7557155057787895, | |
| "step": 2155 | |
| }, | |
| { | |
| "epoch": 2.0789885611077663, | |
| "grad_norm": 0.30006834864616394, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7463, | |
| "mean_token_accuracy": 0.7641932889819145, | |
| "step": 2160 | |
| }, | |
| { | |
| "epoch": 2.0838049367850693, | |
| "grad_norm": 0.34351858496665955, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7893, | |
| "mean_token_accuracy": 0.7499398306012154, | |
| "step": 2165 | |
| }, | |
| { | |
| "epoch": 2.0886213124623723, | |
| "grad_norm": 0.40961310267448425, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7827, | |
| "mean_token_accuracy": 0.7576932951807975, | |
| "step": 2170 | |
| }, | |
| { | |
| "epoch": 2.093437688139675, | |
| "grad_norm": 0.35560116171836853, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7698, | |
| "mean_token_accuracy": 0.7573257461190224, | |
| "step": 2175 | |
| }, | |
| { | |
| "epoch": 2.0982540638169778, | |
| "grad_norm": 0.35371777415275574, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7816, | |
| "mean_token_accuracy": 0.7532159358263015, | |
| "step": 2180 | |
| }, | |
| { | |
| "epoch": 2.1030704394942807, | |
| "grad_norm": 0.3600073456764221, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7881, | |
| "mean_token_accuracy": 0.7561186358332634, | |
| "step": 2185 | |
| }, | |
| { | |
| "epoch": 2.1078868151715833, | |
| "grad_norm": 0.37766149640083313, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7797, | |
| "mean_token_accuracy": 0.7565206706523895, | |
| "step": 2190 | |
| }, | |
| { | |
| "epoch": 2.1127031908488862, | |
| "grad_norm": 0.35926687717437744, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8197, | |
| "mean_token_accuracy": 0.7425670564174652, | |
| "step": 2195 | |
| }, | |
| { | |
| "epoch": 2.1175195665261892, | |
| "grad_norm": 0.37030982971191406, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7642, | |
| "mean_token_accuracy": 0.7601809903979302, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 2.1175195665261892, | |
| "eval_loss": 0.9032812714576721, | |
| "eval_mean_token_accuracy": 0.7368296614060035, | |
| "eval_runtime": 23.1974, | |
| "eval_samples_per_second": 4.311, | |
| "eval_steps_per_second": 0.56, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 2.1223359422034918, | |
| "grad_norm": 0.37453070282936096, | |
| "learning_rate": 2e-05, | |
| "loss": 0.714, | |
| "mean_token_accuracy": 0.777516531944275, | |
| "step": 2205 | |
| }, | |
| { | |
| "epoch": 2.1271523178807947, | |
| "grad_norm": 0.39775583148002625, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7447, | |
| "mean_token_accuracy": 0.7646070823073388, | |
| "step": 2210 | |
| }, | |
| { | |
| "epoch": 2.1319686935580977, | |
| "grad_norm": 0.35055381059646606, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7748, | |
| "mean_token_accuracy": 0.7598061427474022, | |
| "step": 2215 | |
| }, | |
| { | |
| "epoch": 2.1367850692354002, | |
| "grad_norm": 0.3668005168437958, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7791, | |
| "mean_token_accuracy": 0.7541374906897544, | |
| "step": 2220 | |
| }, | |
| { | |
| "epoch": 2.141601444912703, | |
| "grad_norm": 0.3544014096260071, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7835, | |
| "mean_token_accuracy": 0.7573020115494729, | |
| "step": 2225 | |
| }, | |
| { | |
| "epoch": 2.146417820590006, | |
| "grad_norm": 0.3782559037208557, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7682, | |
| "mean_token_accuracy": 0.7608320832252502, | |
| "step": 2230 | |
| }, | |
| { | |
| "epoch": 2.1512341962673087, | |
| "grad_norm": 0.3616378605365753, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7991, | |
| "mean_token_accuracy": 0.7570154413580894, | |
| "step": 2235 | |
| }, | |
| { | |
| "epoch": 2.1560505719446117, | |
| "grad_norm": 0.3906887471675873, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7757, | |
| "mean_token_accuracy": 0.7537379145622254, | |
| "step": 2240 | |
| }, | |
| { | |
| "epoch": 2.1608669476219147, | |
| "grad_norm": 0.39540261030197144, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7531, | |
| "mean_token_accuracy": 0.765704445540905, | |
| "step": 2245 | |
| }, | |
| { | |
| "epoch": 2.165683323299217, | |
| "grad_norm": 0.3733433187007904, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7745, | |
| "mean_token_accuracy": 0.7563082128763199, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 2.17049969897652, | |
| "grad_norm": 0.3424307107925415, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7655, | |
| "mean_token_accuracy": 0.7576767817139626, | |
| "step": 2255 | |
| }, | |
| { | |
| "epoch": 2.175316074653823, | |
| "grad_norm": 0.4548731744289398, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7269, | |
| "mean_token_accuracy": 0.7734593346714973, | |
| "step": 2260 | |
| }, | |
| { | |
| "epoch": 2.1801324503311257, | |
| "grad_norm": 0.37536028027534485, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7452, | |
| "mean_token_accuracy": 0.7681032359600067, | |
| "step": 2265 | |
| }, | |
| { | |
| "epoch": 2.1849488260084287, | |
| "grad_norm": 0.3755234479904175, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7654, | |
| "mean_token_accuracy": 0.7575892135500908, | |
| "step": 2270 | |
| }, | |
| { | |
| "epoch": 2.1897652016857316, | |
| "grad_norm": 0.38675227761268616, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7479, | |
| "mean_token_accuracy": 0.7688836097717285, | |
| "step": 2275 | |
| }, | |
| { | |
| "epoch": 2.194581577363034, | |
| "grad_norm": 0.3535851240158081, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7733, | |
| "mean_token_accuracy": 0.7607254981994629, | |
| "step": 2280 | |
| }, | |
| { | |
| "epoch": 2.199397953040337, | |
| "grad_norm": 0.39500853419303894, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7917, | |
| "mean_token_accuracy": 0.7558043077588081, | |
| "step": 2285 | |
| }, | |
| { | |
| "epoch": 2.20421432871764, | |
| "grad_norm": 0.3839825987815857, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7498, | |
| "mean_token_accuracy": 0.7639773234724998, | |
| "step": 2290 | |
| }, | |
| { | |
| "epoch": 2.2090307043949426, | |
| "grad_norm": 0.36488768458366394, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7335, | |
| "mean_token_accuracy": 0.7700603663921356, | |
| "step": 2295 | |
| }, | |
| { | |
| "epoch": 2.2138470800722456, | |
| "grad_norm": 0.3318578898906708, | |
| "learning_rate": 2e-05, | |
| "loss": 0.784, | |
| "mean_token_accuracy": 0.7567041218280792, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 2.2138470800722456, | |
| "eval_loss": 0.9012500047683716, | |
| "eval_mean_token_accuracy": 0.7370858192443848, | |
| "eval_runtime": 23.18, | |
| "eval_samples_per_second": 4.314, | |
| "eval_steps_per_second": 0.561, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 2.2186634557495486, | |
| "grad_norm": 0.4050207734107971, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7634, | |
| "mean_token_accuracy": 0.7616067379713058, | |
| "step": 2305 | |
| }, | |
| { | |
| "epoch": 2.223479831426851, | |
| "grad_norm": 0.3558892011642456, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7733, | |
| "mean_token_accuracy": 0.7565241128206253, | |
| "step": 2310 | |
| }, | |
| { | |
| "epoch": 2.228296207104154, | |
| "grad_norm": 0.39691296219825745, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7773, | |
| "mean_token_accuracy": 0.755375012755394, | |
| "step": 2315 | |
| }, | |
| { | |
| "epoch": 2.233112582781457, | |
| "grad_norm": 0.39223188161849976, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7261, | |
| "mean_token_accuracy": 0.77184529453516, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 2.2379289584587596, | |
| "grad_norm": 0.3514679968357086, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7958, | |
| "mean_token_accuracy": 0.7533982694149017, | |
| "step": 2325 | |
| }, | |
| { | |
| "epoch": 2.2427453341360626, | |
| "grad_norm": 0.3668522536754608, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7359, | |
| "mean_token_accuracy": 0.7672333508729935, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 2.2475617098133656, | |
| "grad_norm": 0.36801326274871826, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7852, | |
| "mean_token_accuracy": 0.7535142377018929, | |
| "step": 2335 | |
| }, | |
| { | |
| "epoch": 2.252378085490668, | |
| "grad_norm": 0.39534881711006165, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7792, | |
| "mean_token_accuracy": 0.7582185581326485, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 2.257194461167971, | |
| "grad_norm": 0.40263575315475464, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7334, | |
| "mean_token_accuracy": 0.773960568010807, | |
| "step": 2345 | |
| }, | |
| { | |
| "epoch": 2.262010836845274, | |
| "grad_norm": 0.3632088005542755, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7542, | |
| "mean_token_accuracy": 0.7653770089149475, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 2.2668272125225766, | |
| "grad_norm": 0.37849465012550354, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7661, | |
| "mean_token_accuracy": 0.7620905011892318, | |
| "step": 2355 | |
| }, | |
| { | |
| "epoch": 2.2716435881998795, | |
| "grad_norm": 0.43345385789871216, | |
| "learning_rate": 2e-05, | |
| "loss": 0.786, | |
| "mean_token_accuracy": 0.7564262568950653, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 2.2764599638771825, | |
| "grad_norm": 0.3616976737976074, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7895, | |
| "mean_token_accuracy": 0.756988562643528, | |
| "step": 2365 | |
| }, | |
| { | |
| "epoch": 2.281276339554485, | |
| "grad_norm": 0.36480623483657837, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7702, | |
| "mean_token_accuracy": 0.7585639297962189, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 2.286092715231788, | |
| "grad_norm": 0.36813783645629883, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7542, | |
| "mean_token_accuracy": 0.7651529818773269, | |
| "step": 2375 | |
| }, | |
| { | |
| "epoch": 2.290909090909091, | |
| "grad_norm": 0.3709932565689087, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7678, | |
| "mean_token_accuracy": 0.7603297665715217, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 2.2957254665863935, | |
| "grad_norm": 0.3750036954879761, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7467, | |
| "mean_token_accuracy": 0.7639903724193573, | |
| "step": 2385 | |
| }, | |
| { | |
| "epoch": 2.3005418422636965, | |
| "grad_norm": 0.4516039788722992, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7618, | |
| "mean_token_accuracy": 0.7606613889336586, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 2.3053582179409995, | |
| "grad_norm": 0.4092274010181427, | |
| "learning_rate": 2e-05, | |
| "loss": 0.742, | |
| "mean_token_accuracy": 0.763524578511715, | |
| "step": 2395 | |
| }, | |
| { | |
| "epoch": 2.310174593618302, | |
| "grad_norm": 0.3847052752971649, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7589, | |
| "mean_token_accuracy": 0.7599444389343262, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 2.310174593618302, | |
| "eval_loss": 0.8995312452316284, | |
| "eval_mean_token_accuracy": 0.7376233431009146, | |
| "eval_runtime": 23.063, | |
| "eval_samples_per_second": 4.336, | |
| "eval_steps_per_second": 0.564, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 2.314990969295605, | |
| "grad_norm": 0.39317360520362854, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7473, | |
| "mean_token_accuracy": 0.7662416860461235, | |
| "step": 2405 | |
| }, | |
| { | |
| "epoch": 2.319807344972908, | |
| "grad_norm": 0.3822837471961975, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7337, | |
| "mean_token_accuracy": 0.7616621315479278, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 2.3246237206502105, | |
| "grad_norm": 0.40895310044288635, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7632, | |
| "mean_token_accuracy": 0.765174786746502, | |
| "step": 2415 | |
| }, | |
| { | |
| "epoch": 2.3294400963275135, | |
| "grad_norm": 0.3547898530960083, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7343, | |
| "mean_token_accuracy": 0.767419508099556, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 2.3342564720048165, | |
| "grad_norm": 0.3836051821708679, | |
| "learning_rate": 2e-05, | |
| "loss": 0.769, | |
| "mean_token_accuracy": 0.7603886500000954, | |
| "step": 2425 | |
| }, | |
| { | |
| "epoch": 2.339072847682119, | |
| "grad_norm": 0.374479204416275, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7685, | |
| "mean_token_accuracy": 0.7552028700709343, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 2.343889223359422, | |
| "grad_norm": 0.37519872188568115, | |
| "learning_rate": 2e-05, | |
| "loss": 0.738, | |
| "mean_token_accuracy": 0.7714676797389984, | |
| "step": 2435 | |
| }, | |
| { | |
| "epoch": 2.348705599036725, | |
| "grad_norm": 0.41698017716407776, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7647, | |
| "mean_token_accuracy": 0.7613590836524964, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 2.3535219747140275, | |
| "grad_norm": 0.3842358887195587, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7809, | |
| "mean_token_accuracy": 0.7568897753953934, | |
| "step": 2445 | |
| }, | |
| { | |
| "epoch": 2.3583383503913304, | |
| "grad_norm": 0.42433464527130127, | |
| "learning_rate": 2e-05, | |
| "loss": 0.726, | |
| "mean_token_accuracy": 0.7667565375566483, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 2.3631547260686334, | |
| "grad_norm": 0.3653852641582489, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7759, | |
| "mean_token_accuracy": 0.7587065488100052, | |
| "step": 2455 | |
| }, | |
| { | |
| "epoch": 2.367971101745936, | |
| "grad_norm": 0.4034559726715088, | |
| "learning_rate": 2e-05, | |
| "loss": 0.756, | |
| "mean_token_accuracy": 0.7609921962022781, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 2.372787477423239, | |
| "grad_norm": 0.39348727464675903, | |
| "learning_rate": 2e-05, | |
| "loss": 0.755, | |
| "mean_token_accuracy": 0.7636922210454941, | |
| "step": 2465 | |
| }, | |
| { | |
| "epoch": 2.377603853100542, | |
| "grad_norm": 0.4438548684120178, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7267, | |
| "mean_token_accuracy": 0.7742465406656265, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 2.3824202287778444, | |
| "grad_norm": 0.455318808555603, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7524, | |
| "mean_token_accuracy": 0.7645120665431022, | |
| "step": 2475 | |
| }, | |
| { | |
| "epoch": 2.3872366044551474, | |
| "grad_norm": 0.36987900733947754, | |
| "learning_rate": 2e-05, | |
| "loss": 0.749, | |
| "mean_token_accuracy": 0.7676509037613869, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 2.3920529801324504, | |
| "grad_norm": 0.38009896874427795, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7927, | |
| "mean_token_accuracy": 0.7539709448814392, | |
| "step": 2485 | |
| }, | |
| { | |
| "epoch": 2.3968693558097534, | |
| "grad_norm": 0.3641670048236847, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7412, | |
| "mean_token_accuracy": 0.7636485174298286, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 2.401685731487056, | |
| "grad_norm": 0.35153478384017944, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7788, | |
| "mean_token_accuracy": 0.7512114852666855, | |
| "step": 2495 | |
| }, | |
| { | |
| "epoch": 2.406502107164359, | |
| "grad_norm": 0.3910485506057739, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7558, | |
| "mean_token_accuracy": 0.7611356824636459, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 2.406502107164359, | |
| "eval_loss": 0.8964062333106995, | |
| "eval_mean_token_accuracy": 0.7384276848572952, | |
| "eval_runtime": 23.1541, | |
| "eval_samples_per_second": 4.319, | |
| "eval_steps_per_second": 0.561, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 2.411318482841662, | |
| "grad_norm": 0.35887807607650757, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7755, | |
| "mean_token_accuracy": 0.7549531042575837, | |
| "step": 2505 | |
| }, | |
| { | |
| "epoch": 2.4161348585189644, | |
| "grad_norm": 0.39550381898880005, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7881, | |
| "mean_token_accuracy": 0.7503395497798919, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 2.4209512341962673, | |
| "grad_norm": 0.4064529836177826, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7608, | |
| "mean_token_accuracy": 0.7628593772649765, | |
| "step": 2515 | |
| }, | |
| { | |
| "epoch": 2.4257676098735703, | |
| "grad_norm": 0.3830781579017639, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7672, | |
| "mean_token_accuracy": 0.7605699598789215, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 2.430583985550873, | |
| "grad_norm": 0.36953088641166687, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7333, | |
| "mean_token_accuracy": 0.7733596116304398, | |
| "step": 2525 | |
| }, | |
| { | |
| "epoch": 2.435400361228176, | |
| "grad_norm": 0.42198288440704346, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7165, | |
| "mean_token_accuracy": 0.7725221425294876, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 2.440216736905479, | |
| "grad_norm": 0.41048187017440796, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7745, | |
| "mean_token_accuracy": 0.7596460074186325, | |
| "step": 2535 | |
| }, | |
| { | |
| "epoch": 2.4450331125827813, | |
| "grad_norm": 0.4293424189090729, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7548, | |
| "mean_token_accuracy": 0.7634711265563965, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 2.4498494882600843, | |
| "grad_norm": 0.3512209355831146, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7663, | |
| "mean_token_accuracy": 0.7589974060654641, | |
| "step": 2545 | |
| }, | |
| { | |
| "epoch": 2.4546658639373873, | |
| "grad_norm": 0.3962377607822418, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7541, | |
| "mean_token_accuracy": 0.7592899754643441, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 2.45948223961469, | |
| "grad_norm": 0.3469253182411194, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7522, | |
| "mean_token_accuracy": 0.7652729198336601, | |
| "step": 2555 | |
| }, | |
| { | |
| "epoch": 2.464298615291993, | |
| "grad_norm": 0.3742927312850952, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7787, | |
| "mean_token_accuracy": 0.7575361758470536, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 2.4691149909692958, | |
| "grad_norm": 0.35377809405326843, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8101, | |
| "mean_token_accuracy": 0.7458323091268539, | |
| "step": 2565 | |
| }, | |
| { | |
| "epoch": 2.4739313666465983, | |
| "grad_norm": 0.3490973711013794, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7427, | |
| "mean_token_accuracy": 0.7698989689350129, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 2.4787477423239013, | |
| "grad_norm": 0.4195067584514618, | |
| "learning_rate": 2e-05, | |
| "loss": 0.775, | |
| "mean_token_accuracy": 0.760101044178009, | |
| "step": 2575 | |
| }, | |
| { | |
| "epoch": 2.4835641180012042, | |
| "grad_norm": 0.40223532915115356, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7506, | |
| "mean_token_accuracy": 0.7645360633730889, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 2.4883804936785068, | |
| "grad_norm": 0.38401681184768677, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7746, | |
| "mean_token_accuracy": 0.7553236082196235, | |
| "step": 2585 | |
| }, | |
| { | |
| "epoch": 2.4931968693558098, | |
| "grad_norm": 0.3794625997543335, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7866, | |
| "mean_token_accuracy": 0.7584537342190742, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 2.4980132450331127, | |
| "grad_norm": 0.3422956168651581, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7529, | |
| "mean_token_accuracy": 0.7641922473907471, | |
| "step": 2595 | |
| }, | |
| { | |
| "epoch": 2.5028296207104153, | |
| "grad_norm": 0.3222048580646515, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7641, | |
| "mean_token_accuracy": 0.764282739162445, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 2.5028296207104153, | |
| "eval_loss": 0.8935937285423279, | |
| "eval_mean_token_accuracy": 0.738965378357814, | |
| "eval_runtime": 23.1976, | |
| "eval_samples_per_second": 4.311, | |
| "eval_steps_per_second": 0.56, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 2.5076459963877182, | |
| "grad_norm": 0.37517258524894714, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7487, | |
| "mean_token_accuracy": 0.7686536237597466, | |
| "step": 2605 | |
| }, | |
| { | |
| "epoch": 2.512462372065021, | |
| "grad_norm": 0.39009490609169006, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7905, | |
| "mean_token_accuracy": 0.7552632004022598, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 2.5172787477423237, | |
| "grad_norm": 0.3636697232723236, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7804, | |
| "mean_token_accuracy": 0.757335577905178, | |
| "step": 2615 | |
| }, | |
| { | |
| "epoch": 2.5220951234196267, | |
| "grad_norm": 0.35198771953582764, | |
| "learning_rate": 2e-05, | |
| "loss": 0.74, | |
| "mean_token_accuracy": 0.7654551029205322, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 2.5269114990969297, | |
| "grad_norm": 0.37345898151397705, | |
| "learning_rate": 2e-05, | |
| "loss": 0.755, | |
| "mean_token_accuracy": 0.7649663195014, | |
| "step": 2625 | |
| }, | |
| { | |
| "epoch": 2.5317278747742327, | |
| "grad_norm": 0.34071609377861023, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7371, | |
| "mean_token_accuracy": 0.7710829094052315, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 2.536544250451535, | |
| "grad_norm": 0.36361777782440186, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7923, | |
| "mean_token_accuracy": 0.7498438969254494, | |
| "step": 2635 | |
| }, | |
| { | |
| "epoch": 2.541360626128838, | |
| "grad_norm": 0.3340684771537781, | |
| "learning_rate": 2e-05, | |
| "loss": 0.753, | |
| "mean_token_accuracy": 0.763702517747879, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 2.546177001806141, | |
| "grad_norm": 0.3246862590312958, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7392, | |
| "mean_token_accuracy": 0.7666044220328331, | |
| "step": 2645 | |
| }, | |
| { | |
| "epoch": 2.5509933774834437, | |
| "grad_norm": 0.3731483221054077, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8032, | |
| "mean_token_accuracy": 0.7451361432671547, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 2.5558097531607467, | |
| "grad_norm": 0.3968595266342163, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7558, | |
| "mean_token_accuracy": 0.7619862273335457, | |
| "step": 2655 | |
| }, | |
| { | |
| "epoch": 2.5606261288380496, | |
| "grad_norm": 0.36753612756729126, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7936, | |
| "mean_token_accuracy": 0.7513890892267228, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 2.565442504515352, | |
| "grad_norm": 0.38044974207878113, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7652, | |
| "mean_token_accuracy": 0.7612641602754593, | |
| "step": 2665 | |
| }, | |
| { | |
| "epoch": 2.570258880192655, | |
| "grad_norm": 0.4354467988014221, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7662, | |
| "mean_token_accuracy": 0.7602713435888291, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 2.575075255869958, | |
| "grad_norm": 0.34920626878738403, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7802, | |
| "mean_token_accuracy": 0.7527212247252464, | |
| "step": 2675 | |
| }, | |
| { | |
| "epoch": 2.5798916315472606, | |
| "grad_norm": 0.3853444457054138, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7929, | |
| "mean_token_accuracy": 0.7507719174027443, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 2.5847080072245636, | |
| "grad_norm": 0.37398529052734375, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7335, | |
| "mean_token_accuracy": 0.7689384967088699, | |
| "step": 2685 | |
| }, | |
| { | |
| "epoch": 2.5895243829018666, | |
| "grad_norm": 0.37192922830581665, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7588, | |
| "mean_token_accuracy": 0.7620333895087242, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 2.594340758579169, | |
| "grad_norm": 0.39118680357933044, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7804, | |
| "mean_token_accuracy": 0.7577536061406136, | |
| "step": 2695 | |
| }, | |
| { | |
| "epoch": 2.599157134256472, | |
| "grad_norm": 0.3549438416957855, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7601, | |
| "mean_token_accuracy": 0.7577766343951226, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 2.599157134256472, | |
| "eval_loss": 0.8917187452316284, | |
| "eval_mean_token_accuracy": 0.7396010435544528, | |
| "eval_runtime": 23.1125, | |
| "eval_samples_per_second": 4.327, | |
| "eval_steps_per_second": 0.562, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 2.603973509933775, | |
| "grad_norm": 0.3113292455673218, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8017, | |
| "mean_token_accuracy": 0.7544750049710274, | |
| "step": 2705 | |
| }, | |
| { | |
| "epoch": 2.6087898856110776, | |
| "grad_norm": 0.3961426615715027, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7835, | |
| "mean_token_accuracy": 0.757952244579792, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 2.6136062612883806, | |
| "grad_norm": 0.38531556725502014, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7448, | |
| "mean_token_accuracy": 0.7656066238880157, | |
| "step": 2715 | |
| }, | |
| { | |
| "epoch": 2.6184226369656836, | |
| "grad_norm": 0.35683271288871765, | |
| "learning_rate": 2e-05, | |
| "loss": 0.764, | |
| "mean_token_accuracy": 0.7579592734575271, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 2.623239012642986, | |
| "grad_norm": 0.3948013484477997, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7455, | |
| "mean_token_accuracy": 0.7686024904251099, | |
| "step": 2725 | |
| }, | |
| { | |
| "epoch": 2.628055388320289, | |
| "grad_norm": 0.4119192659854889, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7726, | |
| "mean_token_accuracy": 0.7610606983304024, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 2.632871763997592, | |
| "grad_norm": 0.31112074851989746, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7557, | |
| "mean_token_accuracy": 0.7639082312583924, | |
| "step": 2735 | |
| }, | |
| { | |
| "epoch": 2.6376881396748946, | |
| "grad_norm": 0.371297150850296, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7477, | |
| "mean_token_accuracy": 0.76564369648695, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 2.6425045153521975, | |
| "grad_norm": 0.4188891351222992, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7785, | |
| "mean_token_accuracy": 0.7570346057415008, | |
| "step": 2745 | |
| }, | |
| { | |
| "epoch": 2.6473208910295005, | |
| "grad_norm": 0.34406188130378723, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7626, | |
| "mean_token_accuracy": 0.7595376938581466, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 2.652137266706803, | |
| "grad_norm": 0.3494187295436859, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7572, | |
| "mean_token_accuracy": 0.7598767071962357, | |
| "step": 2755 | |
| }, | |
| { | |
| "epoch": 2.656953642384106, | |
| "grad_norm": 0.40831491351127625, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7581, | |
| "mean_token_accuracy": 0.7597803384065628, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 2.661770018061409, | |
| "grad_norm": 0.44571653008461, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7731, | |
| "mean_token_accuracy": 0.7607214003801346, | |
| "step": 2765 | |
| }, | |
| { | |
| "epoch": 2.6665863937387115, | |
| "grad_norm": 0.4143577814102173, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7426, | |
| "mean_token_accuracy": 0.7718357488512992, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 2.6714027694160145, | |
| "grad_norm": 0.38475990295410156, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7494, | |
| "mean_token_accuracy": 0.7667113184928894, | |
| "step": 2775 | |
| }, | |
| { | |
| "epoch": 2.6762191450933175, | |
| "grad_norm": 0.3438257575035095, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7726, | |
| "mean_token_accuracy": 0.7581563532352448, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 2.68103552077062, | |
| "grad_norm": 0.3562197983264923, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7758, | |
| "mean_token_accuracy": 0.7572029024362564, | |
| "step": 2785 | |
| }, | |
| { | |
| "epoch": 2.685851896447923, | |
| "grad_norm": 0.38419750332832336, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7738, | |
| "mean_token_accuracy": 0.7595375701785088, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 2.690668272125226, | |
| "grad_norm": 0.3750392198562622, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7642, | |
| "mean_token_accuracy": 0.7576078772544861, | |
| "step": 2795 | |
| }, | |
| { | |
| "epoch": 2.6954846478025285, | |
| "grad_norm": 0.4193848669528961, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7517, | |
| "mean_token_accuracy": 0.7621890246868134, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 2.6954846478025285, | |
| "eval_loss": 0.8895312547683716, | |
| "eval_mean_token_accuracy": 0.7398839684633108, | |
| "eval_runtime": 23.179, | |
| "eval_samples_per_second": 4.314, | |
| "eval_steps_per_second": 0.561, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 2.7003010234798315, | |
| "grad_norm": 0.4397479295730591, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7713, | |
| "mean_token_accuracy": 0.7680559456348419, | |
| "step": 2805 | |
| }, | |
| { | |
| "epoch": 2.7051173991571345, | |
| "grad_norm": 0.3621998727321625, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7212, | |
| "mean_token_accuracy": 0.77404675334692, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 2.709933774834437, | |
| "grad_norm": 0.3363446891307831, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7884, | |
| "mean_token_accuracy": 0.7583529412746429, | |
| "step": 2815 | |
| }, | |
| { | |
| "epoch": 2.71475015051174, | |
| "grad_norm": 0.36584141850471497, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7697, | |
| "mean_token_accuracy": 0.756261146068573, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 2.719566526189043, | |
| "grad_norm": 0.46295201778411865, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7241, | |
| "mean_token_accuracy": 0.7722676262259484, | |
| "step": 2825 | |
| }, | |
| { | |
| "epoch": 2.7243829018663455, | |
| "grad_norm": 0.3580615520477295, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8185, | |
| "mean_token_accuracy": 0.7469978988170624, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 2.7291992775436484, | |
| "grad_norm": 0.3244786858558655, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7581, | |
| "mean_token_accuracy": 0.7603826940059661, | |
| "step": 2835 | |
| }, | |
| { | |
| "epoch": 2.7340156532209514, | |
| "grad_norm": 0.39725518226623535, | |
| "learning_rate": 2e-05, | |
| "loss": 0.767, | |
| "mean_token_accuracy": 0.7605881199240685, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 2.738832028898254, | |
| "grad_norm": 0.40936702489852905, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7682, | |
| "mean_token_accuracy": 0.7618032857775688, | |
| "step": 2845 | |
| }, | |
| { | |
| "epoch": 2.743648404575557, | |
| "grad_norm": 0.357181191444397, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7275, | |
| "mean_token_accuracy": 0.7717493593692779, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 2.74846478025286, | |
| "grad_norm": 0.409920871257782, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7478, | |
| "mean_token_accuracy": 0.7658723518252373, | |
| "step": 2855 | |
| }, | |
| { | |
| "epoch": 2.7532811559301624, | |
| "grad_norm": 0.4086902439594269, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7401, | |
| "mean_token_accuracy": 0.7642821088433266, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 2.7580975316074654, | |
| "grad_norm": 0.3491520285606384, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7711, | |
| "mean_token_accuracy": 0.7554255187511444, | |
| "step": 2865 | |
| }, | |
| { | |
| "epoch": 2.7629139072847684, | |
| "grad_norm": 0.41609054803848267, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7615, | |
| "mean_token_accuracy": 0.7587140306830407, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 2.767730282962071, | |
| "grad_norm": 0.3808140754699707, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8011, | |
| "mean_token_accuracy": 0.7536677673459053, | |
| "step": 2875 | |
| }, | |
| { | |
| "epoch": 2.772546658639374, | |
| "grad_norm": 0.37478163838386536, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7766, | |
| "mean_token_accuracy": 0.7533249914646148, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 2.777363034316677, | |
| "grad_norm": 0.43278947472572327, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7367, | |
| "mean_token_accuracy": 0.7670947164297104, | |
| "step": 2885 | |
| }, | |
| { | |
| "epoch": 2.7821794099939794, | |
| "grad_norm": 0.3967084288597107, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7541, | |
| "mean_token_accuracy": 0.760562515258789, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 2.7869957856712824, | |
| "grad_norm": 0.4235425293445587, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7525, | |
| "mean_token_accuracy": 0.7589347019791604, | |
| "step": 2895 | |
| }, | |
| { | |
| "epoch": 2.7918121613485853, | |
| "grad_norm": 0.4071233570575714, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7481, | |
| "mean_token_accuracy": 0.7661397770047188, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 2.7918121613485853, | |
| "eval_loss": 0.8868749737739563, | |
| "eval_mean_token_accuracy": 0.7403522042127756, | |
| "eval_runtime": 23.1245, | |
| "eval_samples_per_second": 4.324, | |
| "eval_steps_per_second": 0.562, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 2.796628537025888, | |
| "grad_norm": 0.38018038868904114, | |
| "learning_rate": 2e-05, | |
| "loss": 0.75, | |
| "mean_token_accuracy": 0.7664709404110909, | |
| "step": 2905 | |
| }, | |
| { | |
| "epoch": 2.801444912703191, | |
| "grad_norm": 0.412870317697525, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7623, | |
| "mean_token_accuracy": 0.7559232786297798, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 2.806261288380494, | |
| "grad_norm": 0.41684240102767944, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7232, | |
| "mean_token_accuracy": 0.7719547718763351, | |
| "step": 2915 | |
| }, | |
| { | |
| "epoch": 2.8110776640577964, | |
| "grad_norm": 0.35981109738349915, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7832, | |
| "mean_token_accuracy": 0.7560476765036583, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 2.8158940397350993, | |
| "grad_norm": 0.40023863315582275, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7567, | |
| "mean_token_accuracy": 0.765048947930336, | |
| "step": 2925 | |
| }, | |
| { | |
| "epoch": 2.8207104154124023, | |
| "grad_norm": 0.39661338925361633, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7377, | |
| "mean_token_accuracy": 0.7675866693258285, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 2.825526791089705, | |
| "grad_norm": 0.35321974754333496, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8157, | |
| "mean_token_accuracy": 0.746397140622139, | |
| "step": 2935 | |
| }, | |
| { | |
| "epoch": 2.830343166767008, | |
| "grad_norm": 0.39979612827301025, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7824, | |
| "mean_token_accuracy": 0.7587210416793824, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 2.835159542444311, | |
| "grad_norm": 0.3821732699871063, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7895, | |
| "mean_token_accuracy": 0.7537524312734604, | |
| "step": 2945 | |
| }, | |
| { | |
| "epoch": 2.8399759181216133, | |
| "grad_norm": 0.3334910571575165, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7674, | |
| "mean_token_accuracy": 0.7562859013676644, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 2.8447922937989163, | |
| "grad_norm": 0.37625160813331604, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7397, | |
| "mean_token_accuracy": 0.7702266126871109, | |
| "step": 2955 | |
| }, | |
| { | |
| "epoch": 2.8496086694762193, | |
| "grad_norm": 0.38816380500793457, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7463, | |
| "mean_token_accuracy": 0.767043624818325, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 2.854425045153522, | |
| "grad_norm": 0.38124793767929077, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7188, | |
| "mean_token_accuracy": 0.7760316908359528, | |
| "step": 2965 | |
| }, | |
| { | |
| "epoch": 2.8592414208308248, | |
| "grad_norm": 0.3955833315849304, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7405, | |
| "mean_token_accuracy": 0.7660493075847625, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 2.8640577965081278, | |
| "grad_norm": 0.36449986696243286, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7639, | |
| "mean_token_accuracy": 0.7551109924912452, | |
| "step": 2975 | |
| }, | |
| { | |
| "epoch": 2.8688741721854303, | |
| "grad_norm": 0.3809254765510559, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7378, | |
| "mean_token_accuracy": 0.7687376230955124, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 2.8736905478627333, | |
| "grad_norm": 0.347232460975647, | |
| "learning_rate": 2e-05, | |
| "loss": 0.717, | |
| "mean_token_accuracy": 0.7766376540064812, | |
| "step": 2985 | |
| }, | |
| { | |
| "epoch": 2.8785069235400362, | |
| "grad_norm": 0.35575151443481445, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7485, | |
| "mean_token_accuracy": 0.7674190372228622, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 2.8833232992173388, | |
| "grad_norm": 0.3710139989852905, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7296, | |
| "mean_token_accuracy": 0.7713616698980331, | |
| "step": 2995 | |
| }, | |
| { | |
| "epoch": 2.8881396748946417, | |
| "grad_norm": 0.37481653690338135, | |
| "learning_rate": 2e-05, | |
| "loss": 0.759, | |
| "mean_token_accuracy": 0.7613216653466225, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 2.8881396748946417, | |
| "eval_loss": 0.8843749761581421, | |
| "eval_mean_token_accuracy": 0.7408330532220694, | |
| "eval_runtime": 23.1361, | |
| "eval_samples_per_second": 4.322, | |
| "eval_steps_per_second": 0.562, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 2.8929560505719447, | |
| "grad_norm": 0.3814287781715393, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7218, | |
| "mean_token_accuracy": 0.7693289056420326, | |
| "step": 3005 | |
| }, | |
| { | |
| "epoch": 2.8977724262492472, | |
| "grad_norm": 0.38517627120018005, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7829, | |
| "mean_token_accuracy": 0.7556496039032936, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 2.90258880192655, | |
| "grad_norm": 0.3561043441295624, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7406, | |
| "mean_token_accuracy": 0.7704349234700203, | |
| "step": 3015 | |
| }, | |
| { | |
| "epoch": 2.907405177603853, | |
| "grad_norm": 0.3632068932056427, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7639, | |
| "mean_token_accuracy": 0.7629840105772019, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 2.9122215532811557, | |
| "grad_norm": 0.39246779680252075, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7497, | |
| "mean_token_accuracy": 0.7630103766918183, | |
| "step": 3025 | |
| }, | |
| { | |
| "epoch": 2.9170379289584587, | |
| "grad_norm": 0.3949568569660187, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7395, | |
| "mean_token_accuracy": 0.7638208046555519, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 2.9218543046357617, | |
| "grad_norm": 0.3802914619445801, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7666, | |
| "mean_token_accuracy": 0.7584463760256768, | |
| "step": 3035 | |
| }, | |
| { | |
| "epoch": 2.926670680313064, | |
| "grad_norm": 0.3917528986930847, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7556, | |
| "mean_token_accuracy": 0.7628441542387009, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 2.931487055990367, | |
| "grad_norm": 0.3582029640674591, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7634, | |
| "mean_token_accuracy": 0.762310317158699, | |
| "step": 3045 | |
| }, | |
| { | |
| "epoch": 2.93630343166767, | |
| "grad_norm": 0.3874087929725647, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7719, | |
| "mean_token_accuracy": 0.7597872078418731, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 2.9411198073449727, | |
| "grad_norm": 0.3633805215358734, | |
| "learning_rate": 2e-05, | |
| "loss": 0.761, | |
| "mean_token_accuracy": 0.7614316448569298, | |
| "step": 3055 | |
| }, | |
| { | |
| "epoch": 2.9459361830222757, | |
| "grad_norm": 0.4053983688354492, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7396, | |
| "mean_token_accuracy": 0.7678497686982155, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 2.9507525586995786, | |
| "grad_norm": 0.37059614062309265, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7771, | |
| "mean_token_accuracy": 0.756655989587307, | |
| "step": 3065 | |
| }, | |
| { | |
| "epoch": 2.955568934376881, | |
| "grad_norm": 0.3870275020599365, | |
| "learning_rate": 2e-05, | |
| "loss": 0.8507, | |
| "mean_token_accuracy": 0.7373094201087952, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 2.960385310054184, | |
| "grad_norm": 0.32037490606307983, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7344, | |
| "mean_token_accuracy": 0.7677667886018753, | |
| "step": 3075 | |
| }, | |
| { | |
| "epoch": 2.965201685731487, | |
| "grad_norm": 0.39370909333229065, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7814, | |
| "mean_token_accuracy": 0.7587102144956589, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 2.9700180614087897, | |
| "grad_norm": 0.39840105175971985, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7745, | |
| "mean_token_accuracy": 0.7543177887797355, | |
| "step": 3085 | |
| }, | |
| { | |
| "epoch": 2.9748344370860926, | |
| "grad_norm": 0.40780118107795715, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7405, | |
| "mean_token_accuracy": 0.7715254962444306, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 2.9796508127633956, | |
| "grad_norm": 0.41411417722702026, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7468, | |
| "mean_token_accuracy": 0.7587345033884049, | |
| "step": 3095 | |
| }, | |
| { | |
| "epoch": 2.984467188440698, | |
| "grad_norm": 0.36079859733581543, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7635, | |
| "mean_token_accuracy": 0.757730433344841, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 2.984467188440698, | |
| "eval_loss": 0.8817187547683716, | |
| "eval_mean_token_accuracy": 0.7412376220409687, | |
| "eval_runtime": 23.1884, | |
| "eval_samples_per_second": 4.313, | |
| "eval_steps_per_second": 0.561, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 2.989283564118001, | |
| "grad_norm": 0.33232828974723816, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7523, | |
| "mean_token_accuracy": 0.7645888909697532, | |
| "step": 3105 | |
| }, | |
| { | |
| "epoch": 2.994099939795304, | |
| "grad_norm": 0.3581736981868744, | |
| "learning_rate": 2e-05, | |
| "loss": 0.7617, | |
| "mean_token_accuracy": 0.7637889131903648, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 2.9979530403371464, | |
| "step": 3114, | |
| "total_flos": 0.0, | |
| "train_loss": 0.0, | |
| "train_runtime": 1.7052, | |
| "train_samples_per_second": 29222.257, | |
| "train_steps_per_second": 1826.171 | |
| } | |
| ], | |
| "logging_steps": 5, | |
| "max_steps": 3114, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 3, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |