{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.951768488745981,
  "eval_steps": 500,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012861736334405145,
      "grad_norm": 577.1012573242188,
      "learning_rate": 8.333333333333335e-09,
      "loss": 13.9619,
      "num_input_tokens_seen": 15136,
      "step": 1
    },
    {
      "epoch": 0.02572347266881029,
      "grad_norm": 552.5532836914062,
      "learning_rate": 1.666666666666667e-08,
      "loss": 13.7129,
      "num_input_tokens_seen": 30208,
      "step": 2
    },
    {
      "epoch": 0.03858520900321544,
      "grad_norm": 562.699462890625,
      "learning_rate": 2.5000000000000002e-08,
      "loss": 13.8474,
      "num_input_tokens_seen": 45376,
      "step": 3
    },
    {
      "epoch": 0.05144694533762058,
      "grad_norm": 585.796630859375,
      "learning_rate": 3.333333333333334e-08,
      "loss": 13.8844,
      "num_input_tokens_seen": 59968,
      "step": 4
    },
    {
      "epoch": 0.06430868167202572,
      "grad_norm": 592.56689453125,
      "learning_rate": 4.166666666666667e-08,
      "loss": 14.138,
      "num_input_tokens_seen": 75136,
      "step": 5
    },
    {
      "epoch": 0.07717041800643087,
      "grad_norm": 569.5825805664062,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 13.9077,
      "num_input_tokens_seen": 90816,
      "step": 6
    },
    {
      "epoch": 0.09003215434083602,
      "grad_norm": 578.7579956054688,
      "learning_rate": 5.833333333333334e-08,
      "loss": 13.8396,
      "num_input_tokens_seen": 105600,
      "step": 7
    },
    {
      "epoch": 0.10289389067524116,
      "grad_norm": 577.330322265625,
      "learning_rate": 6.666666666666668e-08,
      "loss": 13.9828,
      "num_input_tokens_seen": 119872,
      "step": 8
    },
    {
      "epoch": 0.1157556270096463,
      "grad_norm": 575.2134399414062,
      "learning_rate": 7.500000000000001e-08,
      "loss": 14.0361,
      "num_input_tokens_seen": 135104,
      "step": 9
    },
    {
      "epoch": 0.12861736334405144,
      "grad_norm": 579.0157470703125,
      "learning_rate": 8.333333333333334e-08,
      "loss": 13.9392,
      "num_input_tokens_seen": 150016,
      "step": 10
    },
    {
      "epoch": 0.1414790996784566,
      "grad_norm": 585.5687255859375,
      "learning_rate": 9.166666666666668e-08,
      "loss": 14.0256,
      "num_input_tokens_seen": 165216,
      "step": 11
    },
    {
      "epoch": 0.15434083601286175,
      "grad_norm": 566.5042114257812,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 13.6693,
      "num_input_tokens_seen": 179296,
      "step": 12
    },
    {
      "epoch": 0.16720257234726688,
      "grad_norm": 574.4198608398438,
      "learning_rate": 1.0833333333333335e-07,
      "loss": 13.9031,
      "num_input_tokens_seen": 193312,
      "step": 13
    },
    {
      "epoch": 0.18006430868167203,
      "grad_norm": 588.1041259765625,
      "learning_rate": 1.1666666666666668e-07,
      "loss": 13.8575,
      "num_input_tokens_seen": 208768,
      "step": 14
    },
    {
      "epoch": 0.19292604501607716,
      "grad_norm": 573.4750366210938,
      "learning_rate": 1.2500000000000002e-07,
      "loss": 13.8366,
      "num_input_tokens_seen": 223456,
      "step": 15
    },
    {
      "epoch": 0.2057877813504823,
      "grad_norm": 572.26220703125,
      "learning_rate": 1.3333333333333336e-07,
      "loss": 13.8705,
      "num_input_tokens_seen": 238656,
      "step": 16
    },
    {
      "epoch": 0.21864951768488747,
      "grad_norm": 566.3518676757812,
      "learning_rate": 1.4166666666666668e-07,
      "loss": 13.1816,
      "num_input_tokens_seen": 253312,
      "step": 17
    },
    {
      "epoch": 0.2315112540192926,
      "grad_norm": 560.5255126953125,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 13.2292,
      "num_input_tokens_seen": 268352,
      "step": 18
    },
    {
      "epoch": 0.24437299035369775,
      "grad_norm": 583.0404663085938,
      "learning_rate": 1.5833333333333336e-07,
      "loss": 13.4366,
      "num_input_tokens_seen": 282560,
      "step": 19
    },
    {
      "epoch": 0.2572347266881029,
      "grad_norm": 555.8986206054688,
      "learning_rate": 1.6666666666666668e-07,
      "loss": 12.9904,
      "num_input_tokens_seen": 297120,
      "step": 20
    },
    {
      "epoch": 0.27009646302250806,
      "grad_norm": 557.931396484375,
      "learning_rate": 1.7500000000000002e-07,
      "loss": 12.878,
      "num_input_tokens_seen": 312416,
      "step": 21
    },
    {
      "epoch": 0.2829581993569132,
      "grad_norm": 562.4014282226562,
      "learning_rate": 1.8333333333333336e-07,
      "loss": 12.7794,
      "num_input_tokens_seen": 327968,
      "step": 22
    },
    {
      "epoch": 0.2958199356913183,
      "grad_norm": 548.7069702148438,
      "learning_rate": 1.9166666666666668e-07,
      "loss": 11.3144,
      "num_input_tokens_seen": 342880,
      "step": 23
    },
    {
      "epoch": 0.3086816720257235,
      "grad_norm": 511.2892761230469,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 10.8531,
      "num_input_tokens_seen": 358848,
      "step": 24
    },
    {
      "epoch": 0.3215434083601286,
      "grad_norm": 496.71868896484375,
      "learning_rate": 2.0833333333333333e-07,
      "loss": 10.7149,
      "num_input_tokens_seen": 373792,
      "step": 25
    },
    {
      "epoch": 0.33440514469453375,
      "grad_norm": 504.7909851074219,
      "learning_rate": 2.166666666666667e-07,
      "loss": 10.5802,
      "num_input_tokens_seen": 389248,
      "step": 26
    },
    {
      "epoch": 0.34726688102893893,
      "grad_norm": 495.0346984863281,
      "learning_rate": 2.2500000000000002e-07,
      "loss": 10.3671,
      "num_input_tokens_seen": 404320,
      "step": 27
    },
    {
      "epoch": 0.36012861736334406,
      "grad_norm": 482.26318359375,
      "learning_rate": 2.3333333333333336e-07,
      "loss": 10.1751,
      "num_input_tokens_seen": 419296,
      "step": 28
    },
    {
      "epoch": 0.3729903536977492,
      "grad_norm": 480.48193359375,
      "learning_rate": 2.416666666666667e-07,
      "loss": 9.7707,
      "num_input_tokens_seen": 433632,
      "step": 29
    },
    {
      "epoch": 0.3858520900321543,
      "grad_norm": 467.7600402832031,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 9.6489,
      "num_input_tokens_seen": 448736,
      "step": 30
    },
    {
      "epoch": 0.3987138263665595,
      "grad_norm": 468.9175720214844,
      "learning_rate": 2.5833333333333333e-07,
      "loss": 8.6805,
      "num_input_tokens_seen": 463840,
      "step": 31
    },
    {
      "epoch": 0.4115755627009646,
      "grad_norm": 394.77691650390625,
      "learning_rate": 2.666666666666667e-07,
      "loss": 5.9207,
      "num_input_tokens_seen": 478816,
      "step": 32
    },
    {
      "epoch": 0.42443729903536975,
      "grad_norm": 440.14697265625,
      "learning_rate": 2.75e-07,
      "loss": 5.7661,
      "num_input_tokens_seen": 494464,
      "step": 33
    },
    {
      "epoch": 0.43729903536977494,
      "grad_norm": 436.6060791015625,
      "learning_rate": 2.8333333333333336e-07,
      "loss": 5.6168,
      "num_input_tokens_seen": 510080,
      "step": 34
    },
    {
      "epoch": 0.45016077170418006,
      "grad_norm": 388.576416015625,
      "learning_rate": 2.916666666666667e-07,
      "loss": 5.3367,
      "num_input_tokens_seen": 525216,
      "step": 35
    },
    {
      "epoch": 0.4630225080385852,
      "grad_norm": 327.9267272949219,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 4.9751,
      "num_input_tokens_seen": 539648,
      "step": 36
    },
    {
      "epoch": 0.4758842443729904,
      "grad_norm": 322.6181335449219,
      "learning_rate": 3.083333333333334e-07,
      "loss": 4.7041,
      "num_input_tokens_seen": 554080,
      "step": 37
    },
    {
      "epoch": 0.4887459807073955,
      "grad_norm": 313.6160888671875,
      "learning_rate": 3.166666666666667e-07,
      "loss": 4.4631,
      "num_input_tokens_seen": 568960,
      "step": 38
    },
    {
      "epoch": 0.5016077170418006,
      "grad_norm": 299.7319641113281,
      "learning_rate": 3.25e-07,
      "loss": 4.1912,
      "num_input_tokens_seen": 583904,
      "step": 39
    },
    {
      "epoch": 0.5144694533762058,
      "grad_norm": 308.4530944824219,
      "learning_rate": 3.3333333333333335e-07,
      "loss": 3.9146,
      "num_input_tokens_seen": 598848,
      "step": 40
    },
    {
      "epoch": 0.5273311897106109,
      "grad_norm": 315.37396240234375,
      "learning_rate": 3.416666666666667e-07,
      "loss": 3.053,
      "num_input_tokens_seen": 614208,
      "step": 41
    },
    {
      "epoch": 0.5401929260450161,
      "grad_norm": 263.33258056640625,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 1.5544,
      "num_input_tokens_seen": 629280,
      "step": 42
    },
    {
      "epoch": 0.5530546623794212,
      "grad_norm": 177.95458984375,
      "learning_rate": 3.583333333333334e-07,
      "loss": 1.0549,
      "num_input_tokens_seen": 643968,
      "step": 43
    },
    {
      "epoch": 0.5659163987138264,
      "grad_norm": 142.09307861328125,
      "learning_rate": 3.666666666666667e-07,
      "loss": 0.711,
      "num_input_tokens_seen": 659040,
      "step": 44
    },
    {
      "epoch": 0.5787781350482315,
      "grad_norm": 93.70928192138672,
      "learning_rate": 3.75e-07,
      "loss": 0.5127,
      "num_input_tokens_seen": 674464,
      "step": 45
    },
    {
      "epoch": 0.5916398713826366,
      "grad_norm": 65.4582290649414,
      "learning_rate": 3.8333333333333335e-07,
      "loss": 0.4143,
      "num_input_tokens_seen": 689216,
      "step": 46
    },
    {
      "epoch": 0.6045016077170418,
      "grad_norm": 51.321414947509766,
      "learning_rate": 3.9166666666666675e-07,
      "loss": 0.4014,
      "num_input_tokens_seen": 704448,
      "step": 47
    },
    {
      "epoch": 0.617363344051447,
      "grad_norm": 38.20650100708008,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.384,
      "num_input_tokens_seen": 719520,
      "step": 48
    },
    {
      "epoch": 0.6302250803858521,
      "grad_norm": 70.43089294433594,
      "learning_rate": 4.083333333333334e-07,
      "loss": 0.3017,
      "num_input_tokens_seen": 735520,
      "step": 49
    },
    {
      "epoch": 0.6430868167202572,
      "grad_norm": 47.982505798339844,
      "learning_rate": 4.1666666666666667e-07,
      "loss": 0.3031,
      "num_input_tokens_seen": 750976,
      "step": 50
    },
    {
      "epoch": 0.6559485530546624,
      "grad_norm": 27.063344955444336,
      "learning_rate": 4.2500000000000006e-07,
      "loss": 0.2818,
      "num_input_tokens_seen": 765728,
      "step": 51
    },
    {
      "epoch": 0.6688102893890675,
      "grad_norm": 60.75875473022461,
      "learning_rate": 4.333333333333334e-07,
      "loss": 0.2881,
      "num_input_tokens_seen": 780608,
      "step": 52
    },
    {
      "epoch": 0.6816720257234726,
      "grad_norm": 56.81061935424805,
      "learning_rate": 4.416666666666667e-07,
      "loss": 0.2943,
      "num_input_tokens_seen": 796192,
      "step": 53
    },
    {
      "epoch": 0.6945337620578779,
      "grad_norm": 31.529571533203125,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 0.2781,
      "num_input_tokens_seen": 810496,
      "step": 54
    },
    {
      "epoch": 0.707395498392283,
      "grad_norm": 33.71590805053711,
      "learning_rate": 4.583333333333333e-07,
      "loss": 0.2724,
      "num_input_tokens_seen": 826048,
      "step": 55
    },
    {
      "epoch": 0.7202572347266881,
      "grad_norm": 26.352554321289062,
      "learning_rate": 4.666666666666667e-07,
      "loss": 0.252,
      "num_input_tokens_seen": 840512,
      "step": 56
    },
    {
      "epoch": 0.7331189710610932,
      "grad_norm": 15.606438636779785,
      "learning_rate": 4.7500000000000006e-07,
      "loss": 0.2482,
      "num_input_tokens_seen": 855808,
      "step": 57
    },
    {
      "epoch": 0.7459807073954984,
      "grad_norm": 31.445789337158203,
      "learning_rate": 4.833333333333334e-07,
      "loss": 0.2212,
      "num_input_tokens_seen": 871520,
      "step": 58
    },
    {
      "epoch": 0.7588424437299035,
      "grad_norm": 9.967268943786621,
      "learning_rate": 4.916666666666667e-07,
      "loss": 0.2408,
      "num_input_tokens_seen": 886144,
      "step": 59
    },
    {
      "epoch": 0.7717041800643086,
      "grad_norm": 23.841659545898438,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.2133,
      "num_input_tokens_seen": 901408,
      "step": 60
    },
    {
      "epoch": 0.7845659163987139,
      "grad_norm": 16.51668357849121,
      "learning_rate": 5.083333333333334e-07,
      "loss": 0.2319,
      "num_input_tokens_seen": 916672,
      "step": 61
    },
    {
      "epoch": 0.797427652733119,
      "grad_norm": 51.05732345581055,
      "learning_rate": 5.166666666666667e-07,
      "loss": 0.2703,
      "num_input_tokens_seen": 931168,
      "step": 62
    },
    {
      "epoch": 0.8102893890675241,
      "grad_norm": 61.92572784423828,
      "learning_rate": 5.250000000000001e-07,
      "loss": 0.2647,
      "num_input_tokens_seen": 946816,
      "step": 63
    },
    {
      "epoch": 0.8231511254019293,
      "grad_norm": 10.283011436462402,
      "learning_rate": 5.333333333333335e-07,
      "loss": 0.2288,
      "num_input_tokens_seen": 961248,
      "step": 64
    },
    {
      "epoch": 0.8360128617363344,
      "grad_norm": 45.712772369384766,
      "learning_rate": 5.416666666666667e-07,
      "loss": 0.2426,
      "num_input_tokens_seen": 976736,
      "step": 65
    },
    {
      "epoch": 0.8488745980707395,
      "grad_norm": 22.493711471557617,
      "learning_rate": 5.5e-07,
      "loss": 0.1936,
      "num_input_tokens_seen": 992192,
      "step": 66
    },
    {
      "epoch": 0.8617363344051447,
      "grad_norm": 10.771052360534668,
      "learning_rate": 5.583333333333333e-07,
      "loss": 0.192,
      "num_input_tokens_seen": 1007808,
      "step": 67
    },
    {
      "epoch": 0.8745980707395499,
      "grad_norm": 11.903575897216797,
      "learning_rate": 5.666666666666667e-07,
      "loss": 0.1956,
      "num_input_tokens_seen": 1022176,
      "step": 68
    },
    {
      "epoch": 0.887459807073955,
      "grad_norm": 23.1299991607666,
      "learning_rate": 5.750000000000001e-07,
      "loss": 0.1862,
      "num_input_tokens_seen": 1037280,
      "step": 69
    },
    {
      "epoch": 0.9003215434083601,
      "grad_norm": 11.018325805664062,
      "learning_rate": 5.833333333333334e-07,
      "loss": 0.195,
      "num_input_tokens_seen": 1051392,
      "step": 70
    },
    {
      "epoch": 0.9131832797427653,
      "grad_norm": 27.96196174621582,
      "learning_rate": 5.916666666666667e-07,
      "loss": 0.1971,
      "num_input_tokens_seen": 1066432,
      "step": 71
    },
    {
      "epoch": 0.9260450160771704,
      "grad_norm": 14.441658020019531,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.1683,
      "num_input_tokens_seen": 1081696,
      "step": 72
    },
    {
      "epoch": 0.9389067524115756,
      "grad_norm": 17.725786209106445,
      "learning_rate": 6.083333333333334e-07,
      "loss": 0.2003,
      "num_input_tokens_seen": 1096672,
      "step": 73
    },
    {
      "epoch": 0.9517684887459807,
      "grad_norm": 26.286319732666016,
      "learning_rate": 6.166666666666668e-07,
      "loss": 0.1543,
      "num_input_tokens_seen": 1111488,
      "step": 74
    },
    {
      "epoch": 0.9646302250803859,
      "grad_norm": 38.698097229003906,
      "learning_rate": 6.25e-07,
      "loss": 0.1982,
      "num_input_tokens_seen": 1126368,
      "step": 75
    },
    {
      "epoch": 0.977491961414791,
      "grad_norm": 8.287293434143066,
      "learning_rate": 6.333333333333334e-07,
      "loss": 0.1545,
      "num_input_tokens_seen": 1141536,
      "step": 76
    },
    {
      "epoch": 0.9903536977491961,
      "grad_norm": 44.253807067871094,
      "learning_rate": 6.416666666666667e-07,
      "loss": 0.1573,
      "num_input_tokens_seen": 1156320,
      "step": 77
    },
    {
      "epoch": 1.0032154340836013,
      "grad_norm": 57.11432647705078,
      "learning_rate": 6.5e-07,
      "loss": 0.1788,
      "num_input_tokens_seen": 1171904,
      "step": 78
    },
    {
      "epoch": 1.0160771704180065,
      "grad_norm": 21.049449920654297,
      "learning_rate": 6.583333333333333e-07,
      "loss": 0.187,
      "num_input_tokens_seen": 1187232,
      "step": 79
    },
    {
      "epoch": 1.0289389067524115,
      "grad_norm": 67.61766052246094,
      "learning_rate": 6.666666666666667e-07,
      "loss": 0.2009,
      "num_input_tokens_seen": 1201632,
      "step": 80
    },
    {
      "epoch": 1.0418006430868167,
      "grad_norm": 91.35673522949219,
      "learning_rate": 6.750000000000001e-07,
      "loss": 0.2655,
      "num_input_tokens_seen": 1216832,
      "step": 81
    },
    {
      "epoch": 1.0546623794212218,
      "grad_norm": 54.41016387939453,
      "learning_rate": 6.833333333333334e-07,
      "loss": 0.2041,
      "num_input_tokens_seen": 1232128,
      "step": 82
    },
    {
      "epoch": 1.067524115755627,
      "grad_norm": 10.082817077636719,
      "learning_rate": 6.916666666666668e-07,
      "loss": 0.1675,
      "num_input_tokens_seen": 1246944,
      "step": 83
    },
    {
      "epoch": 1.0803858520900322,
      "grad_norm": 43.82609558105469,
      "learning_rate": 7.000000000000001e-07,
      "loss": 0.2097,
      "num_input_tokens_seen": 1262624,
      "step": 84
    },
    {
      "epoch": 1.0932475884244373,
      "grad_norm": 51.51130676269531,
      "learning_rate": 7.083333333333334e-07,
      "loss": 0.2171,
      "num_input_tokens_seen": 1278080,
      "step": 85
    },
    {
      "epoch": 1.1061093247588425,
      "grad_norm": 35.92145538330078,
      "learning_rate": 7.166666666666668e-07,
      "loss": 0.1702,
      "num_input_tokens_seen": 1293280,
      "step": 86
    },
    {
      "epoch": 1.1189710610932475,
      "grad_norm": 8.067242622375488,
      "learning_rate": 7.25e-07,
      "loss": 0.1255,
      "num_input_tokens_seen": 1307456,
      "step": 87
    },
    {
      "epoch": 1.1318327974276527,
      "grad_norm": 28.8727970123291,
      "learning_rate": 7.333333333333334e-07,
      "loss": 0.1826,
      "num_input_tokens_seen": 1323168,
      "step": 88
    },
    {
      "epoch": 1.144694533762058,
      "grad_norm": 45.813865661621094,
      "learning_rate": 7.416666666666668e-07,
      "loss": 0.2039,
      "num_input_tokens_seen": 1337984,
      "step": 89
    },
    {
      "epoch": 1.157556270096463,
      "grad_norm": 53.024105072021484,
      "learning_rate": 7.5e-07,
      "loss": 0.2337,
      "num_input_tokens_seen": 1353568,
      "step": 90
    },
    {
      "epoch": 1.1704180064308682,
      "grad_norm": 17.642776489257812,
      "learning_rate": 7.583333333333334e-07,
      "loss": 0.1466,
      "num_input_tokens_seen": 1368800,
      "step": 91
    },
    {
      "epoch": 1.1832797427652733,
      "grad_norm": 17.741811752319336,
      "learning_rate": 7.666666666666667e-07,
      "loss": 0.1295,
      "num_input_tokens_seen": 1384192,
      "step": 92
    },
    {
      "epoch": 1.1961414790996785,
      "grad_norm": 24.114030838012695,
      "learning_rate": 7.750000000000001e-07,
      "loss": 0.1525,
      "num_input_tokens_seen": 1398784,
      "step": 93
    },
    {
      "epoch": 1.2090032154340835,
      "grad_norm": 27.668102264404297,
      "learning_rate": 7.833333333333335e-07,
      "loss": 0.1735,
      "num_input_tokens_seen": 1414304,
      "step": 94
    },
    {
      "epoch": 1.2218649517684887,
      "grad_norm": 7.892695426940918,
      "learning_rate": 7.916666666666667e-07,
      "loss": 0.1484,
      "num_input_tokens_seen": 1430880,
      "step": 95
    },
    {
      "epoch": 1.234726688102894,
      "grad_norm": 12.985011100769043,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.1517,
      "num_input_tokens_seen": 1445760,
      "step": 96
    },
    {
      "epoch": 1.247588424437299,
      "grad_norm": 20.373502731323242,
      "learning_rate": 8.083333333333334e-07,
      "loss": 0.1429,
      "num_input_tokens_seen": 1460576,
      "step": 97
    },
    {
      "epoch": 1.2604501607717042,
      "grad_norm": 8.579370498657227,
      "learning_rate": 8.166666666666668e-07,
      "loss": 0.151,
      "num_input_tokens_seen": 1476032,
      "step": 98
    },
    {
      "epoch": 1.2733118971061093,
      "grad_norm": 6.366987705230713,
      "learning_rate": 8.250000000000001e-07,
      "loss": 0.1217,
      "num_input_tokens_seen": 1491008,
      "step": 99
    },
    {
      "epoch": 1.2861736334405145,
      "grad_norm": 19.89562225341797,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.1366,
      "num_input_tokens_seen": 1505120,
      "step": 100
    },
    {
      "epoch": 1.2990353697749195,
      "grad_norm": 10.997241020202637,
      "learning_rate": 8.416666666666667e-07,
      "loss": 0.1534,
      "num_input_tokens_seen": 1520736,
      "step": 101
    },
    {
      "epoch": 1.3118971061093248,
      "grad_norm": 17.20572280883789,
      "learning_rate": 8.500000000000001e-07,
      "loss": 0.141,
      "num_input_tokens_seen": 1535424,
      "step": 102
    },
    {
      "epoch": 1.32475884244373,
      "grad_norm": 10.88858413696289,
      "learning_rate": 8.583333333333334e-07,
      "loss": 0.1238,
      "num_input_tokens_seen": 1549856,
      "step": 103
    },
    {
      "epoch": 1.337620578778135,
      "grad_norm": 6.872950077056885,
      "learning_rate": 8.666666666666668e-07,
      "loss": 0.1241,
      "num_input_tokens_seen": 1564512,
      "step": 104
    },
    {
      "epoch": 1.3504823151125402,
      "grad_norm": 6.931344509124756,
      "learning_rate": 8.75e-07,
      "loss": 0.1414,
      "num_input_tokens_seen": 1578976,
      "step": 105
    },
    {
      "epoch": 1.3633440514469453,
      "grad_norm": 12.237205505371094,
      "learning_rate": 8.833333333333334e-07,
      "loss": 0.1296,
      "num_input_tokens_seen": 1593952,
      "step": 106
    },
    {
      "epoch": 1.3762057877813505,
      "grad_norm": 13.147500991821289,
      "learning_rate": 8.916666666666668e-07,
      "loss": 0.1232,
      "num_input_tokens_seen": 1608704,
      "step": 107
    },
    {
      "epoch": 1.3890675241157555,
      "grad_norm": 23.525880813598633,
      "learning_rate": 9.000000000000001e-07,
      "loss": 0.1625,
      "num_input_tokens_seen": 1623936,
      "step": 108
    },
    {
      "epoch": 1.4019292604501608,
      "grad_norm": 25.576210021972656,
      "learning_rate": 9.083333333333335e-07,
      "loss": 0.1509,
      "num_input_tokens_seen": 1639264,
      "step": 109
    },
    {
      "epoch": 1.414790996784566,
      "grad_norm": 18.12908935546875,
      "learning_rate": 9.166666666666666e-07,
      "loss": 0.1416,
      "num_input_tokens_seen": 1654528,
      "step": 110
    },
    {
      "epoch": 1.427652733118971,
      "grad_norm": 14.664992332458496,
      "learning_rate": 9.25e-07,
      "loss": 0.1481,
      "num_input_tokens_seen": 1669824,
      "step": 111
    },
    {
      "epoch": 1.4405144694533762,
      "grad_norm": 17.804119110107422,
      "learning_rate": 9.333333333333334e-07,
      "loss": 0.1303,
      "num_input_tokens_seen": 1684800,
      "step": 112
    },
    {
      "epoch": 1.4533762057877815,
      "grad_norm": 10.886981964111328,
      "learning_rate": 9.416666666666667e-07,
      "loss": 0.116,
      "num_input_tokens_seen": 1699712,
      "step": 113
    },
    {
      "epoch": 1.4662379421221865,
      "grad_norm": 14.883463859558105,
      "learning_rate": 9.500000000000001e-07,
      "loss": 0.0981,
      "num_input_tokens_seen": 1714208,
      "step": 114
    },
    {
      "epoch": 1.4790996784565915,
      "grad_norm": 9.800952911376953,
      "learning_rate": 9.583333333333334e-07,
      "loss": 0.1174,
      "num_input_tokens_seen": 1728672,
      "step": 115
    },
    {
      "epoch": 1.4919614147909968,
      "grad_norm": 8.804801940917969,
      "learning_rate": 9.666666666666668e-07,
      "loss": 0.1458,
      "num_input_tokens_seen": 1743008,
      "step": 116
    },
    {
      "epoch": 1.504823151125402,
      "grad_norm": 9.836427688598633,
      "learning_rate": 9.750000000000002e-07,
      "loss": 0.0952,
      "num_input_tokens_seen": 1758016,
      "step": 117
    },
    {
      "epoch": 1.517684887459807,
      "grad_norm": 7.373986721038818,
      "learning_rate": 9.833333333333334e-07,
      "loss": 0.1233,
      "num_input_tokens_seen": 1772512,
      "step": 118
    },
    {
      "epoch": 1.5305466237942122,
      "grad_norm": 16.394031524658203,
      "learning_rate": 9.916666666666668e-07,
      "loss": 0.127,
      "num_input_tokens_seen": 1787808,
      "step": 119
    },
    {
      "epoch": 1.5434083601286175,
      "grad_norm": 8.336946487426758,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.1121,
      "num_input_tokens_seen": 1803104,
      "step": 120
    },
    {
      "epoch": 1.5562700964630225,
      "grad_norm": 28.298938751220703,
      "learning_rate": 1.0083333333333333e-06,
      "loss": 0.1432,
      "num_input_tokens_seen": 1817632,
      "step": 121
    },
    {
      "epoch": 1.5691318327974275,
      "grad_norm": 25.474973678588867,
      "learning_rate": 1.0166666666666667e-06,
      "loss": 0.1446,
      "num_input_tokens_seen": 1832576,
      "step": 122
    },
    {
      "epoch": 1.5819935691318328,
      "grad_norm": 12.33470630645752,
      "learning_rate": 1.025e-06,
      "loss": 0.1056,
      "num_input_tokens_seen": 1847872,
      "step": 123
    },
    {
      "epoch": 1.594855305466238,
      "grad_norm": 16.17420768737793,
      "learning_rate": 1.0333333333333333e-06,
      "loss": 0.1193,
      "num_input_tokens_seen": 1863008,
      "step": 124
    },
    {
      "epoch": 1.607717041800643,
      "grad_norm": 23.109798431396484,
      "learning_rate": 1.0416666666666667e-06,
      "loss": 0.1409,
      "num_input_tokens_seen": 1878688,
      "step": 125
    },
    {
      "epoch": 1.6205787781350482,
      "grad_norm": 15.091007232666016,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 0.1154,
      "num_input_tokens_seen": 1894144,
      "step": 126
    },
    {
      "epoch": 1.6334405144694535,
      "grad_norm": 7.409396171569824,
      "learning_rate": 1.0583333333333335e-06,
      "loss": 0.1046,
      "num_input_tokens_seen": 1909504,
      "step": 127
    },
    {
      "epoch": 1.6463022508038585,
      "grad_norm": 12.553366661071777,
      "learning_rate": 1.066666666666667e-06,
      "loss": 0.09,
      "num_input_tokens_seen": 1924896,
      "step": 128
    },
    {
      "epoch": 1.6591639871382635,
      "grad_norm": 10.23271369934082,
      "learning_rate": 1.075e-06,
      "loss": 0.0858,
      "num_input_tokens_seen": 1939232,
      "step": 129
    },
    {
      "epoch": 1.6720257234726688,
      "grad_norm": 6.091500282287598,
      "learning_rate": 1.0833333333333335e-06,
      "loss": 0.0782,
      "num_input_tokens_seen": 1954720,
      "step": 130
    },
    {
      "epoch": 1.684887459807074,
      "grad_norm": 12.670690536499023,
      "learning_rate": 1.0916666666666667e-06,
      "loss": 0.1429,
      "num_input_tokens_seen": 1969632,
      "step": 131
    },
    {
      "epoch": 1.697749196141479,
      "grad_norm": 12.42375659942627,
      "learning_rate": 1.1e-06,
      "loss": 0.1121,
      "num_input_tokens_seen": 1984544,
      "step": 132
    },
    {
      "epoch": 1.7106109324758842,
      "grad_norm": 4.910019874572754,
      "learning_rate": 1.1083333333333335e-06,
      "loss": 0.0458,
      "num_input_tokens_seen": 1999584,
      "step": 133
    },
    {
      "epoch": 1.7234726688102895,
      "grad_norm": 8.262114524841309,
      "learning_rate": 1.1166666666666666e-06,
      "loss": 0.1217,
      "num_input_tokens_seen": 2014048,
      "step": 134
    },
    {
      "epoch": 1.7363344051446945,
      "grad_norm": 21.927522659301758,
      "learning_rate": 1.125e-06,
      "loss": 0.1253,
      "num_input_tokens_seen": 2029312,
      "step": 135
    },
    {
      "epoch": 1.7491961414790995,
      "grad_norm": 8.816388130187988,
      "learning_rate": 1.1333333333333334e-06,
      "loss": 0.077,
      "num_input_tokens_seen": 2044192,
      "step": 136
    },
    {
      "epoch": 1.762057877813505,
      "grad_norm": 6.877133846282959,
      "learning_rate": 1.1416666666666668e-06,
      "loss": 0.0719,
      "num_input_tokens_seen": 2059552,
      "step": 137
    },
    {
      "epoch": 1.77491961414791,
      "grad_norm": 5.565485000610352,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 0.0916,
      "num_input_tokens_seen": 2074336,
      "step": 138
    },
    {
      "epoch": 1.787781350482315,
      "grad_norm": 11.287223815917969,
      "learning_rate": 1.1583333333333334e-06,
      "loss": 0.0812,
      "num_input_tokens_seen": 2089280,
      "step": 139
    },
    {
      "epoch": 1.8006430868167203,
      "grad_norm": 7.971100807189941,
      "learning_rate": 1.1666666666666668e-06,
      "loss": 0.1176,
      "num_input_tokens_seen": 2104256,
      "step": 140
    },
    {
      "epoch": 1.8135048231511255,
      "grad_norm": 7.78786563873291,
      "learning_rate": 1.175e-06,
      "loss": 0.0631,
      "num_input_tokens_seen": 2119232,
      "step": 141
    },
    {
      "epoch": 1.8263665594855305,
      "grad_norm": 8.365852355957031,
      "learning_rate": 1.1833333333333334e-06,
      "loss": 0.1137,
      "num_input_tokens_seen": 2133824,
      "step": 142
    },
    {
      "epoch": 1.8392282958199357,
      "grad_norm": 16.2335262298584,
      "learning_rate": 1.1916666666666668e-06,
      "loss": 0.0958,
      "num_input_tokens_seen": 2148480,
      "step": 143
    },
    {
      "epoch": 1.852090032154341,
      "grad_norm": 8.455872535705566,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.1343,
      "num_input_tokens_seen": 2163328,
      "step": 144
    },
    {
      "epoch": 1.864951768488746,
      "grad_norm": 13.241278648376465,
      "learning_rate": 1.2083333333333333e-06,
      "loss": 0.1101,
      "num_input_tokens_seen": 2177568,
      "step": 145
    },
    {
      "epoch": 1.877813504823151,
      "grad_norm": 13.46755313873291,
      "learning_rate": 1.2166666666666667e-06,
      "loss": 0.0914,
      "num_input_tokens_seen": 2192384,
      "step": 146
    },
    {
      "epoch": 1.8906752411575563,
      "grad_norm": 7.21525764465332,
      "learning_rate": 1.2250000000000001e-06,
      "loss": 0.1114,
      "num_input_tokens_seen": 2207584,
      "step": 147
    },
    {
      "epoch": 1.9035369774919615,
      "grad_norm": 5.000830173492432,
      "learning_rate": 1.2333333333333335e-06,
      "loss": 0.083,
      "num_input_tokens_seen": 2222208,
      "step": 148
    },
    {
      "epoch": 1.9163987138263665,
      "grad_norm": 8.995044708251953,
      "learning_rate": 1.2416666666666667e-06,
      "loss": 0.1095,
      "num_input_tokens_seen": 2237760,
      "step": 149
    },
    {
      "epoch": 1.9292604501607717,
      "grad_norm": 7.872910976409912,
      "learning_rate": 1.25e-06,
      "loss": 0.0662,
      "num_input_tokens_seen": 2252768,
      "step": 150
    },
    {
      "epoch": 1.942122186495177,
      "grad_norm": 11.594476699829102,
      "learning_rate": 1.2583333333333333e-06,
      "loss": 0.0979,
      "num_input_tokens_seen": 2268736,
      "step": 151
    },
    {
      "epoch": 1.954983922829582,
      "grad_norm": 14.245850563049316,
      "learning_rate": 1.2666666666666669e-06,
      "loss": 0.0847,
      "num_input_tokens_seen": 2283840,
      "step": 152
    },
    {
      "epoch": 1.967845659163987,
      "grad_norm": 12.000141143798828,
      "learning_rate": 1.275e-06,
      "loss": 0.0949,
      "num_input_tokens_seen": 2297696,
      "step": 153
    },
    {
      "epoch": 1.9807073954983923,
      "grad_norm": 16.763721466064453,
      "learning_rate": 1.2833333333333335e-06,
      "loss": 0.1206,
      "num_input_tokens_seen": 2311936,
      "step": 154
    },
    {
      "epoch": 1.9935691318327975,
      "grad_norm": 22.926712036132812,
      "learning_rate": 1.2916666666666669e-06,
      "loss": 0.139,
      "num_input_tokens_seen": 2327392,
      "step": 155
    },
    {
      "epoch": 2.0064308681672025,
      "grad_norm": 7.715733528137207,
      "learning_rate": 1.3e-06,
      "loss": 0.07,
      "num_input_tokens_seen": 2341952,
      "step": 156
    },
    {
      "epoch": 2.0192926045016075,
      "grad_norm": 5.047181129455566,
      "learning_rate": 1.3083333333333334e-06,
      "loss": 0.0562,
      "num_input_tokens_seen": 2356768,
      "step": 157
    },
    {
      "epoch": 2.032154340836013,
      "grad_norm": 10.79956340789795,
      "learning_rate": 1.3166666666666666e-06,
      "loss": 0.0456,
      "num_input_tokens_seen": 2371584,
      "step": 158
    },
    {
      "epoch": 2.045016077170418,
      "grad_norm": 13.864229202270508,
      "learning_rate": 1.3250000000000002e-06,
      "loss": 0.0582,
      "num_input_tokens_seen": 2386880,
      "step": 159
    },
    {
      "epoch": 2.057877813504823,
      "grad_norm": 7.294124603271484,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 0.0452,
      "num_input_tokens_seen": 2401248,
      "step": 160
    },
    {
      "epoch": 2.0707395498392285,
      "grad_norm": 5.838871479034424,
      "learning_rate": 1.3416666666666666e-06,
      "loss": 0.0553,
      "num_input_tokens_seen": 2415968,
      "step": 161
    },
    {
      "epoch": 2.0836012861736335,
      "grad_norm": 20.286243438720703,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.1108,
      "num_input_tokens_seen": 2430368,
      "step": 162
    },
    {
      "epoch": 2.0964630225080385,
      "grad_norm": 12.720942497253418,
      "learning_rate": 1.3583333333333334e-06,
      "loss": 0.0791,
      "num_input_tokens_seen": 2445056,
      "step": 163
    },
    {
      "epoch": 2.1093247588424435,
      "grad_norm": 9.107832908630371,
      "learning_rate": 1.3666666666666668e-06,
      "loss": 0.0637,
      "num_input_tokens_seen": 2459744,
      "step": 164
    },
    {
      "epoch": 2.122186495176849,
      "grad_norm": 6.473385334014893,
      "learning_rate": 1.3750000000000002e-06,
      "loss": 0.0404,
      "num_input_tokens_seen": 2474400,
      "step": 165
    },
    {
      "epoch": 2.135048231511254,
      "grad_norm": 6.510437488555908,
      "learning_rate": 1.3833333333333336e-06,
      "loss": 0.0372,
      "num_input_tokens_seen": 2489280,
      "step": 166
    },
    {
      "epoch": 2.147909967845659,
      "grad_norm": 8.90833854675293,
      "learning_rate": 1.3916666666666668e-06,
      "loss": 0.045,
      "num_input_tokens_seen": 2503776,
      "step": 167
    },
    {
      "epoch": 2.1607717041800645,
      "grad_norm": 11.84021282196045,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.0964,
      "num_input_tokens_seen": 2518752,
      "step": 168
    },
    {
      "epoch": 2.1736334405144695,
      "grad_norm": 5.904385566711426,
      "learning_rate": 1.4083333333333335e-06,
      "loss": 0.0543,
      "num_input_tokens_seen": 2533568,
      "step": 169
    },
    {
      "epoch": 2.1864951768488745,
      "grad_norm": 6.935682773590088,
      "learning_rate": 1.4166666666666667e-06,
      "loss": 0.071,
      "num_input_tokens_seen": 2548640,
      "step": 170
    },
    {
      "epoch": 2.19935691318328,
      "grad_norm": 3.85262131690979,
      "learning_rate": 1.425e-06,
      "loss": 0.0285,
      "num_input_tokens_seen": 2563232,
      "step": 171
    },
    {
      "epoch": 2.212218649517685,
      "grad_norm": 5.231224060058594,
      "learning_rate": 1.4333333333333335e-06,
      "loss": 0.0399,
      "num_input_tokens_seen": 2578336,
      "step": 172
    },
    {
      "epoch": 2.22508038585209,
      "grad_norm": 6.751733779907227,
      "learning_rate": 1.4416666666666667e-06,
      "loss": 0.067,
      "num_input_tokens_seen": 2594336,
      "step": 173
    },
    {
      "epoch": 2.237942122186495,
      "grad_norm": 4.619236469268799,
      "learning_rate": 1.45e-06,
      "loss": 0.0436,
      "num_input_tokens_seen": 2609408,
      "step": 174
    },
    {
      "epoch": 2.2508038585209005,
      "grad_norm": 4.799732685089111,
      "learning_rate": 1.4583333333333335e-06,
      "loss": 0.0522,
      "num_input_tokens_seen": 2624288,
      "step": 175
    },
    {
      "epoch": 2.2636655948553055,
      "grad_norm": 7.050292015075684,
      "learning_rate": 1.4666666666666669e-06,
      "loss": 0.0521,
      "num_input_tokens_seen": 2639296,
      "step": 176
    },
    {
      "epoch": 2.2765273311897105,
      "grad_norm": 6.788993835449219,
      "learning_rate": 1.475e-06,
      "loss": 0.0446,
      "num_input_tokens_seen": 2653856,
      "step": 177
    },
    {
      "epoch": 2.289389067524116,
      "grad_norm": 7.368555545806885,
      "learning_rate": 1.4833333333333337e-06,
      "loss": 0.0378,
      "num_input_tokens_seen": 2669216,
      "step": 178
    },
    {
      "epoch": 2.302250803858521,
      "grad_norm": 6.674912452697754,
      "learning_rate": 1.4916666666666669e-06,
      "loss": 0.0387,
      "num_input_tokens_seen": 2683264,
      "step": 179
    },
    {
      "epoch": 2.315112540192926,
      "grad_norm": 4.754708766937256,
      "learning_rate": 1.5e-06,
      "loss": 0.036,
      "num_input_tokens_seen": 2698112,
      "step": 180
    },
    {
      "epoch": 2.327974276527331,
      "grad_norm": 11.09841251373291,
      "learning_rate": 1.5083333333333336e-06,
      "loss": 0.0765,
      "num_input_tokens_seen": 2712800,
      "step": 181
    },
    {
      "epoch": 2.3408360128617365,
      "grad_norm": 13.097764015197754,
      "learning_rate": 1.5166666666666668e-06,
      "loss": 0.0884,
      "num_input_tokens_seen": 2728032,
      "step": 182
    },
    {
      "epoch": 2.3536977491961415,
      "grad_norm": 14.366617202758789,
      "learning_rate": 1.525e-06,
      "loss": 0.0801,
      "num_input_tokens_seen": 2742432,
      "step": 183
    },
    {
      "epoch": 2.3665594855305465,
      "grad_norm": 6.947451591491699,
      "learning_rate": 1.5333333333333334e-06,
      "loss": 0.0276,
      "num_input_tokens_seen": 2758176,
      "step": 184
    },
    {
      "epoch": 2.379421221864952,
      "grad_norm": 11.02580738067627,
      "learning_rate": 1.5416666666666668e-06,
      "loss": 0.0778,
      "num_input_tokens_seen": 2772768,
      "step": 185
    },
    {
      "epoch": 2.392282958199357,
      "grad_norm": 15.404431343078613,
      "learning_rate": 1.5500000000000002e-06,
      "loss": 0.0726,
      "num_input_tokens_seen": 2787968,
      "step": 186
    },
    {
      "epoch": 2.405144694533762,
      "grad_norm": 19.688905715942383,
      "learning_rate": 1.5583333333333334e-06,
      "loss": 0.1381,
      "num_input_tokens_seen": 2802560,
      "step": 187
    },
    {
      "epoch": 2.418006430868167,
      "grad_norm": 5.732620716094971,
      "learning_rate": 1.566666666666667e-06,
      "loss": 0.0408,
      "num_input_tokens_seen": 2817856,
      "step": 188
    },
    {
      "epoch": 2.4308681672025725,
      "grad_norm": 9.736536026000977,
      "learning_rate": 1.5750000000000002e-06,
      "loss": 0.1066,
      "num_input_tokens_seen": 2833408,
      "step": 189
    },
    {
      "epoch": 2.4437299035369775,
      "grad_norm": 9.312838554382324,
      "learning_rate": 1.5833333333333333e-06,
      "loss": 0.0686,
      "num_input_tokens_seen": 2848832,
      "step": 190
    },
    {
      "epoch": 2.4565916398713825,
      "grad_norm": 7.711926460266113,
      "learning_rate": 1.591666666666667e-06,
      "loss": 0.0428,
      "num_input_tokens_seen": 2863968,
      "step": 191
    },
    {
      "epoch": 2.469453376205788,
      "grad_norm": 6.855398654937744,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.0387,
      "num_input_tokens_seen": 2878944,
      "step": 192
    },
    {
      "epoch": 2.482315112540193,
      "grad_norm": 3.374630928039551,
      "learning_rate": 1.6083333333333333e-06,
      "loss": 0.0489,
      "num_input_tokens_seen": 2894112,
      "step": 193
    },
    {
      "epoch": 2.495176848874598,
      "grad_norm": 7.452945709228516,
      "learning_rate": 1.6166666666666667e-06,
      "loss": 0.0621,
      "num_input_tokens_seen": 2908928,
      "step": 194
    },
    {
      "epoch": 2.508038585209003,
      "grad_norm": 12.478065490722656,
      "learning_rate": 1.6250000000000001e-06,
      "loss": 0.0651,
      "num_input_tokens_seen": 2923680,
      "step": 195
    },
    {
      "epoch": 2.5209003215434085,
      "grad_norm": 4.181528091430664,
      "learning_rate": 1.6333333333333335e-06,
      "loss": 0.0398,
      "num_input_tokens_seen": 2939136,
      "step": 196
    },
    {
      "epoch": 2.5337620578778135,
      "grad_norm": 5.443283557891846,
      "learning_rate": 1.6416666666666667e-06,
      "loss": 0.0369,
      "num_input_tokens_seen": 2953760,
      "step": 197
    },
    {
      "epoch": 2.5466237942122185,
      "grad_norm": 7.586729526519775,
      "learning_rate": 1.6500000000000003e-06,
      "loss": 0.0582,
      "num_input_tokens_seen": 2968640,
      "step": 198
    },
    {
      "epoch": 2.559485530546624,
      "grad_norm": 6.357609272003174,
      "learning_rate": 1.6583333333333335e-06,
      "loss": 0.0479,
      "num_input_tokens_seen": 2983456,
      "step": 199
    },
    {
      "epoch": 2.572347266881029,
      "grad_norm": 8.761418342590332,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.0561,
      "num_input_tokens_seen": 2998144,
      "step": 200
    },
    {
      "epoch": 2.585209003215434,
      "grad_norm": 7.058523654937744,
      "learning_rate": 1.6750000000000003e-06,
      "loss": 0.0497,
      "num_input_tokens_seen": 3013248,
      "step": 201
    },
    {
      "epoch": 2.598070739549839,
      "grad_norm": 8.091582298278809,
      "learning_rate": 1.6833333333333335e-06,
      "loss": 0.063,
      "num_input_tokens_seen": 3028320,
      "step": 202
    },
    {
      "epoch": 2.6109324758842445,
      "grad_norm": 9.102255821228027,
      "learning_rate": 1.6916666666666666e-06,
      "loss": 0.054,
      "num_input_tokens_seen": 3043232,
      "step": 203
    },
    {
      "epoch": 2.6237942122186495,
      "grad_norm": 9.521942138671875,
      "learning_rate": 1.7000000000000002e-06,
      "loss": 0.0729,
      "num_input_tokens_seen": 3058848,
      "step": 204
    },
    {
      "epoch": 2.6366559485530545,
      "grad_norm": 7.026569843292236,
      "learning_rate": 1.7083333333333334e-06,
      "loss": 0.0685,
      "num_input_tokens_seen": 3074080,
      "step": 205
    },
    {
      "epoch": 2.64951768488746,
      "grad_norm": 13.492867469787598,
      "learning_rate": 1.7166666666666668e-06,
      "loss": 0.0823,
      "num_input_tokens_seen": 3089152,
      "step": 206
    },
    {
      "epoch": 2.662379421221865,
      "grad_norm": 6.880157947540283,
      "learning_rate": 1.725e-06,
      "loss": 0.0322,
      "num_input_tokens_seen": 3103616,
      "step": 207
    },
    {
      "epoch": 2.67524115755627,
      "grad_norm": 8.232446670532227,
      "learning_rate": 1.7333333333333336e-06,
      "loss": 0.0821,
      "num_input_tokens_seen": 3119200,
      "step": 208
    },
    {
      "epoch": 2.688102893890675,
      "grad_norm": 7.365771293640137,
      "learning_rate": 1.7416666666666668e-06,
      "loss": 0.0561,
      "num_input_tokens_seen": 3134400,
      "step": 209
    },
    {
      "epoch": 2.7009646302250805,
      "grad_norm": 9.879140853881836,
      "learning_rate": 1.75e-06,
      "loss": 0.0468,
      "num_input_tokens_seen": 3149248,
      "step": 210
    },
    {
      "epoch": 2.7138263665594855,
      "grad_norm": 8.636083602905273,
      "learning_rate": 1.7583333333333336e-06,
      "loss": 0.0593,
      "num_input_tokens_seen": 3164800,
      "step": 211
    },
    {
      "epoch": 2.7266881028938905,
      "grad_norm": 5.015254497528076,
      "learning_rate": 1.7666666666666668e-06,
      "loss": 0.0403,
      "num_input_tokens_seen": 3179520,
      "step": 212
    },
    {
      "epoch": 2.739549839228296,
      "grad_norm": 7.730808258056641,
      "learning_rate": 1.7750000000000002e-06,
      "loss": 0.0459,
      "num_input_tokens_seen": 3194016,
      "step": 213
    },
    {
      "epoch": 2.752411575562701,
      "grad_norm": 6.433826923370361,
      "learning_rate": 1.7833333333333336e-06,
      "loss": 0.0509,
      "num_input_tokens_seen": 3208608,
      "step": 214
    },
    {
      "epoch": 2.765273311897106,
      "grad_norm": 7.32877779006958,
      "learning_rate": 1.7916666666666667e-06,
      "loss": 0.0873,
      "num_input_tokens_seen": 3224096,
      "step": 215
    },
    {
      "epoch": 2.778135048231511,
      "grad_norm": 7.283654689788818,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.0618,
      "num_input_tokens_seen": 3239136,
      "step": 216
    },
    {
      "epoch": 2.7909967845659165,
      "grad_norm": 11.044650077819824,
      "learning_rate": 1.8083333333333335e-06,
      "loss": 0.1025,
      "num_input_tokens_seen": 3253184,
      "step": 217
    },
    {
      "epoch": 2.8038585209003215,
      "grad_norm": 7.287780284881592,
      "learning_rate": 1.816666666666667e-06,
      "loss": 0.0669,
      "num_input_tokens_seen": 3268000,
      "step": 218
    },
    {
      "epoch": 2.816720257234727,
      "grad_norm": 7.542795658111572,
      "learning_rate": 1.825e-06,
      "loss": 0.0447,
      "num_input_tokens_seen": 3282528,
      "step": 219
    },
    {
      "epoch": 2.829581993569132,
      "grad_norm": 9.812615394592285,
      "learning_rate": 1.8333333333333333e-06,
      "loss": 0.0635,
      "num_input_tokens_seen": 3297824,
      "step": 220
    },
    {
      "epoch": 2.842443729903537,
      "grad_norm": 5.7050580978393555,
      "learning_rate": 1.8416666666666669e-06,
      "loss": 0.0614,
      "num_input_tokens_seen": 3312480,
      "step": 221
    },
    {
      "epoch": 2.855305466237942,
      "grad_norm": 13.298068046569824,
      "learning_rate": 1.85e-06,
      "loss": 0.0646,
      "num_input_tokens_seen": 3327392,
      "step": 222
    },
    {
      "epoch": 2.868167202572347,
      "grad_norm": 13.815607070922852,
      "learning_rate": 1.8583333333333335e-06,
      "loss": 0.0893,
      "num_input_tokens_seen": 3342944,
      "step": 223
    },
    {
      "epoch": 2.8810289389067525,
      "grad_norm": 6.954331398010254,
      "learning_rate": 1.8666666666666669e-06,
      "loss": 0.0653,
      "num_input_tokens_seen": 3358112,
      "step": 224
    },
    {
      "epoch": 2.8938906752411575,
      "grad_norm": 5.133541584014893,
      "learning_rate": 1.8750000000000003e-06,
      "loss": 0.0402,
      "num_input_tokens_seen": 3373472,
      "step": 225
    },
    {
      "epoch": 2.906752411575563,
      "grad_norm": 7.754403114318848,
      "learning_rate": 1.8833333333333334e-06,
      "loss": 0.0407,
      "num_input_tokens_seen": 3389088,
      "step": 226
    },
    {
      "epoch": 2.919614147909968,
      "grad_norm": 9.570775985717773,
      "learning_rate": 1.8916666666666668e-06,
      "loss": 0.0949,
      "num_input_tokens_seen": 3403616,
      "step": 227
    },
    {
      "epoch": 2.932475884244373,
      "grad_norm": 9.21279525756836,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.0789,
      "num_input_tokens_seen": 3418944,
      "step": 228
    },
    {
      "epoch": 2.945337620578778,
      "grad_norm": 4.255781650543213,
      "learning_rate": 1.9083333333333334e-06,
      "loss": 0.0438,
      "num_input_tokens_seen": 3434048,
      "step": 229
    },
    {
      "epoch": 2.958199356913183,
      "grad_norm": 10.596036911010742,
      "learning_rate": 1.916666666666667e-06,
      "loss": 0.0905,
      "num_input_tokens_seen": 3449952,
      "step": 230
    },
    {
      "epoch": 2.9710610932475885,
      "grad_norm": 9.656909942626953,
      "learning_rate": 1.925e-06,
      "loss": 0.0495,
      "num_input_tokens_seen": 3464736,
      "step": 231
    },
    {
      "epoch": 2.9839228295819935,
      "grad_norm": 9.526041984558105,
      "learning_rate": 1.9333333333333336e-06,
      "loss": 0.0642,
      "num_input_tokens_seen": 3480416,
      "step": 232
    },
    {
      "epoch": 2.996784565916399,
      "grad_norm": 5.788015365600586,
      "learning_rate": 1.9416666666666666e-06,
      "loss": 0.0607,
      "num_input_tokens_seen": 3495264,
      "step": 233
    },
    {
      "epoch": 3.009646302250804,
      "grad_norm": 6.213415145874023,
      "learning_rate": 1.9500000000000004e-06,
      "loss": 0.0363,
      "num_input_tokens_seen": 3510080,
      "step": 234
    },
    {
      "epoch": 3.022508038585209,
      "grad_norm": 7.382534980773926,
      "learning_rate": 1.9583333333333334e-06,
      "loss": 0.0492,
      "num_input_tokens_seen": 3524736,
      "step": 235
    },
    {
      "epoch": 3.035369774919614,
      "grad_norm": 9.54261302947998,
      "learning_rate": 1.9666666666666668e-06,
      "loss": 0.0373,
      "num_input_tokens_seen": 3539968,
      "step": 236
    },
    {
      "epoch": 3.0482315112540195,
      "grad_norm": 3.8869571685791016,
      "learning_rate": 1.975e-06,
      "loss": 0.0203,
      "num_input_tokens_seen": 3554464,
      "step": 237
    },
    {
      "epoch": 3.0610932475884245,
      "grad_norm": 2.183758497238159,
      "learning_rate": 1.9833333333333335e-06,
      "loss": 0.0175,
      "num_input_tokens_seen": 3569664,
      "step": 238
    },
    {
      "epoch": 3.0739549839228295,
      "grad_norm": 3.851379871368408,
      "learning_rate": 1.991666666666667e-06,
      "loss": 0.0497,
      "num_input_tokens_seen": 3585248,
      "step": 239
    },
    {
      "epoch": 3.0868167202572345,
      "grad_norm": 6.622572898864746,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0361,
      "num_input_tokens_seen": 3600160,
      "step": 240
    },
    {
      "epoch": 3.09967845659164,
      "grad_norm": 3.4328978061676025,
      "learning_rate": 2.0083333333333337e-06,
      "loss": 0.0193,
      "num_input_tokens_seen": 3615232,
      "step": 241
    },
    {
      "epoch": 3.112540192926045,
      "grad_norm": 8.150415420532227,
      "learning_rate": 2.0166666666666667e-06,
      "loss": 0.0142,
      "num_input_tokens_seen": 3629312,
      "step": 242
    },
    {
      "epoch": 3.12540192926045,
      "grad_norm": 5.3073835372924805,
      "learning_rate": 2.025e-06,
      "loss": 0.0415,
      "num_input_tokens_seen": 3644064,
      "step": 243
    },
    {
      "epoch": 3.1382636655948555,
      "grad_norm": 4.33547830581665,
      "learning_rate": 2.0333333333333335e-06,
      "loss": 0.0178,
      "num_input_tokens_seen": 3659328,
      "step": 244
    },
    {
      "epoch": 3.1511254019292605,
      "grad_norm": 2.770535469055176,
      "learning_rate": 2.041666666666667e-06,
      "loss": 0.0166,
      "num_input_tokens_seen": 3674720,
      "step": 245
    },
    {
      "epoch": 3.1639871382636655,
      "grad_norm": 8.939215660095215,
      "learning_rate": 2.05e-06,
      "loss": 0.0424,
      "num_input_tokens_seen": 3689984,
      "step": 246
    },
    {
      "epoch": 3.176848874598071,
      "grad_norm": 4.35013484954834,
      "learning_rate": 2.0583333333333337e-06,
      "loss": 0.0464,
      "num_input_tokens_seen": 3705216,
      "step": 247
    },
    {
      "epoch": 3.189710610932476,
      "grad_norm": 4.8758368492126465,
      "learning_rate": 2.0666666666666666e-06,
      "loss": 0.0235,
      "num_input_tokens_seen": 3719712,
      "step": 248
    },
    {
      "epoch": 3.202572347266881,
      "grad_norm": 2.769258499145508,
      "learning_rate": 2.075e-06,
      "loss": 0.0128,
      "num_input_tokens_seen": 3734272,
      "step": 249
    },
    {
      "epoch": 3.215434083601286,
      "grad_norm": 4.699342727661133,
      "learning_rate": 2.0833333333333334e-06,
      "loss": 0.0319,
      "num_input_tokens_seen": 3750240,
      "step": 250
    },
    {
      "epoch": 3.2282958199356915,
      "grad_norm": 3.377748966217041,
      "learning_rate": 2.091666666666667e-06,
      "loss": 0.0196,
      "num_input_tokens_seen": 3764736,
      "step": 251
    },
    {
      "epoch": 3.2411575562700965,
      "grad_norm": 2.9469077587127686,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.0326,
      "num_input_tokens_seen": 3779744,
      "step": 252
    },
    {
      "epoch": 3.2540192926045015,
      "grad_norm": 3.4729647636413574,
      "learning_rate": 2.1083333333333336e-06,
      "loss": 0.017,
      "num_input_tokens_seen": 3794208,
      "step": 253
    },
    {
      "epoch": 3.266881028938907,
      "grad_norm": 3.1130521297454834,
      "learning_rate": 2.116666666666667e-06,
      "loss": 0.0372,
      "num_input_tokens_seen": 3809376,
      "step": 254
    },
    {
      "epoch": 3.279742765273312,
      "grad_norm": 3.0094053745269775,
      "learning_rate": 2.125e-06,
      "loss": 0.0165,
      "num_input_tokens_seen": 3824384,
      "step": 255
    },
    {
      "epoch": 3.292604501607717,
      "grad_norm": 2.0758721828460693,
      "learning_rate": 2.133333333333334e-06,
      "loss": 0.0142,
      "num_input_tokens_seen": 3840000,
      "step": 256
    },
    {
      "epoch": 3.305466237942122,
      "grad_norm": 2.0254931449890137,
      "learning_rate": 2.1416666666666668e-06,
      "loss": 0.0117,
      "num_input_tokens_seen": 3854240,
      "step": 257
    },
    {
      "epoch": 3.3183279742765275,
      "grad_norm": 5.01600980758667,
      "learning_rate": 2.15e-06,
      "loss": 0.0264,
      "num_input_tokens_seen": 3869792,
      "step": 258
    },
    {
      "epoch": 3.3311897106109325,
      "grad_norm": 7.374160289764404,
      "learning_rate": 2.1583333333333336e-06,
      "loss": 0.034,
      "num_input_tokens_seen": 3885152,
      "step": 259
    },
    {
      "epoch": 3.3440514469453375,
      "grad_norm": 7.036656856536865,
      "learning_rate": 2.166666666666667e-06,
      "loss": 0.031,
      "num_input_tokens_seen": 3900032,
      "step": 260
    },
    {
      "epoch": 3.356913183279743,
      "grad_norm": 4.439852237701416,
      "learning_rate": 2.1750000000000004e-06,
      "loss": 0.018,
      "num_input_tokens_seen": 3914432,
      "step": 261
    },
    {
      "epoch": 3.369774919614148,
      "grad_norm": 8.757259368896484,
      "learning_rate": 2.1833333333333333e-06,
      "loss": 0.0461,
      "num_input_tokens_seen": 3929600,
      "step": 262
    },
    {
      "epoch": 3.382636655948553,
      "grad_norm": 4.993197917938232,
      "learning_rate": 2.191666666666667e-06,
      "loss": 0.0372,
      "num_input_tokens_seen": 3944032,
      "step": 263
    },
    {
      "epoch": 3.395498392282958,
      "grad_norm": 5.627339839935303,
      "learning_rate": 2.2e-06,
      "loss": 0.0471,
      "num_input_tokens_seen": 3958464,
      "step": 264
    },
    {
      "epoch": 3.4083601286173635,
      "grad_norm": 6.644600868225098,
      "learning_rate": 2.2083333333333335e-06,
      "loss": 0.037,
      "num_input_tokens_seen": 3973024,
      "step": 265
    },
    {
      "epoch": 3.4212218649517685,
      "grad_norm": 4.708102703094482,
      "learning_rate": 2.216666666666667e-06,
      "loss": 0.0245,
      "num_input_tokens_seen": 3987520,
      "step": 266
    },
    {
      "epoch": 3.4340836012861735,
      "grad_norm": 3.4480528831481934,
      "learning_rate": 2.2250000000000003e-06,
      "loss": 0.0233,
      "num_input_tokens_seen": 4002656,
      "step": 267
    },
    {
      "epoch": 3.446945337620579,
      "grad_norm": 5.0224175453186035,
      "learning_rate": 2.2333333333333333e-06,
      "loss": 0.0256,
      "num_input_tokens_seen": 4017824,
      "step": 268
    },
    {
      "epoch": 3.459807073954984,
      "grad_norm": 9.12070369720459,
      "learning_rate": 2.2416666666666667e-06,
      "loss": 0.0345,
      "num_input_tokens_seen": 4032480,
      "step": 269
    },
    {
      "epoch": 3.472668810289389,
      "grad_norm": 5.336618900299072,
      "learning_rate": 2.25e-06,
      "loss": 0.0656,
      "num_input_tokens_seen": 4047648,
      "step": 270
    },
    {
      "epoch": 3.485530546623794,
      "grad_norm": 4.003684997558594,
      "learning_rate": 2.2583333333333335e-06,
      "loss": 0.0247,
      "num_input_tokens_seen": 4062688,
      "step": 271
    },
    {
      "epoch": 3.4983922829581995,
      "grad_norm": 5.631625175476074,
      "learning_rate": 2.266666666666667e-06,
      "loss": 0.051,
      "num_input_tokens_seen": 4077856,
      "step": 272
    },
    {
      "epoch": 3.5112540192926045,
      "grad_norm": 4.748563766479492,
      "learning_rate": 2.2750000000000002e-06,
      "loss": 0.0417,
      "num_input_tokens_seen": 4092864,
      "step": 273
    },
    {
      "epoch": 3.5241157556270095,
      "grad_norm": 3.368995189666748,
      "learning_rate": 2.2833333333333336e-06,
      "loss": 0.0176,
      "num_input_tokens_seen": 4107584,
      "step": 274
    },
    {
      "epoch": 3.536977491961415,
      "grad_norm": 3.463752508163452,
      "learning_rate": 2.2916666666666666e-06,
      "loss": 0.0155,
      "num_input_tokens_seen": 4122080,
      "step": 275
    },
    {
      "epoch": 3.54983922829582,
      "grad_norm": 4.7195234298706055,
      "learning_rate": 2.3000000000000004e-06,
      "loss": 0.0135,
      "num_input_tokens_seen": 4137600,
      "step": 276
    },
    {
      "epoch": 3.562700964630225,
      "grad_norm": 6.3303680419921875,
      "learning_rate": 2.3083333333333334e-06,
      "loss": 0.0222,
      "num_input_tokens_seen": 4152960,
      "step": 277
    },
    {
      "epoch": 3.57556270096463,
      "grad_norm": 3.4051268100738525,
      "learning_rate": 2.316666666666667e-06,
      "loss": 0.0289,
      "num_input_tokens_seen": 4168640,
      "step": 278
    },
    {
      "epoch": 3.5884244372990355,
      "grad_norm": 3.971222162246704,
      "learning_rate": 2.325e-06,
      "loss": 0.0314,
      "num_input_tokens_seen": 4183616,
      "step": 279
    },
    {
      "epoch": 3.6012861736334405,
      "grad_norm": 5.509104251861572,
      "learning_rate": 2.3333333333333336e-06,
      "loss": 0.0582,
      "num_input_tokens_seen": 4197536,
      "step": 280
    },
    {
      "epoch": 3.6141479099678455,
      "grad_norm": 4.121878623962402,
      "learning_rate": 2.341666666666667e-06,
      "loss": 0.0339,
      "num_input_tokens_seen": 4213152,
      "step": 281
    },
    {
      "epoch": 3.627009646302251,
      "grad_norm": 6.977634429931641,
      "learning_rate": 2.35e-06,
      "loss": 0.0656,
      "num_input_tokens_seen": 4228320,
      "step": 282
    },
    {
      "epoch": 3.639871382636656,
      "grad_norm": 3.6625123023986816,
      "learning_rate": 2.3583333333333338e-06,
      "loss": 0.0355,
      "num_input_tokens_seen": 4243680,
      "step": 283
    },
    {
      "epoch": 3.652733118971061,
      "grad_norm": 4.691339492797852,
      "learning_rate": 2.3666666666666667e-06,
      "loss": 0.037,
      "num_input_tokens_seen": 4258464,
      "step": 284
    },
    {
      "epoch": 3.665594855305466,
      "grad_norm": 3.1770055294036865,
      "learning_rate": 2.375e-06,
      "loss": 0.0206,
      "num_input_tokens_seen": 4273728,
      "step": 285
    },
    {
      "epoch": 3.6784565916398715,
      "grad_norm": 2.424788475036621,
      "learning_rate": 2.3833333333333335e-06,
      "loss": 0.0286,
      "num_input_tokens_seen": 4288416,
      "step": 286
    },
    {
      "epoch": 3.6913183279742765,
      "grad_norm": 5.731730937957764,
      "learning_rate": 2.391666666666667e-06,
      "loss": 0.0316,
      "num_input_tokens_seen": 4303712,
      "step": 287
    },
    {
      "epoch": 3.7041800643086815,
      "grad_norm": 4.939082622528076,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.0263,
      "num_input_tokens_seen": 4319104,
      "step": 288
    },
    {
      "epoch": 3.717041800643087,
      "grad_norm": 2.817589521408081,
      "learning_rate": 2.4083333333333337e-06,
      "loss": 0.0265,
      "num_input_tokens_seen": 4334912,
      "step": 289
    },
    {
      "epoch": 3.729903536977492,
      "grad_norm": 2.6983306407928467,
      "learning_rate": 2.4166666666666667e-06,
      "loss": 0.0251,
      "num_input_tokens_seen": 4349280,
      "step": 290
    },
    {
      "epoch": 3.742765273311897,
      "grad_norm": 5.260397434234619,
      "learning_rate": 2.425e-06,
      "loss": 0.0293,
      "num_input_tokens_seen": 4363872,
      "step": 291
    },
    {
      "epoch": 3.755627009646302,
      "grad_norm": 4.8456034660339355,
      "learning_rate": 2.4333333333333335e-06,
      "loss": 0.0278,
      "num_input_tokens_seen": 4378752,
      "step": 292
    },
    {
      "epoch": 3.7684887459807075,
      "grad_norm": 4.560141563415527,
      "learning_rate": 2.441666666666667e-06,
      "loss": 0.035,
      "num_input_tokens_seen": 4393792,
      "step": 293
    },
    {
      "epoch": 3.7813504823151125,
      "grad_norm": 6.035860538482666,
      "learning_rate": 2.4500000000000003e-06,
      "loss": 0.0455,
      "num_input_tokens_seen": 4408192,
      "step": 294
    },
    {
      "epoch": 3.7942122186495175,
      "grad_norm": 6.634167194366455,
      "learning_rate": 2.4583333333333332e-06,
      "loss": 0.0429,
      "num_input_tokens_seen": 4422432,
      "step": 295
    },
    {
      "epoch": 3.807073954983923,
      "grad_norm": 9.78260326385498,
      "learning_rate": 2.466666666666667e-06,
      "loss": 0.056,
      "num_input_tokens_seen": 4437728,
      "step": 296
    },
    {
      "epoch": 3.819935691318328,
      "grad_norm": 6.473577499389648,
      "learning_rate": 2.475e-06,
      "loss": 0.0325,
      "num_input_tokens_seen": 4452928,
      "step": 297
    },
    {
      "epoch": 3.832797427652733,
      "grad_norm": 8.688652992248535,
      "learning_rate": 2.4833333333333334e-06,
      "loss": 0.0401,
      "num_input_tokens_seen": 4468096,
      "step": 298
    },
    {
      "epoch": 3.845659163987138,
      "grad_norm": 4.41790771484375,
      "learning_rate": 2.491666666666667e-06,
      "loss": 0.0182,
      "num_input_tokens_seen": 4482976,
      "step": 299
    },
    {
      "epoch": 3.8585209003215435,
      "grad_norm": 3.7182106971740723,
      "learning_rate": 2.5e-06,
      "loss": 0.0344,
      "num_input_tokens_seen": 4497952,
      "step": 300
    },
    {
      "epoch": 3.8713826366559485,
      "grad_norm": 4.0538225173950195,
      "learning_rate": 2.5083333333333336e-06,
      "loss": 0.0111,
      "num_input_tokens_seen": 4512896,
      "step": 301
    },
    {
      "epoch": 3.884244372990354,
      "grad_norm": 3.3492507934570312,
      "learning_rate": 2.5166666666666666e-06,
      "loss": 0.0088,
      "num_input_tokens_seen": 4528448,
      "step": 302
    },
    {
      "epoch": 3.897106109324759,
      "grad_norm": 3.518641710281372,
      "learning_rate": 2.5250000000000004e-06,
      "loss": 0.013,
      "num_input_tokens_seen": 4543712,
      "step": 303
    },
    {
      "epoch": 3.909967845659164,
      "grad_norm": 5.015272617340088,
      "learning_rate": 2.5333333333333338e-06,
      "loss": 0.0433,
      "num_input_tokens_seen": 4559008,
      "step": 304
| }, |
| { |
| "epoch": 3.922829581993569, |
| "grad_norm": 3.2924585342407227, |
| "learning_rate": 2.5416666666666668e-06, |
| "loss": 0.0214, |
| "num_input_tokens_seen": 4573696, |
| "step": 305 |
| }, |
| { |
| "epoch": 3.935691318327974, |
| "grad_norm": 8.486410140991211, |
| "learning_rate": 2.55e-06, |
| "loss": 0.0416, |
| "num_input_tokens_seen": 4588384, |
| "step": 306 |
| }, |
| { |
| "epoch": 3.9485530546623795, |
| "grad_norm": 6.4795002937316895, |
| "learning_rate": 2.558333333333334e-06, |
| "loss": 0.0321, |
| "num_input_tokens_seen": 4602944, |
| "step": 307 |
| }, |
| { |
| "epoch": 3.9614147909967845, |
| "grad_norm": 6.211554050445557, |
| "learning_rate": 2.566666666666667e-06, |
| "loss": 0.0526, |
| "num_input_tokens_seen": 4618336, |
| "step": 308 |
| }, |
| { |
| "epoch": 3.97427652733119, |
| "grad_norm": 5.602302074432373, |
| "learning_rate": 2.5750000000000003e-06, |
| "loss": 0.025, |
| "num_input_tokens_seen": 4633472, |
| "step": 309 |
| }, |
| { |
| "epoch": 3.987138263665595, |
| "grad_norm": 9.136455535888672, |
| "learning_rate": 2.5833333333333337e-06, |
| "loss": 0.0787, |
| "num_input_tokens_seen": 4649472, |
| "step": 310 |
| }, |
| { |
| "epoch": 4.0, |
| "grad_norm": 8.398818969726562, |
| "learning_rate": 2.5916666666666667e-06, |
| "loss": 0.0559, |
| "num_input_tokens_seen": 4664384, |
| "step": 311 |
| }, |
| { |
| "epoch": 4.012861736334405, |
| "grad_norm": 2.286210536956787, |
| "learning_rate": 2.6e-06, |
| "loss": 0.0259, |
| "num_input_tokens_seen": 4679200, |
| "step": 312 |
| }, |
| { |
| "epoch": 4.02572347266881, |
| "grad_norm": 6.477627754211426, |
| "learning_rate": 2.608333333333333e-06, |
| "loss": 0.037, |
| "num_input_tokens_seen": 4693696, |
| "step": 313 |
| }, |
| { |
| "epoch": 4.038585209003215, |
| "grad_norm": 4.953974723815918, |
| "learning_rate": 2.616666666666667e-06, |
| "loss": 0.0293, |
| "num_input_tokens_seen": 4708608, |
| "step": 314 |
| }, |
| { |
| "epoch": 4.051446945337621, |
| "grad_norm": 2.7035064697265625, |
| "learning_rate": 2.6250000000000003e-06, |
| "loss": 0.0153, |
| "num_input_tokens_seen": 4722944, |
| "step": 315 |
| }, |
| { |
| "epoch": 4.064308681672026, |
| "grad_norm": 3.8599040508270264, |
| "learning_rate": 2.6333333333333332e-06, |
| "loss": 0.0234, |
| "num_input_tokens_seen": 4739296, |
| "step": 316 |
| }, |
| { |
| "epoch": 4.077170418006431, |
| "grad_norm": 5.4720072746276855, |
| "learning_rate": 2.6416666666666666e-06, |
| "loss": 0.0212, |
| "num_input_tokens_seen": 4754048, |
| "step": 317 |
| }, |
| { |
| "epoch": 4.090032154340836, |
| "grad_norm": 3.8060128688812256, |
| "learning_rate": 2.6500000000000005e-06, |
| "loss": 0.0187, |
| "num_input_tokens_seen": 4769376, |
| "step": 318 |
| }, |
| { |
| "epoch": 4.102893890675241, |
| "grad_norm": 5.436901092529297, |
| "learning_rate": 2.6583333333333334e-06, |
| "loss": 0.0245, |
| "num_input_tokens_seen": 4784640, |
| "step": 319 |
| }, |
| { |
| "epoch": 4.115755627009646, |
| "grad_norm": 1.5260390043258667, |
| "learning_rate": 2.666666666666667e-06, |
| "loss": 0.0032, |
| "num_input_tokens_seen": 4799392, |
| "step": 320 |
| }, |
| { |
| "epoch": 4.128617363344051, |
| "grad_norm": 11.145827293395996, |
| "learning_rate": 2.6750000000000002e-06, |
| "loss": 0.0367, |
| "num_input_tokens_seen": 4813696, |
| "step": 321 |
| }, |
| { |
| "epoch": 4.141479099678457, |
| "grad_norm": 5.737987518310547, |
| "learning_rate": 2.683333333333333e-06, |
| "loss": 0.038, |
| "num_input_tokens_seen": 4828928, |
| "step": 322 |
| }, |
| { |
| "epoch": 4.154340836012862, |
| "grad_norm": 12.955948829650879, |
| "learning_rate": 2.691666666666667e-06, |
| "loss": 0.0194, |
| "num_input_tokens_seen": 4843232, |
| "step": 323 |
| }, |
| { |
| "epoch": 4.167202572347267, |
| "grad_norm": 2.663821220397949, |
| "learning_rate": 2.7000000000000004e-06, |
| "loss": 0.0262, |
| "num_input_tokens_seen": 4858592, |
| "step": 324 |
| }, |
| { |
| "epoch": 4.180064308681672, |
| "grad_norm": 7.183210372924805, |
| "learning_rate": 2.7083333333333334e-06, |
| "loss": 0.0151, |
| "num_input_tokens_seen": 4872928, |
| "step": 325 |
| }, |
| { |
| "epoch": 4.192926045016077, |
| "grad_norm": 5.024378776550293, |
| "learning_rate": 2.7166666666666668e-06, |
| "loss": 0.0267, |
| "num_input_tokens_seen": 4887968, |
| "step": 326 |
| }, |
| { |
| "epoch": 4.205787781350482, |
| "grad_norm": 1.0667839050292969, |
| "learning_rate": 2.7250000000000006e-06, |
| "loss": 0.0041, |
| "num_input_tokens_seen": 4902752, |
| "step": 327 |
| }, |
| { |
| "epoch": 4.218649517684887, |
| "grad_norm": 6.243584632873535, |
| "learning_rate": 2.7333333333333336e-06, |
| "loss": 0.0365, |
| "num_input_tokens_seen": 4917280, |
| "step": 328 |
| }, |
| { |
| "epoch": 4.231511254019293, |
| "grad_norm": 3.255237579345703, |
| "learning_rate": 2.741666666666667e-06, |
| "loss": 0.0151, |
| "num_input_tokens_seen": 4933088, |
| "step": 329 |
| }, |
| { |
| "epoch": 4.244372990353698, |
| "grad_norm": 1.9820326566696167, |
| "learning_rate": 2.7500000000000004e-06, |
| "loss": 0.006, |
| "num_input_tokens_seen": 4948128, |
| "step": 330 |
| }, |
| { |
| "epoch": 4.257234726688103, |
| "grad_norm": 4.553264141082764, |
| "learning_rate": 2.7583333333333333e-06, |
| "loss": 0.0247, |
| "num_input_tokens_seen": 4962752, |
| "step": 331 |
| }, |
| { |
| "epoch": 4.270096463022508, |
| "grad_norm": 3.139723062515259, |
| "learning_rate": 2.766666666666667e-06, |
| "loss": 0.0147, |
| "num_input_tokens_seen": 4978272, |
| "step": 332 |
| }, |
| { |
| "epoch": 4.282958199356913, |
| "grad_norm": 9.011144638061523, |
| "learning_rate": 2.7750000000000005e-06, |
| "loss": 0.0413, |
| "num_input_tokens_seen": 4992384, |
| "step": 333 |
| }, |
| { |
| "epoch": 4.295819935691318, |
| "grad_norm": 11.36248779296875, |
| "learning_rate": 2.7833333333333335e-06, |
| "loss": 0.0318, |
| "num_input_tokens_seen": 5007136, |
| "step": 334 |
| }, |
| { |
| "epoch": 4.308681672025724, |
| "grad_norm": 7.9140305519104, |
| "learning_rate": 2.791666666666667e-06, |
| "loss": 0.0513, |
| "num_input_tokens_seen": 5021600, |
| "step": 335 |
| }, |
| { |
| "epoch": 4.321543408360129, |
| "grad_norm": 4.12211799621582, |
| "learning_rate": 2.8000000000000003e-06, |
| "loss": 0.0165, |
| "num_input_tokens_seen": 5037216, |
| "step": 336 |
| }, |
| { |
| "epoch": 4.334405144694534, |
| "grad_norm": 4.724823951721191, |
| "learning_rate": 2.8083333333333333e-06, |
| "loss": 0.0131, |
| "num_input_tokens_seen": 5052320, |
| "step": 337 |
| }, |
| { |
| "epoch": 4.347266881028939, |
| "grad_norm": 6.9196085929870605, |
| "learning_rate": 2.816666666666667e-06, |
| "loss": 0.0246, |
| "num_input_tokens_seen": 5067552, |
| "step": 338 |
| }, |
| { |
| "epoch": 4.360128617363344, |
| "grad_norm": 5.785276412963867, |
| "learning_rate": 2.825e-06, |
| "loss": 0.0465, |
| "num_input_tokens_seen": 5082240, |
| "step": 339 |
| }, |
| { |
| "epoch": 4.372990353697749, |
| "grad_norm": 1.6201478242874146, |
| "learning_rate": 2.8333333333333335e-06, |
| "loss": 0.0084, |
| "num_input_tokens_seen": 5097408, |
| "step": 340 |
| }, |
| { |
| "epoch": 4.385852090032154, |
| "grad_norm": 5.511898994445801, |
| "learning_rate": 2.841666666666667e-06, |
| "loss": 0.0105, |
| "num_input_tokens_seen": 5111680, |
| "step": 341 |
| }, |
| { |
| "epoch": 4.39871382636656, |
| "grad_norm": 8.025418281555176, |
| "learning_rate": 2.85e-06, |
| "loss": 0.0388, |
| "num_input_tokens_seen": 5127264, |
| "step": 342 |
| }, |
| { |
| "epoch": 4.411575562700965, |
| "grad_norm": 12.538291931152344, |
| "learning_rate": 2.8583333333333336e-06, |
| "loss": 0.0486, |
| "num_input_tokens_seen": 5142464, |
| "step": 343 |
| }, |
| { |
| "epoch": 4.42443729903537, |
| "grad_norm": 10.100396156311035, |
| "learning_rate": 2.866666666666667e-06, |
| "loss": 0.0354, |
| "num_input_tokens_seen": 5157376, |
| "step": 344 |
| }, |
| { |
| "epoch": 4.437299035369775, |
| "grad_norm": 4.218758583068848, |
| "learning_rate": 2.875e-06, |
| "loss": 0.0464, |
| "num_input_tokens_seen": 5171648, |
| "step": 345 |
| }, |
| { |
| "epoch": 4.45016077170418, |
| "grad_norm": 5.751765251159668, |
| "learning_rate": 2.8833333333333334e-06, |
| "loss": 0.0312, |
| "num_input_tokens_seen": 5186752, |
| "step": 346 |
| }, |
| { |
| "epoch": 4.463022508038585, |
| "grad_norm": 6.100085258483887, |
| "learning_rate": 2.8916666666666672e-06, |
| "loss": 0.0352, |
| "num_input_tokens_seen": 5202176, |
| "step": 347 |
| }, |
| { |
| "epoch": 4.47588424437299, |
| "grad_norm": 6.4870991706848145, |
| "learning_rate": 2.9e-06, |
| "loss": 0.0116, |
| "num_input_tokens_seen": 5216896, |
| "step": 348 |
| }, |
| { |
| "epoch": 4.488745980707396, |
| "grad_norm": 1.8258070945739746, |
| "learning_rate": 2.9083333333333336e-06, |
| "loss": 0.0118, |
| "num_input_tokens_seen": 5232448, |
| "step": 349 |
| }, |
| { |
| "epoch": 4.501607717041801, |
| "grad_norm": 2.6930766105651855, |
| "learning_rate": 2.916666666666667e-06, |
| "loss": 0.0138, |
| "num_input_tokens_seen": 5247296, |
| "step": 350 |
| }, |
| { |
| "epoch": 4.514469453376206, |
| "grad_norm": 5.420289993286133, |
| "learning_rate": 2.925e-06, |
| "loss": 0.0344, |
| "num_input_tokens_seen": 5262304, |
| "step": 351 |
| }, |
| { |
| "epoch": 4.527331189710611, |
| "grad_norm": 4.682693958282471, |
| "learning_rate": 2.9333333333333338e-06, |
| "loss": 0.0231, |
| "num_input_tokens_seen": 5277664, |
| "step": 352 |
| }, |
| { |
| "epoch": 4.540192926045016, |
| "grad_norm": 4.194357872009277, |
| "learning_rate": 2.941666666666667e-06, |
| "loss": 0.0267, |
| "num_input_tokens_seen": 5292576, |
| "step": 353 |
| }, |
| { |
| "epoch": 4.553054662379421, |
| "grad_norm": 1.0653674602508545, |
| "learning_rate": 2.95e-06, |
| "loss": 0.0109, |
| "num_input_tokens_seen": 5306304, |
| "step": 354 |
| }, |
| { |
| "epoch": 4.565916398713826, |
| "grad_norm": 1.8720684051513672, |
| "learning_rate": 2.9583333333333335e-06, |
| "loss": 0.0444, |
| "num_input_tokens_seen": 5320768, |
| "step": 355 |
| }, |
| { |
| "epoch": 4.578778135048232, |
| "grad_norm": 4.005693435668945, |
| "learning_rate": 2.9666666666666673e-06, |
| "loss": 0.0142, |
| "num_input_tokens_seen": 5336128, |
| "step": 356 |
| }, |
| { |
| "epoch": 4.591639871382637, |
| "grad_norm": 5.997867107391357, |
| "learning_rate": 2.9750000000000003e-06, |
| "loss": 0.0361, |
| "num_input_tokens_seen": 5351168, |
| "step": 357 |
| }, |
| { |
| "epoch": 4.604501607717042, |
| "grad_norm": 3.8871259689331055, |
| "learning_rate": 2.9833333333333337e-06, |
| "loss": 0.0197, |
| "num_input_tokens_seen": 5366112, |
| "step": 358 |
| }, |
| { |
| "epoch": 4.617363344051447, |
| "grad_norm": 4.269320011138916, |
| "learning_rate": 2.991666666666667e-06, |
| "loss": 0.0373, |
| "num_input_tokens_seen": 5381440, |
| "step": 359 |
| }, |
| { |
| "epoch": 4.630225080385852, |
| "grad_norm": 3.6487338542938232, |
| "learning_rate": 3e-06, |
| "loss": 0.0469, |
| "num_input_tokens_seen": 5397152, |
| "step": 360 |
| }, |
| { |
| "epoch": 4.643086816720257, |
| "grad_norm": 3.9794583320617676, |
| "learning_rate": 3.0083333333333335e-06, |
| "loss": 0.0283, |
| "num_input_tokens_seen": 5412128, |
| "step": 361 |
| }, |
| { |
| "epoch": 4.655948553054662, |
| "grad_norm": 5.071774482727051, |
| "learning_rate": 3.0166666666666673e-06, |
| "loss": 0.0271, |
| "num_input_tokens_seen": 5426816, |
| "step": 362 |
| }, |
| { |
| "epoch": 4.668810289389068, |
| "grad_norm": 3.651871681213379, |
| "learning_rate": 3.0250000000000003e-06, |
| "loss": 0.0303, |
| "num_input_tokens_seen": 5441760, |
| "step": 363 |
| }, |
| { |
| "epoch": 4.681672025723473, |
| "grad_norm": 3.1399481296539307, |
| "learning_rate": 3.0333333333333337e-06, |
| "loss": 0.0207, |
| "num_input_tokens_seen": 5457504, |
| "step": 364 |
| }, |
| { |
| "epoch": 4.694533762057878, |
| "grad_norm": 3.089629888534546, |
| "learning_rate": 3.0416666666666666e-06, |
| "loss": 0.0157, |
| "num_input_tokens_seen": 5473088, |
| "step": 365 |
| }, |
| { |
| "epoch": 4.707395498392283, |
| "grad_norm": 1.4184813499450684, |
| "learning_rate": 3.05e-06, |
| "loss": 0.0128, |
| "num_input_tokens_seen": 5487712, |
| "step": 366 |
| }, |
| { |
| "epoch": 4.720257234726688, |
| "grad_norm": 5.21925687789917, |
| "learning_rate": 3.058333333333334e-06, |
| "loss": 0.0293, |
| "num_input_tokens_seen": 5503680, |
| "step": 367 |
| }, |
| { |
| "epoch": 4.733118971061093, |
| "grad_norm": 3.7801499366760254, |
| "learning_rate": 3.066666666666667e-06, |
| "loss": 0.0211, |
| "num_input_tokens_seen": 5518464, |
| "step": 368 |
| }, |
| { |
| "epoch": 4.745980707395498, |
| "grad_norm": 9.715234756469727, |
| "learning_rate": 3.075e-06, |
| "loss": 0.0929, |
| "num_input_tokens_seen": 5533504, |
| "step": 369 |
| }, |
| { |
| "epoch": 4.758842443729904, |
| "grad_norm": 6.241518974304199, |
| "learning_rate": 3.0833333333333336e-06, |
| "loss": 0.0195, |
| "num_input_tokens_seen": 5548832, |
| "step": 370 |
| }, |
| { |
| "epoch": 4.771704180064309, |
| "grad_norm": 2.6658706665039062, |
| "learning_rate": 3.0916666666666666e-06, |
| "loss": 0.0114, |
| "num_input_tokens_seen": 5564256, |
| "step": 371 |
| }, |
| { |
| "epoch": 4.784565916398714, |
| "grad_norm": 7.38095235824585, |
| "learning_rate": 3.1000000000000004e-06, |
| "loss": 0.0207, |
| "num_input_tokens_seen": 5580064, |
| "step": 372 |
| }, |
| { |
| "epoch": 4.797427652733119, |
| "grad_norm": 5.605347156524658, |
| "learning_rate": 3.1083333333333338e-06, |
| "loss": 0.0144, |
| "num_input_tokens_seen": 5595008, |
| "step": 373 |
| }, |
| { |
| "epoch": 4.810289389067524, |
| "grad_norm": 4.410486221313477, |
| "learning_rate": 3.1166666666666668e-06, |
| "loss": 0.0197, |
| "num_input_tokens_seen": 5610304, |
| "step": 374 |
| }, |
| { |
| "epoch": 4.823151125401929, |
| "grad_norm": 5.259794235229492, |
| "learning_rate": 3.125e-06, |
| "loss": 0.0669, |
| "num_input_tokens_seen": 5626464, |
| "step": 375 |
| }, |
| { |
| "epoch": 4.836012861736334, |
| "grad_norm": 5.21687650680542, |
| "learning_rate": 3.133333333333334e-06, |
| "loss": 0.0156, |
| "num_input_tokens_seen": 5641216, |
| "step": 376 |
| }, |
| { |
| "epoch": 4.84887459807074, |
| "grad_norm": 3.3515796661376953, |
| "learning_rate": 3.141666666666667e-06, |
| "loss": 0.018, |
| "num_input_tokens_seen": 5656832, |
| "step": 377 |
| }, |
| { |
| "epoch": 4.861736334405145, |
| "grad_norm": 7.488304138183594, |
| "learning_rate": 3.1500000000000003e-06, |
| "loss": 0.064, |
| "num_input_tokens_seen": 5671744, |
| "step": 378 |
| }, |
| { |
| "epoch": 4.87459807073955, |
| "grad_norm": 2.625629186630249, |
| "learning_rate": 3.1583333333333337e-06, |
| "loss": 0.0263, |
| "num_input_tokens_seen": 5686496, |
| "step": 379 |
| }, |
| { |
| "epoch": 4.887459807073955, |
| "grad_norm": 3.379544496536255, |
| "learning_rate": 3.1666666666666667e-06, |
| "loss": 0.0256, |
| "num_input_tokens_seen": 5701216, |
| "step": 380 |
| }, |
| { |
| "epoch": 4.90032154340836, |
| "grad_norm": 2.0447638034820557, |
| "learning_rate": 3.175e-06, |
| "loss": 0.0269, |
| "num_input_tokens_seen": 5715904, |
| "step": 381 |
| }, |
| { |
| "epoch": 4.913183279742765, |
| "grad_norm": 3.5290844440460205, |
| "learning_rate": 3.183333333333334e-06, |
| "loss": 0.0281, |
| "num_input_tokens_seen": 5730976, |
| "step": 382 |
| }, |
| { |
| "epoch": 4.92604501607717, |
| "grad_norm": 2.183229923248291, |
| "learning_rate": 3.191666666666667e-06, |
| "loss": 0.0199, |
| "num_input_tokens_seen": 5745824, |
| "step": 383 |
| }, |
| { |
| "epoch": 4.938906752411576, |
| "grad_norm": 1.9661577939987183, |
| "learning_rate": 3.2000000000000003e-06, |
| "loss": 0.0277, |
| "num_input_tokens_seen": 5760160, |
| "step": 384 |
| }, |
| { |
| "epoch": 4.951768488745981, |
| "grad_norm": 5.09304666519165, |
| "learning_rate": 3.2083333333333337e-06, |
| "loss": 0.0158, |
| "num_input_tokens_seen": 5776160, |
| "step": 385 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 385, |
| "num_input_tokens_seen": 5776160, |
| "num_train_epochs": 5, |
| "save_steps": 1000, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 2.6009787341943603e+17, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |